Spaces: Running on Zero

Add missing type annotation

app.py CHANGED
@@ -1,5 +1,6 @@
 import colorsys
 import gc
+from collections.abc import Iterator
 
 import cv2
 import gradio as gr
@@ -95,10 +96,10 @@ def pastel_color_for_prompt(prompt_text: str) -> tuple[int, int, int]:
 
 
 class AppState:
-    def __init__(self):
+    def __init__(self) -> None:
         self.reset()
 
-    def reset(self):
+    def reset(self) -> None:
         self.video_frames: list[Image.Image] = []
         self.inference_session = None
         self.video_fps: float | None = None
@@ -119,7 +120,7 @@ class AppState:
         self.pending_box_start_obj_id: int | None = None
         self.active_tab: str = "point_box"
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return f"AppState(video_frames={len(self.video_frames)}, video_fps={self.video_fps}, masks_by_frame={len(self.masks_by_frame)}, color_by_obj={len(self.color_by_obj)})"
 
     @property
@@ -346,7 +347,7 @@ def _get_prompt_for_obj(state: AppState, obj_id: int) -> str | None:
     return None
 
 
-def _ensure_color_for_obj(state: AppState, obj_id: int):
+def _ensure_color_for_obj(state: AppState, obj_id: int) -> None:
     """Assign color to object based on its prompt if available, otherwise use object ID."""
     prompt_text = _get_prompt_for_obj(state, obj_id)
 
@@ -595,7 +596,7 @@ def _get_active_prompts_display(state: AppState) -> str:
     return "**Active prompts:** None"
 
 
-def propagate_masks(GLOBAL_STATE: gr.State):
+def propagate_masks(GLOBAL_STATE: gr.State) -> Iterator[tuple[AppState, str, dict]]:
     if GLOBAL_STATE is None:
         return GLOBAL_STATE, "Load a video first.", gr.update()
 
@@ -861,7 +862,7 @@ def reset_session(GLOBAL_STATE: gr.State) -> tuple[AppState, Image.Image, int, i
     return GLOBAL_STATE, preview_img, slider_minmax, slider_value, status, active_prompts
 
 
-def _on_video_change_pointbox(GLOBAL_STATE: gr.State, video):
+def _on_video_change_pointbox(GLOBAL_STATE: gr.State, video: str | dict) -> tuple[AppState, dict, Image.Image, str]:
     GLOBAL_STATE, min_idx, max_idx, first_frame, status = init_video_session(GLOBAL_STATE, video, "point_box")
     return (
         GLOBAL_STATE,
@@ -871,7 +872,7 @@ def _on_video_change_pointbox(GLOBAL_STATE: gr.State, video):
     )
 
 
-def _on_video_change_text(GLOBAL_STATE: gr.State, video):
+def _on_video_change_text(GLOBAL_STATE: gr.State, video: str | dict) -> tuple[AppState, dict, Image.Image, str, str]:
     GLOBAL_STATE, min_idx, max_idx, first_frame, status = init_video_session(GLOBAL_STATE, video, "text")
     active_prompts = _get_active_prompts_display(GLOBAL_STATE)
     return (
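The new Iterator import and the return annotation on propagate_masks suggest the function is a generator: Gradio treats each yielded tuple as a streaming update to the bound output components, and gr.update() returns a dict, which matches the dict element of the annotated tuple. A minimal sketch of that pattern, using a hypothetical DemoState stand-in and a hard-coded frame count rather than this Space's real propagation logic:

from collections.abc import Iterator
from dataclasses import dataclass

import gradio as gr


@dataclass
class DemoState:
    """Hypothetical stand-in for the Space's AppState."""
    num_frames: int = 5


def propagate_masks_sketch(state: DemoState) -> Iterator[tuple[DemoState, str, dict]]:
    # Each yield is delivered to the UI as an intermediate update; the final
    # yield carries the finished status and re-enables a control via gr.update().
    for i in range(state.num_frames):
        yield state, f"Propagating masks: frame {i + 1}/{state.num_frames}", gr.update()
    yield state, "Propagation finished.", gr.update(interactive=True)

Annotating the generator as Generator[tuple[DemoState, str, dict], None, None] would be equally accurate; Iterator[...] is the shorter conventional choice when the send and return types are unused.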