Spaces: Running on Zero
Update inpainting_module.py
Browse files — inpainting_module.py (+10 −3)

inpainting_module.py (CHANGED)
@@ -186,13 +186,17 @@ class InpaintingModule:
         for _ in range(rounds):
             gc.collect()
 
-        if torch.cuda.is_available():
+        # On Hugging Face Spaces, avoid CUDA operations in main process
+        # CUDA operations must only happen within @spaces.GPU decorated functions
+        is_spaces = os.getenv('SPACE_ID') is not None
+
+        if not is_spaces and torch.cuda.is_available():
             torch.cuda.empty_cache()
             if aggressive:
                 torch.cuda.ipc_collect()
                 torch.cuda.synchronize()
 
-        logger.debug(f"Memory cleanup completed (aggressive={aggressive})")
+        logger.debug(f"Memory cleanup completed (aggressive={aggressive}, spaces={is_spaces})")
 
     def _check_memory_status(self) -> Dict[str, float]:
         """
@@ -203,7 +207,10 @@ class InpaintingModule:
         dict
             Memory statistics including allocated, total, and usage ratio
         """
-        if not torch.cuda.is_available():
+        # On Spaces, skip CUDA checks in main process
+        is_spaces = os.getenv('SPACE_ID') is not None
+
+        if is_spaces or not torch.cuda.is_available():
             return {"available": True, "usage_ratio": 0.0}
 
         allocated = torch.cuda.memory_allocated() / 1024**3

NOTE(review): the two deleted lines ("-" lines guarding CUDA calls) were not present in the scraped page and are reconstructed from the hunk line counts and the replacement conditions — verify against the original commit.