# SceneWeaver — application entry point.
# Deployed as a Hugging Face Space (ZeroGPU); models are pre-cached on CPU
# at startup so no download happens inside @spaces.GPU execution.
import os
import sys
import traceback
import warnings
warnings.filterwarnings("ignore")
from ui_manager import UIManager
def preload_models_to_cache():
    """Warm the HuggingFace cache with ControlNet weights before GPU allocation.

    Runs on CPU only, so no @spaces.GPU time is spent downloading.
    Best-effort: every failure is logged and deferred to on-demand
    download at first use. No-op when not running on HF Spaces.
    """
    if not os.getenv('SPACE_ID'):
        return  # Skip if not on Spaces

    print("📦 Pre-downloading models to cache (CPU only, no GPU usage)...")
    try:
        import torch
        from diffusers import ControlNetModel

        # ControlNet checkpoints to place in the local HF cache.
        cache_targets = (
            ("diffusers/controlnet-canny-sdxl-1.0", "Canny ControlNet"),
            ("diffusers/controlnet-depth-sdxl-1.0", "Depth ControlNet"),
        )
        for model_id, model_name in cache_targets:
            print(f" ⬇️ Downloading {model_name} ({model_id})...")
            try:
                # Instantiate once purely to populate the cache; the object
                # itself is discarded immediately.
                ControlNetModel.from_pretrained(
                    model_id,
                    torch_dtype=torch.float16,
                    use_safetensors=True,
                    local_files_only=False  # Allow download
                )
            except Exception as e:
                print(f" ⚠️ {model_name} download failed (will retry on-demand): {e}")
            else:
                print(f" ✅ {model_name} cached")
        print("✅ Model pre-caching complete")
    except Exception as e:
        # Covers missing diffusers/torch as well as any unexpected failure.
        print(f"⚠️ Model pre-caching failed: {e}")
        print(" Models will be downloaded on first use instead.")
def launch_final_blend_sceneweaver(share: bool = True, debug: bool = False):
    """Launch SceneWeaver Application.

    Args:
        share: Forwarded to UIManager.launch (public-link behavior).
        debug: Forwarded to UIManager.launch.

    Returns:
        The interface object produced by UIManager.launch().

    Raises:
        Re-raises any ImportError or other launch failure after printing
        the full traceback.
    """

    def _print_spaces_banner():
        # Informational banner shown only inside Hugging Face Spaces,
        # where models were pre-cached at startup.
        if os.getenv('SPACE_ID'):
            print("\n🔧 Detected Hugging Face Spaces environment")
            print("⚡ Models pre-cached - ready for fast inference")
            print(" Expected inference time: ~300-350s (with cached models)")
            print()

    print("🎨 Starting SceneWeaver...")
    print("✨ AI-Powered Image Background Generation")
    try:
        # Pre-download models on Spaces so GPU time is not spent downloading.
        preload_models_to_cache()

        print("🔍 Testing imports...")
        try:
            print("🔍 Creating UIManager instance...")
            ui = UIManager()
            print("✅ UIManager instance created successfully")

            _print_spaces_banner()

            print("🚀 Launching interface...")
            interface = ui.launch(share=share, debug=debug)
            print("✅ Interface launched successfully")
            return interface
        except ImportError as import_error:
            print(f"❌ Import failed: {import_error}")
            print(f"Traceback: {traceback.format_exc()}")
            raise
    except Exception as e:
        print(f"❌ Failed to launch: {e}")
        print(f"Full traceback: {traceback.format_exc()}")
        raise
def launch_ui(share: bool = True, debug: bool = False):
    """Convenience function for Jupyter notebooks.

    Thin alias that delegates directly to launch_final_blend_sceneweaver.
    """
    return launch_final_blend_sceneweaver(share=share, debug=debug)
def main():
    """Main entry point: detect the runtime, parse CLI flags, and launch.

    In Jupyter/Colab (detected via get_ipython) or when no genuine CLI
    arguments are present, defaults to share=True, debug=False.
    """
    # get_ipython is injected by IPython; NameError means a plain interpreter.
    try:
        get_ipython()  # noqa: F821 - provided by IPython at runtime
        is_jupyter = True
    except NameError:
        is_jupyter = False

    # Jupyter kernels pass '-f <connection.json>'; treat its presence as
    # "not real CLI arguments" and fall back to defaults.
    cli_mode = (
        not is_jupyter
        and len(sys.argv) > 1
        and not any('-f' in arg for arg in sys.argv)
    )
    if cli_mode:
        # Command line mode with arguments
        share = '--no-share' not in sys.argv
        debug = '--debug' in sys.argv
    else:
        # Default mode
        share, debug = True, False

    try:
        interface = launch_final_blend_sceneweaver(share=share, debug=debug)
        if not is_jupyter:
            print("🛑 Press Ctrl+C to stop")
            try:
                # Keep the process alive until the user interrupts.
                interface.block_thread()
            except KeyboardInterrupt:
                print("👋 Stopped")
        return interface
    except Exception as e:
        print(f"❌ Error: {e}")
        if not is_jupyter:
            sys.exit(1)
        raise


if __name__ == "__main__":
    main()