import io

import torch
from flask import Flask, request, jsonify
from PIL import Image
from transformers import SegformerForImageClassification, SegformerImageProcessor

app = Flask(__name__)
# =============================================================================
# 1. CONFIGURATION AND MODEL LOADING
# =============================================================================

# Path where the 'model' folder was uploaded to the Hugging Face Space
MODEL_PATH = "./model"
BASE_MODEL_NAME = "nvidia/mit-b0"  # Fallback for the image processor
| print("🔄 Cargando sistema de IA...") | |
| try: | |
| # Cargar Procesador (Intenta local, si falla usa el de NVIDIA) | |
| try: | |
| processor = SegformerImageProcessor.from_pretrained(MODEL_PATH) | |
| except: | |
| print("⚠️ Procesador local no encontrado, usando base nvidia/mit-b0") | |
| processor = SegformerImageProcessor.from_pretrained(BASE_MODEL_NAME) | |
| # Cargar Modelo Entrenado | |
| model = SegformerForImageClassification.from_pretrained(MODEL_PATH) | |
| model.eval() # Modo evaluación (ahorra memoria y es más rápido) | |
| print("✅ Modelo SegFormer cargado exitosamente.") | |
| print(f" - Clases disponibles: {model.config.id2label}") | |
| except Exception as e: | |
| print(f"❌ ERROR FATAL CARGANDO MODELO: {e}") | |
# =============================================================================
# 2. API ROUTES
# =============================================================================

@app.route('/')
def home():
    return "NOM-083 API (SegFormer) - Status: ACTIVE 🟢"


@app.route('/predict', methods=['POST'])
def predict():
    # Basic validation
    if 'image' not in request.files:
        return jsonify({"error": "The 'image' field was not sent"}), 400
    try:
        file = request.files['image']

        # --- "BULLETPROOF" FIX ---
        # Read the file's bytes directly into RAM. This avoids the
        # "cannot identify image file" error caused by temporary files.
        img_bytes = file.read()
        image = Image.open(io.BytesIO(img_bytes)).convert("RGB")

        # Preprocessing
        inputs = processor(images=image, return_tensors="pt")
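        # Note: 'inputs' is a BatchFeature whose 'pixel_values' tensor has
        # shape (1, 3, 512, 512) under the processor's default settings; the
        # processor handles resizing and normalization for the model.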
        # Inference
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        # Results (softmax turns logits into probabilities)
        probs = torch.nn.functional.softmax(logits, dim=-1)
        top_score, top_idx = probs.max(dim=-1)
        clase_nombre = model.config.id2label[top_idx.item()]
        confianza = top_score.item() * 100
        # Clean JSON response
        return jsonify({
            "status": "success",
            "clase": clase_nombre,
            "confianza": round(confianza, 2),
            "mensaje": f"Analysis: {clase_nombre}"
        })
    except Exception as e:
        print(f"❌ Internal error: {e}")
        return jsonify({"error": str(e), "status": "error"}), 500
# =============================================================================
# 3. SERVER STARTUP
# =============================================================================
if __name__ == '__main__':
    # Port 7860 is the Hugging Face Spaces standard
    app.run(host='0.0.0.0', port=7860)
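
# -----------------------------------------------------------------------------
# Usage sketch (not part of the server): a minimal client for the /predict
# route defined above. The Space URL is a placeholder -- replace API_URL and
# the image path with your own values. Kept commented out so this file still
# runs as the server entry point.
# -----------------------------------------------------------------------------
# import requests
#
# API_URL = "https://your-username-your-space.hf.space/predict"
# with open("sample.jpg", "rb") as f:
#     # Send the image as multipart/form-data under the 'image' field,
#     # matching the key the Flask endpoint expects.
#     resp = requests.post(API_URL, files={"image": f}, timeout=60)
# resp.raise_for_status()
# data = resp.json()
# print(f"Class: {data['clase']} ({data['confianza']}% confidence)")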