# medgemma-space / app.py
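"""Gradio Space that runs google/medgemma-4b-pt on an uploaded medical
image and returns the model's generated findings."""
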
import os

import gradio as gr
import torch
from PIL import Image
from huggingface_hub import login
from transformers import pipeline

MODEL_ID = "google/medgemma-4b-pt"
# If the model is gated, add the token as a secret in the HF Space settings.
hf_token = os.environ.get("HUGGING_FACE_HUB_TOKEN")
if hf_token:
    login(token=hf_token)

def load_model():
    """Build the image-text-to-text pipeline, preferring GPU and bfloat16."""
    device = 0 if torch.cuda.is_available() else -1
    try:
        return pipeline(
            "image-text-to-text",
            model=MODEL_ID,
            device=device,
            torch_dtype=torch.bfloat16,
        )
    except Exception:
        # bfloat16 is unsupported on some hardware; retry with the default dtype.
        try:
            return pipeline("image-text-to-text", model=MODEL_ID, device=device)
        except Exception:
            return None  # analyze_image reports the failure to the user
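
# Build the pipeline once at startup; every Gradio request then reuses it.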
pipe = load_model()
def analyze_image(image: Image.Image):
    if pipe is None:
        return "Model failed to load."
    try:
        # medgemma-4b-pt is a pretrained (non-instruction-tuned) checkpoint,
        # so it is prompted with the raw <start_of_image> token plus a text stub.
        result = pipe(images=image, text="<start_of_image> findings:", max_new_tokens=200)
        return result[0].get("generated_text", str(result))
    except Exception as e:
        return f"Error: {e}"
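
# Minimal Gradio UI: one image in, one text box out.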
demo = gr.Interface(
    fn=analyze_image,
    inputs=gr.Image(type="pil", label="Upload image"),
    outputs=gr.Textbox(label="AI Findings"),
    title="MedGemma Image Analyzer",
    description="Upload a medical image and get AI-generated findings.",
)
if __name__ == "__main__":
    demo.launch()