Spaces:
No application file
No application file
File size: 1,777 Bytes
0fc9402 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 |
import os
from fastapi import FastAPI, Depends, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from auth import get_current_user
app = FastAPI(
    title="Text Prediction API",
    description="AI-powered text prediction service",
    version="0.1.0"
)


def _parse_allowed_origins(raw: str) -> list[str]:
    """Parse a comma-separated ALLOWED_ORIGINS value into a list of origins.

    Entries are stripped of surrounding whitespace and empty entries are
    dropped, so values like "https://a.com, https://b.com," parse cleanly
    (the original split(",") kept the leading space and the empty trailing
    entry, so those origins never matched). Returns ["*"] when no usable
    origin is configured, preserving the original wildcard fallback.
    """
    origins = [origin.strip() for origin in raw.split(",") if origin.strip()]
    return origins or ["*"]


# Configure CORS - use environment variable for allowed origins in production.
allowed_origins = _parse_allowed_origins(os.getenv("ALLOWED_ORIGINS", ""))

app.add_middleware(
    CORSMiddleware,
    allow_origins=allowed_origins,
    # NOTE: credentials must stay disabled while a wildcard origin is possible.
    allow_credentials=False,
    allow_methods=["*"],
    allow_headers=["*"],
)
class TextPredictRequest(BaseModel):
    """Request body for the /predict endpoint."""
    # Raw input text to run the (mock) prediction on.
    text: str
class TextPredictResponse(BaseModel):
    """Response body for the /predict endpoint."""
    # Mock prediction string derived from the input text.
    prediction: str
    # Confidence score for the prediction (currently a fixed placeholder).
    confidence: float
    # Echo of the original input text.
    input_text: str
@app.get("/")
async def root():
    """Health check endpoint reporting service status and name."""
    payload = {"status": "healthy", "service": "text-api"}
    return payload
@app.get("/health")
async def health():
    """Health check endpoint reporting service status only."""
    return dict(status="healthy")
@app.post("/predict", response_model=TextPredictResponse)
async def predict(
    request: TextPredictRequest,
    current_user: dict = Depends(get_current_user)
):
    """
    Protected endpoint for text prediction.
    Requires valid Bearer token.
    """
    # Placeholder prediction logic — a real deployment would invoke an ML
    # model here instead of echoing the input back.
    text = request.text

    # Mock prediction: echo the first 50 characters, marking truncation.
    if len(text) > 50:
        prediction = f"Processed: {text[:50]}..."
    else:
        prediction = f"Processed: {text}"

    # Fixed placeholder confidence value.
    confidence = 0.95

    return TextPredictResponse(
        prediction=prediction,
        confidence=confidence,
        input_text=text
    )
if __name__ == "__main__":
    # Script entry point: serve the app directly with uvicorn on port 8001
    # (binds all interfaces) for local/dev runs.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8001)
|