# Dockerfile for Google Cloud Run deployment
FROM python:3.10-slim
# Install system dependencies
RUN apt-get update && apt-get install -y \
build-essential \
git \
curl \
&& rm -rf /var/lib/apt/lists/*
# Set working directory
WORKDIR /app
# Copy requirements first for better caching
COPY requirements.txt .
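# (With only requirements.txt copied at this point, Docker rebuilds the
# dependency layer below just when requirements.txt changes, not on every
# source-code edit.)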
# Install Python dependencies
# Note: flash-attn requires CUDA for compilation, so skip it for CPU-only Cloud Run deployments
# The app gracefully handles missing flash-attn (see app.py FLASH_ATTN_AVAILABLE check)
RUN sed '/^flash-attn/d' requirements.txt > requirements-no-flash.txt && \
pip install --no-cache-dir -r requirements-no-flash.txt && \
echo "Installed dependencies (skipped flash-attn - requires CUDA)"
# Copy application code
COPY . .
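# COPY . . pulls in the whole build context; assuming a .dockerignore sits next
# to this Dockerfile, entries like the following keep the image small (a sketch):
#   .git
#   __pycache__/
#   *.pyc
#   .venv/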
# Expose the Gradio port (7860 by default; Cloud Run injects its own PORT env var, see the note below)
EXPOSE 7860
# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV GRADIO_SERVER_NAME=0.0.0.0
ENV GRADIO_SERVER_PORT=7860
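# Cloud Run sets PORT at runtime; the hardcoded 7860 above assumes the service's
# container port is configured to 7860. To honor PORT instead, a shell-form CMD
# like this sketch could replace the one below:
#   CMD ["sh", "-c", "GRADIO_SERVER_PORT=${PORT:-7860} python app.py"]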
# Run the application
CMD ["python", "app.py"]