from flask import Flask, render_template, request, jsonify, Response, stream_with_context
from google import genai
import logging
from pathlib import Path
import sys
from typing import Generator
import json
import os

api_key = os.environ.get("GEMINI_API_KEY")
# api_key = "YOUR_API_KEY_HERE"

# Logging configuration
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout),
        logging.FileHandler(Path('app.log'))
    ]
)
logger = logging.getLogger(__name__)

app = Flask(__name__)

class GeminiClient:
    def __init__(self, api_key: str):
        self.client = None
        self.init_client(api_key)

    def init_client(self, api_key: str) -> None:
        try:
            self.client = genai.Client(
                api_key=api_key,
                http_options={'api_version': 'v1alpha'}
            )
        except Exception as e:
            logger.error(f"Failed to initialize the Gemini client: {e}")
            raise RuntimeError(f"Unable to initialize the Gemini client: {e}")

    def get_response(self, question: str, model_name: str) -> Generator:
        if not self.client:
            raise RuntimeError("Gemini client not initialized")
        try:
            response = self.client.models.generate_content_stream(
                model=model_name,
                config={'thinking_config': {'include_thoughts': True}},
                contents=[question]
            )
            return response
        except Exception as e:
            logger.error(f"Error while generating the response: {e}")
            raise

def stream_response(response: Generator):
    thinking_text = ""
    answer_text = ""
    try:
        for chunk in response:
            if hasattr(chunk, 'candidates') and chunk.candidates:
                content = chunk.candidates[0].content
                if hasattr(content, 'parts'):
                    for part in content.parts:
                        has_thought = hasattr(part, 'thought') and part.thought
                        text = getattr(part, 'text', '')
                        if not text:
                            continue
                        if has_thought:
                            thinking_text += text
                            yield json.dumps({
                                'type': 'thinking',
                                'content': thinking_text
                            }) + '\n'
                        else:
                            answer_text += text
                            yield json.dumps({
                                'type': 'answer',
                                'content': answer_text
                            }) + '\n'
    except Exception as e:
        logger.error(f"Error while streaming the response: {e}")
        yield json.dumps({
            'type': 'error',
            'content': "An error occurred while processing the response."
        }) + '\n'

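# The generator above emits newline-delimited JSON that the front-end can parse
# line by line. The 'thinking' and 'answer' content fields are cumulative, i.e.
# each line re-sends the full text so far. Illustrative (made-up) output:
#   {"type": "thinking", "content": "First, list the primes below 10..."}
#   {"type": "answer", "content": "There are 25 primes below 100."}
#   {"type": "error", "content": "An error occurred while processing the response."}
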
gemini_client = GeminiClient(api_key)

@app.route('/')
def home():
    return render_template('index.html')

# Route path and method assumed to match the front-end's fetch call.
@app.route('/ask', methods=['POST'])
def ask():
    question = request.json.get('question')
    model_name = "gemini-2.0-flash-thinking-exp-01-21"
    try:
        response = gemini_client.get_response(question, model_name)
        return Response(
            stream_with_context(stream_response(response)),
            mimetype='text/event-stream'
        )
    except Exception as e:
        logger.error(f"Error during generation: {e}", exc_info=True)
        return jsonify({'error': "An error occurred. Please try again."}), 500

if __name__ == '__main__':
    app.run(debug=True)
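
For reference, a minimal client sketch for exercising the streaming endpoint, assuming the app is running locally on Flask's default port 5000 and that the route is /ask as defined above; the requests package is an extra dependency used only by this example, not by the app itself:

import json
import requests  # assumed to be installed separately

# Post a question and read the newline-delimited JSON stream as it arrives.
resp = requests.post(
    "http://127.0.0.1:5000/ask",
    json={"question": "How many prime numbers are there below 100?"},
    stream=True,
)
for line in resp.iter_lines():
    if not line:
        continue
    event = json.loads(line)
    # 'thinking' and 'answer' payloads are cumulative, so each line holds the latest full text.
    print(f"[{event['type']}] {event['content']}")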