rohitkhadka committed on
Commit 0bd9582 · verified · 1 Parent(s): 79d7318

Create app.py

Files changed (1)
app.py +75 -0
app.py ADDED
@@ -0,0 +1,75 @@
+ import gradio as gr
+ import requests
+ import os
+
+ # Define models
+ MODEL_OPTIONS = {
+     "Gemma 3-4B": "google/gemma-3-4b-it",
+     "Mistral 7B": "mistralai/mistral-7b-instruct",
+     "Qwen 0.6B": "qwen/qwen3-0.6b-04-28",
+     "DeepSeek 70B": "deepseek/deepseek-r1-distill-llama-70b"
+ }
+
+ EVALUATION_MODEL = "qwen/qwen3-32b"
+
+ # Token (set your HF token as an environment variable or paste here directly)
+ HF_TOKEN = os.getenv("HF_TOKEN") or "your_token_here"  # Replace with your actual token if needed
+
+ # API query function for the Hugging Face Inference API
+ def query_huggingface(prompt, model_id):
+     headers = {
+         "Authorization": f"Bearer {HF_TOKEN}",
+         "Content-Type": "application/json"
+     }
+
+     payload = {
+         "inputs": prompt,
+         "parameters": {
+             "max_new_tokens": 150,
+             "do_sample": True,
+             "temperature": 0.7
+         }
+     }
+
+     response = requests.post(
+         f"https://api-inference.huggingface.co/models/{model_id}",
+         headers=headers,
+         json=payload,
+         timeout=120
+     )
+
+     try:
+         result = response.json()
+     except ValueError:
+         return f"❌ Error: non-JSON response (HTTP {response.status_code})"
+
+     # Success returns a list of generations; failures return a dict with an "error" key.
+     if isinstance(result, list) and result and isinstance(result[0], dict) and "generated_text" in result[0]:
+         return result[0]["generated_text"]
+     if isinstance(result, dict) and "error" in result:
+         return f"❌ Error: {result['error']}"
+     return f"❌ Error: unexpected response: {result}"
+
+ # Main logic
+ def generate_and_evaluate(prompt, model_name):
+     model_id = MODEL_OPTIONS[model_name]
+
+     # Step 1: Generate math problem
+     generated = query_huggingface(prompt, model_id)
+
+     # Step 2: Evaluate using Qwen 32B
+     eval_prompt = (
+         f"Evaluate the following math problem for originality, clarity, and difficulty. "
+         f"Give a score out of 10 with a short explanation:\n\n{generated}"
+     )
+     evaluation = query_huggingface(eval_prompt, EVALUATION_MODEL)
+
+     return generated.strip(), evaluation.strip()
+
+ # Gradio UI
+ gr.Interface(
+     fn=generate_and_evaluate,
+     inputs=[
+         gr.Textbox(label="🧠 Math Concept Prompt", placeholder="e.g., Generate a creative trigonometry problem.", lines=2),
+         gr.Dropdown(choices=list(MODEL_OPTIONS.keys()), label="🧠 Choose LLM")
+     ],
+     outputs=[
+         gr.Textbox(label="📝 Generated Math Problem", lines=6),
+         gr.Textbox(label="🔍 Qwen 32B Evaluation", lines=4)
+     ],
+     title="🔢 Multi-LLM Math Generator + Evaluator",
+     description="Generate math problems with different open-source LLMs and evaluate with Qwen 32B."
+ ).launch()
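
For a quick local sanity check of the Inference API call that app.py makes, the same request can be issued directly with requests. This is a minimal sketch, assuming an HF_TOKEN environment variable is set; the model ID shown is one entry from MODEL_OPTIONS and the prompt is just an example.

import os
import requests

HF_TOKEN = os.getenv("HF_TOKEN", "your_token_here")  # same placeholder convention as app.py
MODEL_ID = "mistralai/mistral-7b-instruct"           # any entry from MODEL_OPTIONS

# Mirror the payload built by query_huggingface() in app.py.
response = requests.post(
    f"https://api-inference.huggingface.co/models/{MODEL_ID}",
    headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
    json={
        "inputs": "Generate a creative trigonometry problem.",
        "parameters": {"max_new_tokens": 150, "do_sample": True, "temperature": 0.7},
    },
    timeout=120,
)

print(response.status_code)
# On success the API returns a list like [{"generated_text": "..."}];
# on failure it returns a dict with an "error" key, which is why
# query_huggingface() checks the response shape before indexing into it.
print(response.json())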