File size: 2,294 Bytes
0bd9582
 
 
 
 
 
692f33e
 
 
 
0bd9582
 
a1595c5
0bd9582
692f33e
66500dc
 
 
 
0bd9582
 
692f33e
0bd9582
692f33e
0bd9582
 
 
 
692f33e
 
 
 
0bd9582
 
692f33e
0bd9582
 
692f33e
 
 
 
0bd9582
 
 
 
 
 
692f33e
0bd9582
 
 
 
 
 
692f33e
0bd9582
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import gradio as gr
import requests
import os

# Define models
# Maps the human-friendly labels shown in the Gradio dropdown to the
# OpenRouter model identifiers sent in API requests (all free-tier variants).
MODEL_OPTIONS = {
    "Gemma 3-4B": "google/gemma-3-4b-it:free",
    "Mistral 7B": "mistralai/mistral-7b-instruct:free",
    "Qwen 0.6B": "qwen/qwen3-0.6b-04-28:free",
    "DeepSeek 70B": "deepseek/deepseek-r1-distill-llama-70b:free"
}

# Fixed model used to grade every generated problem, regardless of which
# model produced it.
EVALUATION_MODEL = "qwen/qwen3-32b:free"

# Load the OpenRouter API key from the environment and fail fast at import
# time, so the app never starts without credentials.
OPENROUTER_API_KEY = os.getenv("OPENROUTERAI")
if not OPENROUTER_API_KEY:
    raise RuntimeError("Missing OpenRouter API key. Please set OPENROUTERAI in your environment variables.")


# API query function
def query_openrouter(prompt, model_id, timeout=60):
    """Send a single-turn chat completion request to OpenRouter.

    Args:
        prompt: The user message content.
        model_id: OpenRouter model identifier (e.g. "mistralai/mistral-7b-instruct:free").
        timeout: Seconds to wait for the HTTP response. Added (with a
            backward-compatible default) because `requests.post` without a
            timeout can hang the UI indefinitely.

    Returns:
        The assistant's reply text on success, or a human-readable
        "❌ Error: ..." string on any failure. Errors are returned rather
        than raised because callers display whatever comes back in the UI.
    """
    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": model_id,
        "messages": [
            {"role": "user", "content": prompt}
        ]
    }

    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=timeout,
        )
        # Guarded: gateway errors often return HTML, and the original
        # unguarded .json() call would crash with an unhandled ValueError.
        result = response.json()
    except requests.RequestException as e:
        return f"❌ Error: {e}"
    except ValueError as e:
        return f"❌ Error: non-JSON response from OpenRouter ({e})"

    try:
        return result["choices"][0]["message"]["content"]
    except (KeyError, IndexError, TypeError) as e:
        # The API signalled failure via an "error" payload instead of choices.
        return f"❌ Error: {result.get('error', str(e))}"

# Main logic
def generate_and_evaluate(prompt, model_name):
    """Generate a math problem with the chosen model, then have the
    fixed evaluator model grade it.

    Args:
        prompt: Free-text instruction describing the problem to generate.
        model_name: A key of MODEL_OPTIONS (the dropdown label).

    Returns:
        A (problem, evaluation) tuple of stripped strings.
    """
    # Resolve the friendly dropdown label to its OpenRouter model id.
    chosen_model = MODEL_OPTIONS[model_name]

    # Step 1: ask the selected model to produce a math problem.
    problem = query_openrouter(prompt, chosen_model)

    # Step 2: grade the result with the dedicated evaluation model.
    rubric = (
        "Evaluate the following math problem for originality, clarity, and difficulty. "
        f"Give a score out of 10 with a short explanation:\n\n{problem}"
    )
    verdict = query_openrouter(rubric, EVALUATION_MODEL)

    return problem.strip(), verdict.strip()

# Gradio UI
# Wires generate_and_evaluate to a two-input / two-output interface and
# starts the web server immediately at import time (blocking call).
gr.Interface(
    fn=generate_and_evaluate,
    inputs=[
        # Free-text prompt describing the problem to generate.
        gr.Textbox(label="🧠 Math Concept Prompt", placeholder="e.g., Generate a creative trigonometry problem.", lines=2),
        # Dropdown of friendly model labels; resolved to model ids inside
        # generate_and_evaluate via MODEL_OPTIONS.
        gr.Dropdown(choices=list(MODEL_OPTIONS.keys()), label="🧠 Choose LLM")
    ],
    outputs=[
        gr.Textbox(label="πŸ“ Generated Math Problem", lines=6),
        gr.Textbox(label="πŸ” Qwen 32B Evaluation", lines=4)
    ],
    title="πŸ”’ Multi-LLM Math Generator + Evaluator",
    description="Generate math problems with different open-source LLMs and evaluate with Qwen 32B."
).launch()