# games / app.py — Agentic Conflict Mediator (Hugging Face Space)
# Last change: "Update app.py" by gk2410, commit ce04c51 (verified)
import os
import json
import gradio as gr
import nashpy as nash
import numpy as np
from huggingface_hub import InferenceClient
# Hugging Face API token read from the environment (configure as a Space secret).
HF_TOKEN = os.getenv("HF_TOKEN")
# Shared inference client for Llama-3 8B Instruct; used by get_agent_data below.
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=HF_TOKEN)
def get_agent_data(p_a, p_b):
    """Model the conflict between two perspectives as a 2x2 game via the LLM.

    Parameters
    ----------
    p_a, p_b : str
        Free-text descriptions of person A's and person B's perspectives.

    Returns
    -------
    dict
        Parsed JSON with keys "a_actions" and "b_actions" (two strings each)
        and "payoffs_a" / "payoffs_b" (four numbers each, ordered
        [A1/B1, A1/B2, A2/B1, A2/B2]).

    Raises
    ------
    ValueError
        If the model response contains no JSON object.
    json.JSONDecodeError
        If the extracted span is not valid JSON.
    """
    prompt = f"""
Analyze this conflict:
A: "{p_a}"
B: "{p_b}"
Extract 2 actions for each and assign happiness scores (0-10) for [A1/B1, A1/B2, A2/B1, A2/B2].
Return ONLY JSON:
{{"a_actions": ["A1", "A2"], "b_actions": ["B1", "B2"], "payoffs_a": [v1,v2,v3,v4], "payoffs_b": [v1,v2,v3,v4]}}
"""
    response = client.text_generation(prompt, max_new_tokens=400)
    # LLMs often wrap JSON in prose; extract the outermost {...} span.
    start, end = response.find("{"), response.rfind("}")
    if start == -1 or end < start:
        # Fix: the original sliced with unchecked find()/rfind() results, so a
        # brace-free response produced a garbage slice and a cryptic JSON error.
        raise ValueError("Model response contained no JSON object")
    return json.loads(response[start:end + 1])
def resolve(p_a, p_b):
    """Mediate the conflict by choosing the joint action with maximal total happiness.

    Parameters
    ----------
    p_a, p_b : str
        Free-text perspectives of person A and person B.

    Returns
    -------
    str
        A Markdown explanation of the chosen social-optimum outcome.

    Raises
    ------
    ValueError
        If the LLM returned malformed action or payoff data.
    """
    data = get_agent_data(p_a, p_b)
    # Fix: the original reshaped unvalidated LLM output, so a short/long payoff
    # list crashed with an opaque numpy error instead of a clear message.
    for key in ("payoffs_a", "payoffs_b"):
        if len(data.get(key, [])) != 4:
            raise ValueError(f"Expected 4 values in {key!r}, got {data.get(key)}")
    for key in ("a_actions", "b_actions"):
        if len(data.get(key, [])) != 2:
            raise ValueError(f"Expected 2 actions in {key!r}, got {data.get(key)}")
    A = np.array(data["payoffs_a"]).reshape(2, 2)
    B = np.array(data["payoffs_b"]).reshape(2, 2)
    # Social welfare: sum of both players' payoffs for each of the 4 outcomes.
    social = A + B
    # argmax over the flattened grid, mapped back to (row=A's action, col=B's).
    idx = np.unravel_index(np.argmax(social), social.shape)
    choice_a = data["a_actions"][idx[0]]
    choice_b = data["b_actions"][idx[1]]
    # Ascending sort, hoisted so the min and second-best share a single pass
    # (the original sorted the grid and called np.min separately).
    ranked = np.sort(social.flatten())
    # LOGIC TRACE: Explain the numbers
    explanation = f"""
### ⚖️ Resolution: {choice_a} + {choice_b}
---
### 🧠 Agent Logic Trace (The "Why")
I assigned the following 'Happiness Scores' to your situation:
* **{choice_a} + {choice_b}:** Combined Score of **{social[idx]}** (This is the highest possible total).
* **Other Options:** Lower combined scores (ranging from {ranked[0]} to {ranked[-2]}).
**The Decision:** This outcome was chosen because it represents the **Social Optimum**. Even if it feels like a compromise, mathematically, no other combination of actions makes you 'happier' as a unit.
"""
    return explanation
# --- Gradio UI: two perspective inputs, one button, Markdown output ---------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤝 Agentic Conflict Mediator")
    with gr.Row():
        perspective_a = gr.Textbox(label="Person A Perspective")
        perspective_b = gr.Textbox(label="Person B Perspective")
    analyze_button = gr.Button("Analyze & Explain")
    result_markdown = gr.Markdown()
    # Wire the button to the mediator: two text inputs in, one Markdown out.
    analyze_button.click(resolve, [perspective_a, perspective_b], result_markdown)
demo.launch()