| """ | |
| MCP Server for Second Opinion AI Agent | |
| Provides tools for analyzing ideas, detecting biases, and generating alternatives | |
| Tools use LLM to generate context-aware responses based on user input | |
| """ | |
| from mcp.server.fastmcp import FastMCP | |
| from pydantic import BaseModel, Field | |
| from typing import List, Dict, Optional, Literal | |
| import json | |
| import os | |
| from datetime import datetime | |
| # Initialize FastMCP server | |
| mcp = FastMCP("second-opinion-tools") | |
# =============================================================================
# LLM INTEGRATION FOR CONTEXTUAL ANALYSIS
# =============================================================================

def get_llm_client():
    """Get an LLM client based on available API keys."""
    # Try Google Gemini first (often has a free tier)
    google_key = os.environ.get("GOOGLE_API_KEY")
    if google_key:
        try:
            import google.generativeai as genai
            genai.configure(api_key=google_key)
            return ("gemini", genai)
        except ImportError:
            pass

    # Try OpenAI
    openai_key = os.environ.get("OPENAI_API_KEY")
    if openai_key:
        try:
            from openai import OpenAI
            return ("openai", OpenAI(api_key=openai_key))
        except ImportError:
            pass

    # Try Anthropic
    anthropic_key = os.environ.get("ANTHROPIC_API_KEY")
    if anthropic_key:
        try:
            import anthropic
            return ("anthropic", anthropic.Anthropic(api_key=anthropic_key))
        except ImportError:
            pass

    return (None, None)
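
# Each branch above needs the corresponding vendor SDK installed; only the
# provider(s) you intend to use are required:
#
#   pip install google-generativeai   # GOOGLE_API_KEY branch
#   pip install openai                # OPENAI_API_KEY branch
#   pip install anthropic             # ANTHROPIC_API_KEY branch
#
# If no API-key/SDK pair is available, get_llm_client() returns (None, None)
# and every tool below falls back to its static template response.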
def call_llm(prompt: str, max_tokens: int = 2000) -> Optional[str]:
    """Call the first available LLM with a prompt; return None if no provider works."""
    provider, client = get_llm_client()
    if provider is None:
        return None  # No LLM available, caller will fall back to a template

    try:
        if provider == "gemini":
            model = client.GenerativeModel("gemini-2.0-flash-lite")
            response = model.generate_content(prompt)
            return response.text
        elif provider == "openai":
            response = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt}],
                max_tokens=max_tokens,
                temperature=0.7,
            )
            return response.choices[0].message.content
        elif provider == "anthropic":
            response = client.messages.create(
                model="claude-haiku-4-5-20251001",
                max_tokens=max_tokens,
                messages=[{"role": "user", "content": prompt}],
            )
            return response.content[0].text
    except Exception as e:
        # Log to stderr: with the stdio transport, stdout is reserved for MCP messages.
        print(f"LLM call failed: {e}", file=sys.stderr)
        return None

    return None
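
# Illustrative usage (requires one of the API keys above to be set; the exact
# reply text depends on the model and is shown here only as an example):
#
#   call_llm("Reply with the single word OK")   # -> "OK" or similar short string
#
# call_llm() returns None both when no provider is configured and when the
# provider call raises, so callers only need to handle the None case once.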
def generate_contextual_analysis(tool_name: str, idea: str, extra_context: str,
                                 analysis_prompt: str, fallback_template: dict) -> str:
    """
    Generate contextual analysis using the LLM, with fallback to a static template.

    Args:
        tool_name: Name of the tool (for logging)
        idea: The user's idea to analyze
        extra_context: Additional context provided by the user
        analysis_prompt: The specific prompt for this analysis type
        fallback_template: Template to use if the LLM is unavailable

    Returns:
        JSON string with analysis results
    """
    full_prompt = f"""{analysis_prompt}

IDEA TO ANALYZE:
{idea}

{f"ADDITIONAL CONTEXT: {extra_context}" if extra_context else ""}

Respond with a valid JSON object only. No markdown, no code blocks, just the JSON."""

    llm_response = call_llm(full_prompt)

    if llm_response:
        # Try to parse as JSON, cleaning up common wrappers if needed
        try:
            # Remove markdown code fences if present
            cleaned = llm_response.strip()
            if cleaned.startswith("```"):
                cleaned = cleaned.split("\n", 1)[1]  # Drop the opening fence line
            if cleaned.endswith("```"):
                cleaned = cleaned.rsplit("```", 1)[0]
            cleaned = cleaned.strip()

            # Validate that it is JSON
            parsed = json.loads(cleaned)
            parsed["_generated"] = "contextual"
            parsed["timestamp"] = datetime.now().isoformat()
            return json.dumps(parsed, indent=2)
        except json.JSONDecodeError:
            # If not valid JSON, wrap the raw response
            return json.dumps({
                "timestamp": datetime.now().isoformat(),
                "_generated": "contextual",
                "analysis": llm_response
            }, indent=2)

    # Fallback to the static template
    fallback_template["_generated"] = "template"
    fallback_template["timestamp"] = datetime.now().isoformat()
    fallback_template["idea_analyzed"] = idea[:200] + "..." if len(idea) > 200 else idea
    return json.dumps(fallback_template, indent=2)
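
# For reference, the shapes a caller can get back from
# generate_contextual_analysis() (values are illustrative):
#
#   LLM returned valid JSON:
#     {"idea_summary": "...", ..., "_generated": "contextual", "timestamp": "..."}
#
#   LLM returned text that was not valid JSON:
#     {"timestamp": "...", "_generated": "contextual", "analysis": "<raw LLM text>"}
#
#   No LLM configured (template fallback):
#     {..., "_generated": "template", "timestamp": "...", "idea_analyzed": "..."}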
# =============================================================================
# MCP TOOLS
# =============================================================================
@mcp.tool()  # Register with FastMCP so the function is exposed as an MCP tool
def analyze_assumptions(idea: str, context: str = "") -> str:
    """
    Analyzes an idea to identify hidden assumptions and unstated premises.

    Args:
        idea: The idea or decision to analyze
        context: Additional context or background information

    Returns:
        JSON string containing identified assumptions, their implications, and questions to verify them
    """
    analysis_prompt = """You are an expert critical thinking analyst. Analyze the given idea to identify ALL assumptions - both explicit and hidden.

Your analysis must be specific to this exact idea. Identify:
1. Explicit assumptions stated directly
2. Implicit/hidden assumptions not stated but required for the idea to work
3. Foundational beliefs the idea rests upon
4. Contextual assumptions about timing, market, resources, etc.

For each assumption, explain:
- What the assumption is
- Why it matters
- What happens if it's wrong
- How to verify it

Return a JSON object with this structure:
{
  "idea_summary": "brief summary of the idea",
  "explicit_assumptions": [
    {"assumption": "...", "importance": "high/medium/low", "verification": "how to test this"}
  ],
  "hidden_assumptions": [
    {"assumption": "...", "why_hidden": "...", "risk_if_wrong": "..."}
  ],
  "foundational_beliefs": ["belief 1", "belief 2"],
  "critical_questions": ["question 1", "question 2", "question 3"],
  "highest_risk_assumption": "the assumption most likely to be wrong or cause failure"
}"""

    fallback = {
        "explicit_assumptions": ["Unable to analyze - LLM not available"],
        "hidden_assumptions": ["Please check API key configuration"],
        "foundational_beliefs": [],
        "critical_questions": [],
        "highest_risk_assumption": "Analysis unavailable"
    }

    return generate_contextual_analysis(
        "analyze_assumptions", idea, context, analysis_prompt, fallback
    )
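
# Illustrative in-process call (input text is hypothetical); every tool below
# follows this same pattern of a prompt plus a fallback handed to
# generate_contextual_analysis():
#
#   print(analyze_assumptions(
#       "Migrate our monolith to microservices next quarter",
#       context="Team of four backend engineers",
#   ))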
@mcp.tool()
def detect_cognitive_biases(idea: str, reasoning: str = "") -> str:
    """
    Detects potential cognitive biases in reasoning and decision-making.

    Args:
        idea: The idea or decision being proposed
        reasoning: The reasoning or justification provided

    Returns:
        JSON string containing detected biases, their descriptions, and mitigation strategies
    """
    analysis_prompt = """You are a cognitive bias expert. Analyze the given idea and reasoning to detect specific cognitive biases that may be affecting the thinking.

Look for evidence of these common biases:
- Confirmation bias (seeking confirming evidence)
- Anchoring bias (over-relying on first information)
- Sunk cost fallacy (continuing due to past investment)
- Availability bias (overweighting recent/memorable events)
- Optimism bias (underestimating risks)
- Survivorship bias (only seeing successes)
- Dunning-Kruger effect (overestimating competence)
- Status quo bias (preferring the current state)
- Bandwagon effect (following the crowd)
- Recency bias (overweighting recent events)

For each bias detected, provide SPECIFIC evidence from the idea/reasoning.

Return a JSON object with this structure:
{
  "idea_summary": "brief summary",
  "detected_biases": [
    {
      "bias_name": "name of bias",
      "evidence": "specific quote or aspect that shows this bias",
      "severity": "high/medium/low",
      "how_it_distorts": "how this bias is affecting the decision"
    }
  ],
  "most_concerning_bias": "the bias most likely to lead to a bad decision",
  "debiasing_strategies": [
    "specific action to counter the biases found"
  ],
  "questions_to_ask": [
    "question that would help overcome these biases"
  ]
}"""

    fallback = {
        "detected_biases": [{"bias_name": "Analysis unavailable", "evidence": "LLM not configured", "severity": "unknown"}],
        "most_concerning_bias": "Unable to analyze",
        "debiasing_strategies": ["Check API configuration"],
        "questions_to_ask": []
    }

    return generate_contextual_analysis(
        "detect_cognitive_biases", idea, reasoning, analysis_prompt, fallback
    )
@mcp.tool()
def generate_alternatives(idea: str, constraints: str = "", num_alternatives: int = 5) -> str:
    """
    Generates alternative approaches and solutions to consider.

    Args:
        idea: The original idea or approach
        constraints: Known constraints or requirements
        num_alternatives: Number of alternatives to generate (1-10)

    Returns:
        JSON string containing diverse alternative approaches with pros/cons analysis
    """
    # Clamp to the documented 1-10 range
    num_alternatives = max(1, min(10, num_alternatives))

    analysis_prompt = f"""You are a creative strategist. Generate {num_alternatives} genuinely different alternatives to the proposed idea.

Don't just tweak the original - think of fundamentally different approaches that could achieve similar goals.

Consider:
- What if we did the opposite?
- What's the minimum viable version?
- What would a 10x version look like?
- How would different industries solve this?
- What if we removed a key constraint?

{f"CONSTRAINTS TO WORK WITHIN: {constraints}" if constraints else ""}

Return a JSON object with this structure:
{{
  "original_idea_summary": "brief summary of original",
  "goal_identified": "the underlying goal this idea is trying to achieve",
  "alternatives": [
    {{
      "name": "descriptive name",
      "description": "what this alternative involves",
      "how_different": "how this differs from the original",
      "pros": ["advantage 1", "advantage 2"],
      "cons": ["disadvantage 1", "disadvantage 2"],
      "feasibility": "high/medium/low",
      "best_if": "scenario where this alternative would be best"
    }}
  ],
  "recommended_alternative": "which alternative seems most promising and why",
  "hybrid_suggestion": "how to combine elements from multiple alternatives"
}}"""

    fallback = {
        "original_idea_summary": "Analysis unavailable",
        "alternatives": [{"name": "LLM not available", "description": "Please configure API keys"}],
        "recommended_alternative": "Unable to analyze"
    }

    return generate_contextual_analysis(
        "generate_alternatives", idea, constraints, analysis_prompt, fallback
    )
@mcp.tool()
def perform_premortem_analysis(idea: str, timeframe: str = "1 year") -> str:
    """
    Performs a pre-mortem analysis: imagine the idea failed and identify why.

    Args:
        idea: The idea or project to analyze
        timeframe: When in the future to imagine the failure (e.g., "6 months", "1 year")

    Returns:
        JSON string containing potential failure modes, warning signs, and preventive measures
    """
    analysis_prompt = f"""You are a risk analyst performing a pre-mortem analysis. Imagine it's {timeframe} from now and this idea has COMPLETELY FAILED.

Your job is to work backwards and identify all the reasons why it failed. Be specific to THIS idea - don't give generic failure modes.

Consider failures in:
- Execution (team, skills, timeline)
- Market/External factors (competition, regulation, timing)
- Strategy (wrong problem, wrong solution)
- Resources (money, people, technology)
- Assumptions (what turned out to be wrong)

Return a JSON object with this structure:
{{
  "scenario": "It's {timeframe} from now, and the idea has failed because...",
  "primary_cause_of_failure": "the single biggest reason it failed",
  "failure_modes": [
    {{
      "category": "execution/market/strategy/resources/assumptions",
      "what_went_wrong": "specific failure",
      "probability": "high/medium/low",
      "impact": "catastrophic/major/moderate/minor"
    }}
  ],
  "early_warning_signs": [
    "specific signal that would indicate this failure is coming"
  ],
  "preventive_actions": [
    {{
      "action": "what to do now",
      "prevents": "which failure mode this addresses"
    }}
  ],
  "kill_criteria": "conditions under which you should abandon this idea",
  "plan_b": "what to do if this fails"
}}"""

    fallback = {
        "scenario": f"Analysis for {timeframe} timeframe unavailable",
        "failure_modes": [{"category": "unknown", "what_went_wrong": "LLM not configured"}],
        "early_warning_signs": [],
        "preventive_actions": []
    }

    return generate_contextual_analysis(
        "perform_premortem_analysis", idea, timeframe, analysis_prompt, fallback
    )
@mcp.tool()
def identify_stakeholders_and_impacts(idea: str, organization_context: str = "") -> str:
    """
    Identifies all stakeholders and analyzes potential impacts on each group.

    Args:
        idea: The idea or decision to analyze
        organization_context: Context about the organization or situation

    Returns:
        JSON string containing stakeholder analysis with impacts, concerns, and engagement strategies
    """
    analysis_prompt = """You are a stakeholder analysis expert. Identify ALL parties who will be affected by this idea - both obvious and non-obvious stakeholders.

For each stakeholder, analyze:
- How they'll be impacted (positively or negatively)
- What their likely concerns will be
- Whether they have power to help or block this
- How to engage them effectively

Don't forget often-overlooked stakeholders like:
- People who maintain/support this long-term
- Those whose workload changes
- Competitors and their customers
- Regulators or compliance teams
- Future employees/customers

Return a JSON object with this structure:
{
  "idea_summary": "brief summary",
  "stakeholders": [
    {
      "group": "stakeholder name",
      "relationship": "how they relate to this idea",
      "impact": "positive/negative/mixed",
      "impact_description": "specific ways they're affected",
      "likely_concerns": ["concern 1", "concern 2"],
      "power_level": "high/medium/low",
      "engagement_strategy": "how to work with them"
    }
  ],
  "most_affected": "who has the most at stake",
  "potential_blockers": ["stakeholders who might resist"],
  "potential_champions": ["stakeholders who might advocate"],
  "conflicts_to_manage": [
    {
      "between": "stakeholder A vs stakeholder B",
      "conflict": "what they disagree about",
      "resolution_approach": "how to address"
    }
  ],
  "stakeholder_not_consulted": "who should be involved but often isn't"
}"""

    fallback = {
        "stakeholders": [{"group": "Analysis unavailable", "impact": "unknown"}],
        "most_affected": "Unable to analyze",
        "conflicts_to_manage": []
    }

    return generate_contextual_analysis(
        "identify_stakeholders_and_impacts", idea, organization_context, analysis_prompt, fallback
    )
@mcp.tool()
def second_order_thinking(idea: str, time_horizon: str = "2-5 years") -> str:
    """
    Analyzes second- and third-order consequences of an idea or decision.

    Args:
        idea: The idea or decision to analyze
        time_horizon: Time period to consider for consequences

    Returns:
        JSON string containing the cascade of consequences and system-level effects
    """
    analysis_prompt = f"""You are a systems thinker analyzing cascading consequences. For the given idea, think through what happens AFTER the immediate effects.

First-order effects are obvious. Your job is to find the second, third, and nth-order effects that aren't obvious.

Think about:
- How will people ADAPT to this change?
- What new behaviors will emerge?
- What feedback loops will be created?
- What becomes possible that wasn't before?
- What becomes impossible?
- What unintended consequences might occur?

Time horizon to consider: {time_horizon}

Return a JSON object with this structure:
{{
  "idea_summary": "brief summary",
  "first_order_effects": [
    "immediate, obvious consequence 1",
    "immediate, obvious consequence 2"
  ],
  "second_order_effects": [
    {{
      "effect": "what happens as a result of first-order effects",
      "caused_by": "which first-order effect leads to this",
      "timeline": "when this would manifest"
    }}
  ],
  "third_order_effects": [
    {{
      "effect": "deeper consequence",
      "chain": "first order -> second order -> this",
      "probability": "high/medium/low"
    }}
  ],
  "feedback_loops": [
    {{
      "type": "reinforcing/balancing",
      "description": "what cycle gets created",
      "implication": "why this matters"
    }}
  ],
  "unintended_consequences": [
    {{
      "consequence": "what might happen unexpectedly",
      "positive_or_negative": "positive/negative",
      "how_to_monitor": "how to detect this early"
    }}
  ],
  "what_becomes_possible": ["new opportunity 1"],
  "what_becomes_impossible": ["closed door 1"],
  "biggest_long_term_risk": "the consequence most likely to cause regret"
}}"""

    fallback = {
        "first_order_effects": ["Analysis unavailable - LLM not configured"],
        "second_order_effects": [],
        "third_order_effects": [],
        "feedback_loops": [],
        "unintended_consequences": []
    }

    return generate_contextual_analysis(
        "second_order_thinking", idea, time_horizon, analysis_prompt, fallback
    )
@mcp.tool()
def opportunity_cost_analysis(idea: str, resources: str = "", alternatives: str = "") -> str:
    """
    Analyzes opportunity costs: what you give up by choosing this path.

    Args:
        idea: The idea or decision being considered
        resources: Available resources (time, money, people, etc.)
        alternatives: Other options being considered

    Returns:
        JSON string containing opportunity cost analysis and a trade-off framework
    """
    extra_context = f"Resources available: {resources}\nAlternatives mentioned: {alternatives}" if resources or alternatives else ""

    analysis_prompt = """You are an economist analyzing opportunity costs. For every choice, something is given up. Identify what's being sacrificed by pursuing this idea.

Consider opportunity costs across:
- Time (what else could this time be spent on?)
- Money (what else could this money fund?)
- Attention (what gets less focus?)
- Talent (what else could these people work on?)
- Reputation (what credibility is at stake?)
- Optionality (what future choices are foreclosed?)

Be specific to this idea - what are the ACTUAL trade-offs?

Return a JSON object with this structure:
{
  "idea_summary": "brief summary",
  "resource_commitments": {
    "time": {
      "amount": "estimated time commitment",
      "opportunity_cost": "what else could be done with this time",
      "is_worth_it": "yes/no/uncertain with reasoning"
    },
    "money": {
      "amount": "estimated financial commitment",
      "opportunity_cost": "alternative uses for this money",
      "is_worth_it": "yes/no/uncertain with reasoning"
    },
    "attention": {
      "amount": "how much focus this requires",
      "opportunity_cost": "what gets deprioritized",
      "is_worth_it": "yes/no/uncertain with reasoning"
    }
  },
  "doors_that_close": [
    "option that becomes unavailable by choosing this"
  ],
  "hidden_costs": [
    "cost that isn't obvious upfront"
  ],
  "reversibility": {
    "is_reversible": "yes/partially/no",
    "cost_to_reverse": "what it would take to undo this",
    "point_of_no_return": "when does this become irreversible"
  },
  "better_uses_of_resources": [
    {
      "alternative": "what else you could do",
      "expected_value": "potential outcome",
      "why_not_doing_this": "reason this might not be chosen"
    }
  ],
  "key_question": "the most important trade-off question to answer before proceeding"
}"""

    fallback = {
        "resource_commitments": {"time": {"opportunity_cost": "Analysis unavailable"}},
        "doors_that_close": [],
        "hidden_costs": [],
        "reversibility": {"is_reversible": "unknown"}
    }

    return generate_contextual_analysis(
        "opportunity_cost_analysis", idea, extra_context, analysis_prompt, fallback
    )
@mcp.tool()
def red_team_analysis(idea: str, attack_surface: str = "") -> str:
    """
    Performs red team analysis: actively tries to break or exploit the idea.

    Args:
        idea: The idea, system, or plan to attack
        attack_surface: Known vulnerabilities or areas of concern

    Returns:
        JSON string containing attack vectors, vulnerabilities, and defensive measures
    """
    analysis_prompt = """You are a red team analyst. Your job is to BREAK this idea. Think like an adversary, a competitor, a malicious user, or just Murphy's Law.

Attack from multiple angles:
- How could users game/exploit this?
- How could competitors undermine this?
- What technical/operational failures could occur?
- What edge cases break the model?
- How could this be weaponized or misused?
- What happens at 10x or 100x scale?

Be creative and ruthless. Find the weaknesses.

Return a JSON object with this structure:
{
  "idea_summary": "brief summary",
  "attack_vectors": [
    {
      "attack_name": "descriptive name",
      "category": "gaming/competition/technical/scaling/misuse",
      "how_attack_works": "step by step how this exploits the idea",
      "likelihood": "high/medium/low",
      "impact": "catastrophic/major/moderate/minor",
      "example_scenario": "concrete example of this attack"
    }
  ],
  "critical_vulnerabilities": [
    {
      "vulnerability": "what's weak",
      "why_its_critical": "why this matters",
      "fix": "how to address"
    }
  ],
  "what_breaks_at_scale": [
    "thing that works now but fails at 10x/100x"
  ],
  "worst_case_scenario": {
    "scenario": "the absolute worst thing that could happen",
    "probability": "high/medium/low",
    "how_to_prevent": "what would stop this"
  },
  "defensive_recommendations": [
    {
      "defense": "what to implement",
      "addresses": "which attacks/vulnerabilities this covers",
      "priority": "immediate/soon/eventually"
    }
  ],
  "monitoring_needed": [
    "signal to watch for that indicates attack/failure"
  ]
}"""

    fallback = {
        "attack_vectors": [{"attack_name": "Analysis unavailable", "how_attack_works": "LLM not configured"}],
        "critical_vulnerabilities": [],
        "worst_case_scenario": {"scenario": "Unable to analyze"},
        "defensive_recommendations": []
    }

    return generate_contextual_analysis(
        "red_team_analysis", idea, attack_surface, analysis_prompt, fallback
    )
# =============================================================================
# RUN SERVER
# =============================================================================

if __name__ == "__main__":
    mcp.run()
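
# To use this server from an MCP client such as Claude Desktop, point the client
# at this script over stdio. A minimal configuration sketch; the script name
# "server.py", its path, and the "python" command are assumptions about your
# local setup:
#
#   {
#     "mcpServers": {
#       "second-opinion-tools": {
#         "command": "python",
#         "args": ["/absolute/path/to/server.py"]
#       }
#     }
#   }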