nvipin63 committed
Commit 647c6ff · verified · 1 Parent(s): e6355d4

Update mcp_server.py

Files changed (1)
  1. mcp_server.py +548 -586
mcp_server.py CHANGED
@@ -1,37 +1,153 @@
1
  """
2
  MCP Server for Second Opinion AI Agent
3
  Provides tools for analyzing ideas, detecting biases, and generating alternatives
 
4
  """
5
 
6
  from mcp.server.fastmcp import FastMCP
7
  from pydantic import BaseModel, Field
8
  from typing import List, Dict, Optional, Literal
9
  import json
 
10
  from datetime import datetime
11
 
12
  # Initialize FastMCP server
13
  mcp = FastMCP("second-opinion-tools")
14
 
15
- class IdeaAnalysis(BaseModel):
16
- """Model for idea analysis input"""
17
- idea: str = Field(..., description="The idea or decision to analyze")
18
- context: Optional[str] = Field(None, description="Additional context or background")
19
- goals: Optional[List[str]] = Field(None, description="Stated goals or objectives")
20
-
21
- class BiasDetection(BaseModel):
22
- """Model for bias detection results"""
23
- bias_type: str
24
- description: str
25
- evidence: str
26
- severity: Literal["low", "medium", "high"]
27
-
28
- class Alternative(BaseModel):
29
- """Model for alternative solutions"""
30
- title: str
31
- description: str
32
- pros: List[str]
33
- cons: List[str]
34
- feasibility: Literal["low", "medium", "high"]
35
 
36
  @mcp.tool()
37
  def analyze_assumptions(idea: str, context: str = "") -> str:
@@ -46,39 +162,46 @@ def analyze_assumptions(idea: str, context: str = "") -> str:
46
  JSON string containing identified assumptions, their implications, and questions to verify them
47
  """
48
 
49
- analysis = {
50
- "timestamp": datetime.now().isoformat(),
51
- "idea_summary": idea[:200] + "..." if len(idea) > 200 else idea,
52
- "analysis": {
53
- "explicit_assumptions": [
54
- "Assumptions that are stated directly in the idea"
55
- ],
56
- "implicit_assumptions": [
57
- "Assumptions that are implied but not stated",
58
- "Hidden premises that need to be examined"
59
- ],
60
- "foundational_beliefs": [
61
- "Core beliefs this idea rests upon",
62
- "Worldview assumptions"
63
- ],
64
- "contextual_assumptions": [
65
- "Assumptions about the environment",
66
- "Assumptions about timing and conditions"
67
- ]
68
- },
69
- "verification_questions": [
70
- "What evidence supports this assumption?",
71
- "What if this assumption is wrong?",
72
- "Under what conditions would this assumption fail?"
73
- ],
74
- "recommendations": [
75
- "Test key assumptions before proceeding",
76
- "Identify which assumptions are most critical",
77
- "Create contingency plans for false assumptions"
78
- ]
79
  }
80
 
81
- return json.dumps(analysis, indent=2)
 
 
 
82
 
83
  @mcp.tool()
84
  def detect_cognitive_biases(idea: str, reasoning: str = "") -> str:
@@ -93,71 +216,56 @@ def detect_cognitive_biases(idea: str, reasoning: str = "") -> str:
93
  JSON string containing detected biases, their descriptions, and mitigation strategies
94
  """
95
 
96
- common_biases = {
97
- "confirmation_bias": {
98
- "name": "Confirmation Bias",
99
- "description": "Tendency to search for or interpret information that confirms existing beliefs",
100
- "indicators": ["cherry-picking data", "ignoring contradictory evidence"],
101
- "severity": "high"
102
- },
103
- "anchoring_bias": {
104
- "name": "Anchoring Bias",
105
- "description": "Over-reliance on the first piece of information encountered",
106
- "indicators": ["fixating on initial numbers", "difficulty adjusting from first impression"],
107
- "severity": "medium"
108
- },
109
- "sunk_cost_fallacy": {
110
- "name": "Sunk Cost Fallacy",
111
- "description": "Continuing a course of action due to previously invested resources",
112
- "indicators": ["'we've come too far to turn back'", "justifying based on past investment"],
113
- "severity": "high"
114
- },
115
- "availability_bias": {
116
- "name": "Availability Bias",
117
- "description": "Overestimating likelihood of events based on recent or memorable examples",
118
- "indicators": ["relying on anecdotes", "recent events driving decisions"],
119
- "severity": "medium"
120
- },
121
- "optimism_bias": {
122
- "name": "Optimism Bias",
123
- "description": "Overestimating positive outcomes and underestimating risks",
124
- "indicators": ["'it won't happen to us'", "underestimating complexity"],
125
- "severity": "high"
126
- },
127
- "groupthink": {
128
- "name": "Groupthink",
129
- "description": "Desire for harmony leads to poor decision-making",
130
- "indicators": ["suppressing dissent", "illusion of unanimity"],
131
- "severity": "high"
132
  }
133
  }
134
 
135
- analysis = {
136
- "timestamp": datetime.now().isoformat(),
137
- "detected_biases": list(common_biases.values()),
138
- "mitigation_strategies": [
139
- "Actively seek disconfirming evidence",
140
- "Consult diverse perspectives",
141
- "Use pre-mortem analysis (imagine failure)",
142
- "Set decision criteria before gathering information",
143
- "Assign someone to play devil's advocate"
144
- ],
145
- "debiasing_questions": [
146
- "What evidence would change my mind?",
147
- "Am I being overconfident?",
148
- "What am I not seeing?",
149
- "Would I make this decision if starting fresh today?"
150
- ]
151
- }
152
-
153
- return json.dumps(analysis, indent=2)
154
 
155
  @mcp.tool()
156
- def generate_alternatives(
157
- idea: str,
158
- constraints: str = "",
159
- num_alternatives: int = 5
160
- ) -> str:
161
  """
162
  Generates alternative approaches and solutions to consider.
163
 
@@ -170,105 +278,50 @@ def generate_alternatives(
170
  JSON string containing diverse alternative approaches with pros/cons analysis
171
  """
172
 
173
- alternatives = {
174
- "original_idea": idea[:200] + "..." if len(idea) > 200 else idea,
175
- "constraints": constraints,
176
- "alternatives": [
177
- {
178
- "id": 1,
179
- "title": "Incremental Approach",
180
- "description": "Break down the idea into smaller, testable steps",
181
- "pros": [
182
- "Lower risk",
183
- "Learn as you go",
184
- "Can pivot based on feedback"
185
- ],
186
- "cons": [
187
- "Slower to full implementation",
188
- "May lose momentum",
189
- "Partial solutions might not prove value"
190
- ],
191
- "feasibility": "high"
192
- },
193
- {
194
- "id": 2,
195
- "title": "Opposite Approach",
196
- "description": "Consider doing the exact opposite of the proposed idea",
197
- "pros": [
198
- "Reveals hidden assumptions",
199
- "May uncover better path",
200
- "Challenges conventional thinking"
201
- ],
202
- "cons": [
203
- "May seem counterintuitive",
204
- "Requires reframing problem",
205
- "Not always applicable"
206
- ],
207
- "feasibility": "medium"
208
- },
209
- {
210
- "id": 3,
211
- "title": "Hybrid Solution",
212
- "description": "Combine elements from multiple approaches",
213
- "pros": [
214
- "Balances trade-offs",
215
- "Leverages strengths of each",
216
- "More flexible"
217
- ],
218
- "cons": [
219
- "Can be complex",
220
- "May lack clear focus",
221
- "Harder to execute"
222
- ],
223
- "feasibility": "medium"
224
- },
225
- {
226
- "id": 4,
227
- "title": "Wait and Learn",
228
- "description": "Delay decision while gathering more information",
229
- "pros": [
230
- "Reduces uncertainty",
231
- "Market may validate/invalidate",
232
- "More data for better decision"
233
- ],
234
- "cons": [
235
- "Opportunity cost",
236
- "Competitive disadvantage",
237
- "Analysis paralysis risk"
238
- ],
239
- "feasibility": "high"
240
- },
241
- {
242
- "id": 5,
243
- "title": "Minimum Viable Approach",
244
- "description": "Find the simplest possible version that tests key assumptions",
245
- "pros": [
246
- "Fast to implement",
247
- "Low resource commitment",
248
- "Quick validation"
249
- ],
250
- "cons": [
251
- "May not fully represent vision",
252
- "Limited scope",
253
- "Could underestimate true potential"
254
- ],
255
- "feasibility": "high"
256
- }
257
- ][:num_alternatives],
258
- "evaluation_framework": {
259
- "criteria": [
260
- "Alignment with goals",
261
- "Risk level",
262
- "Resource requirements",
263
- "Time to value",
264
- "Reversibility",
265
- "Learning potential"
266
- ],
267
- "process": "Score each alternative 1-10 on each criterion, then compare"
268
- }
269
  }
270
 
271
- return json.dumps(alternatives, indent=2)
 
 
 
272
 
273
  @mcp.tool()
274
  def perform_premortem_analysis(idea: str, timeframe: str = "1 year") -> str:
@@ -283,76 +336,56 @@ def perform_premortem_analysis(idea: str, timeframe: str = "1 year") -> str:
283
  JSON string containing potential failure modes, warning signs, and preventive measures
284
  """
285
 
286
- premortem = {
287
- "timestamp": datetime.now().isoformat(),
288
- "scenario": f"Imagine it's {timeframe} from now, and this idea has failed completely",
289
- "failure_modes": [
290
- {
291
- "category": "Execution Failures",
292
- "scenarios": [
293
- "Team lacked necessary skills",
294
- "Underestimated complexity and timeline",
295
- "Poor communication led to misalignment",
296
- "Key person left at critical moment"
297
- ]
298
- },
299
- {
300
- "category": "Market/External Failures",
301
- "scenarios": [
302
- "Market conditions changed unexpectedly",
303
- "Competitor launched better solution first",
304
- "Customer needs were misunderstood",
305
- "Regulatory changes blocked approach"
306
- ]
307
- },
308
- {
309
- "category": "Strategic Failures",
310
- "scenarios": [
311
- "Solving the wrong problem",
312
- "Solution didn't address root cause",
313
- "Opportunity cost was too high",
314
- "Failed to achieve minimum viable scale"
315
- ]
316
- },
317
- {
318
- "category": "Resource Failures",
319
- "scenarios": [
320
- "Ran out of budget before completion",
321
- "Couldn't secure necessary partnerships",
322
- "Technical infrastructure couldn't scale",
323
- "Dependencies failed or were delayed"
324
- ]
325
- }
326
- ],
327
- "early_warning_signs": [
328
- "Metrics trending wrong direction",
329
- "Increasing resistance or skepticism",
330
- "Scope creep and deadline slips",
331
- "Key assumptions proving false",
332
- "Team morale declining"
333
- ],
334
- "preventive_measures": [
335
- "Define clear success metrics upfront",
336
- "Build in checkpoints for go/no-go decisions",
337
- "Create contingency plans for top 3 risks",
338
- "Establish kill criteria before starting",
339
- "Schedule regular assumption testing"
340
- ],
341
- "questions_to_answer": [
342
- "What would need to be true for this to succeed?",
343
- "What's our plan if X fails?",
344
- "How will we know if we're on the wrong path?",
345
- "What's our exit strategy?"
346
- ]
347
  }
348
 
349
- return json.dumps(premortem, indent=2)
 
 
 
350
 
351
  @mcp.tool()
352
- def identify_stakeholders_and_impacts(
353
- idea: str,
354
- organization_context: str = ""
355
- ) -> str:
356
  """
357
  Identifies all stakeholders and analyzes potential impacts on each group.
358
 
@@ -364,105 +397,58 @@ def identify_stakeholders_and_impacts(
364
  JSON string containing stakeholder analysis with impacts, concerns, and engagement strategies
365
  """
366
 
367
- analysis = {
368
- "timestamp": datetime.now().isoformat(),
369
- "stakeholder_groups": [
370
- {
371
- "group": "Direct Users/Customers",
372
- "impact_level": "high",
373
- "potential_impacts": [
374
- "Changed user experience or workflow",
375
- "New learning curve required",
376
- "Value proposition shift"
377
- ],
378
- "likely_concerns": [
379
- "Will this make things better or worse?",
380
- "How much effort to adapt?",
381
- "What if I don't like it?"
382
- ],
383
- "engagement_strategy": "Early involvement, clear communication, feedback loops"
384
- },
385
- {
386
- "group": "Implementation Team",
387
- "impact_level": "high",
388
- "potential_impacts": [
389
- "Additional workload",
390
- "New skills required",
391
- "Changed priorities"
392
- ],
393
- "likely_concerns": [
394
- "Do we have capacity?",
395
- "What gets deprioritized?",
396
- "Is timeline realistic?"
397
- ],
398
- "engagement_strategy": "Involve in planning, realistic scoping, adequate resources"
399
- },
400
- {
401
- "group": "Leadership/Decision Makers",
402
- "impact_level": "medium",
403
- "potential_impacts": [
404
- "Resource allocation decisions",
405
- "Strategic direction implications",
406
- "Risk exposure"
407
- ],
408
- "likely_concerns": [
409
- "What's the ROI?",
410
- "What are the risks?",
411
- "How does this fit strategy?"
412
- ],
413
- "engagement_strategy": "Business case, risk mitigation plan, metrics"
414
- },
415
- {
416
- "group": "Adjacent Teams/Partners",
417
- "impact_level": "medium",
418
- "potential_impacts": [
419
- "Workflow dependencies",
420
- "Integration requirements",
421
- "Coordination overhead"
422
- ],
423
- "likely_concerns": [
424
- "How does this affect our work?",
425
- "What do we need to change?",
426
- "Were we consulted?"
427
- ],
428
- "engagement_strategy": "Early coordination, clear interfaces, collaborative planning"
429
- },
430
- {
431
- "group": "Competitors/Market",
432
- "impact_level": "low",
433
- "potential_impacts": [
434
- "Competitive dynamics shift",
435
- "Market expectations change",
436
- "Industry standards affected"
437
- ],
438
- "likely_concerns": [
439
- "How to respond?",
440
- "Is this a threat or opportunity?"
441
- ],
442
- "engagement_strategy": "Market monitoring, strategic positioning"
443
- }
444
- ],
445
- "overlooked_stakeholders": [
446
- "Consider: Who maintains this long-term?",
447
- "Consider: Who pays for this?",
448
- "Consider: Who gets blamed if it fails?",
449
- "Consider: Whose job becomes harder/easier?"
450
- ],
451
- "conflict_analysis": {
452
- "potential_conflicts": [
453
- "Short-term costs vs long-term benefits",
454
- "Individual convenience vs collective good",
455
- "Innovation speed vs risk management"
456
- ],
457
- "resolution_approaches": [
458
- "Transparent trade-off discussions",
459
- "Pilot programs to demonstrate value",
460
- "Phased rollout to manage change"
461
- ]
462
  }
463
  }
464
 
465
- return json.dumps(analysis, indent=2)
 
 
 
466
 
467
  @mcp.tool()
468
  def second_order_thinking(idea: str, time_horizon: str = "2-5 years") -> str:
@@ -477,77 +463,75 @@ def second_order_thinking(idea: str, time_horizon: str = "2-5 years") -> str:
477
  JSON string containing cascade of consequences and system-level effects
478
  """
479
 
480
- analysis = {
481
- "timestamp": datetime.now().isoformat(),
482
- "time_horizon": time_horizon,
483
- "consequence_cascade": {
484
- "first_order": {
485
- "description": "Immediate, direct effects",
486
- "examples": [
487
- "Direct impact on users/customers",
488
- "Immediate resource requirements",
489
- "Initial results or outcomes"
490
- ]
491
- },
492
- "second_order": {
493
- "description": "Effects of the first-order effects",
494
- "examples": [
495
- "How do people adapt to the change?",
496
- "What new behaviors emerge?",
497
- "What dependencies are created?",
498
- "What gets easier/harder as a result?"
499
- ]
500
- },
501
- "third_order": {
502
- "description": "Systemic shifts and long-term transformations",
503
- "examples": [
504
- "How does culture or mindset shift?",
505
- "What new equilibrium is reached?",
506
- "What irreversible changes occur?",
507
- "What becomes possible/impossible?"
508
- ]
509
- }
510
- },
511
- "unintended_consequences": {
512
- "positive": [
513
- "Unexpected benefits that may emerge",
514
- "Side effects that create value elsewhere",
515
- "Learning and capability building"
516
- ],
517
- "negative": [
518
- "Perverse incentives created",
519
- "Workarounds that undermine intent",
520
- "Dependencies that create fragility",
521
- "Race conditions or competitive dynamics"
522
- ]
523
- },
524
- "feedback_loops": {
525
- "reinforcing": [
526
- "What positive feedback loops could accelerate this?",
527
- "What could spiral out of control?"
528
- ],
529
- "balancing": [
530
- "What natural limits exist?",
531
- "What forces will push back?"
532
- ]
533
- },
534
- "questions_to_explore": [
535
- "If this succeeds, then what?",
536
- "What happens when everyone does this?",
537
- "What does this make inevitable?",
538
- "What does this make obsolete?",
539
- "What new problems does this create?"
540
- ]
541
  }
542
 
543
- return json.dumps(analysis, indent=2)
 
 
 
544
 
545
  @mcp.tool()
546
- def opportunity_cost_analysis(
547
- idea: str,
548
- resources: str = "",
549
- alternatives: str = ""
550
- ) -> str:
551
  """
552
  Analyzes opportunity costs: what you give up by choosing this path.
553
 
@@ -560,75 +544,72 @@ def opportunity_cost_analysis(
560
  JSON string containing opportunity cost analysis and trade-off framework
561
  """
562
 
563
- analysis = {
564
- "timestamp": datetime.now().isoformat(),
565
- "core_concept": "Opportunity cost is what you give up when you choose one option over another",
566
- "resource_commitments": {
567
- "time": {
568
- "direct_time": "Time spent implementing this idea",
569
- "opportunity_cost": "What else could be done with that time?",
570
- "questions": [
571
- "Is this the highest-value use of our time?",
572
- "What are we NOT doing by doing this?"
573
- ]
574
- },
575
- "money": {
576
- "direct_cost": "Financial investment required",
577
- "opportunity_cost": "Alternative investments or savings",
578
- "questions": [
579
- "What else could this money fund?",
580
- "What's the expected return compared to alternatives?"
581
- ]
582
- },
583
- "attention": {
584
- "direct_cost": "Mental energy and focus required",
585
- "opportunity_cost": "Other priorities that get less attention",
586
- "questions": [
587
- "What falls through the cracks?",
588
- "Where should attention be focused?"
589
- ]
590
- },
591
- "reputation": {
592
- "direct_cost": "Credibility and social capital at stake",
593
- "opportunity_cost": "Political capital spent, trust consumed",
594
- "questions": [
595
- "What if this fails publicly?",
596
- "Is this worth spending reputation on?"
597
- ]
598
- }
599
- },
600
- "trade_off_framework": {
601
- "explicit_trade_offs": [
602
- "What are we openly choosing to sacrifice?",
603
- "What constraints are we accepting?"
604
- ],
605
- "implicit_trade_offs": [
606
- "What are we giving up without realizing it?",
607
- "What doors close by choosing this path?"
608
- ],
609
- "reversibility": [
610
- "Can we undo this decision later?",
611
- "What becomes locked in?",
612
- "What optionality do we lose?"
613
- ]
614
  },
615
- "better_alternatives_test": {
616
- "questions": [
617
- "If we had unlimited time/money, would we still choose this?",
618
- "What would we do if this option didn't exist?",
619
- "Is there a 10x better option we're not seeing?",
620
- "Are we settling for local maximum?"
621
- ]
622
  },
623
- "recommendations": [
624
- "List out what you're explicitly NOT doing",
625
- "Quantify opportunity costs where possible",
626
- "Consider: is this a reversible decision?",
627
- "Ask: what keeps us from doing this AND something else?"
628
- ]
629
  }
630
 
631
- return json.dumps(analysis, indent=2)
 
 
 
632
 
633
  @mcp.tool()
634
  def red_team_analysis(idea: str, attack_surface: str = "") -> str:
@@ -643,92 +624,73 @@ def red_team_analysis(idea: str, attack_surface: str = "") -> str:
643
  JSON string containing attack vectors, vulnerabilities, and defensive measures
644
  """
645
 
646
- analysis = {
647
- "timestamp": datetime.now().isoformat(),
648
- "red_team_mindset": "Assume adversarial intent. How can this be broken, gamed, or exploited?",
649
- "attack_vectors": [
650
- {
651
- "category": "Incentive Manipulation",
652
- "attacks": [
653
- "How can users game the system?",
654
- "What perverse incentives does this create?",
655
- "How could metrics be manipulated?",
656
- "What loopholes exist?"
657
- ]
658
- },
659
- {
660
- "category": "Technical Vulnerabilities",
661
- "attacks": [
662
- "What breaks at scale?",
663
- "Where are single points of failure?",
664
- "What assumptions break under stress?",
665
- "What's the weakest link?"
666
- ]
667
- },
668
- {
669
- "category": "Economic Attacks",
670
- "attacks": [
671
- "How could competitors undermine this?",
672
- "What's the economic incentive to break it?",
673
- "How can value be extracted unfairly?",
674
- "What arbitrage opportunities exist?"
675
- ]
676
- },
677
- {
678
- "category": "Social Engineering",
679
- "attacks": [
680
- "How can trust be exploited?",
681
- "What social dynamics undermine intent?",
682
- "How can this be weaponized?",
683
- "What could go viral in bad ways?"
684
- ]
685
- },
686
- {
687
- "category": "Edge Cases",
688
- "attacks": [
689
- "What happens at extremes?",
690
- "What if 1000x more users?",
691
- "What if all users do X at once?",
692
- "What breaks the model?"
693
- ]
694
- }
695
- ],
696
- "defensive_measures": {
697
- "prevention": [
698
- "Design out vulnerabilities",
699
- "Limit attack surface",
700
- "Build in circuit breakers",
701
- "Rate limiting and quotas"
702
- ],
703
- "detection": [
704
- "Monitor for anomalies",
705
- "Set up alerts for abuse patterns",
706
- "Track leading indicators",
707
- "Regular security audits"
708
- ],
709
- "response": [
710
- "Incident response plan",
711
- "Ability to roll back quickly",
712
- "Clear escalation paths",
713
- "Communication strategy"
714
- ]
715
- },
716
- "worst_case_scenarios": [
717
- "What's the absolute worst that could happen?",
718
- "How bad could this get before we notice?",
719
- "What if our assumptions are completely wrong?",
720
- "What if malicious actors target this?"
721
- ],
722
- "stress_test_questions": [
723
- "What breaks first under pressure?",
724
- "Where is there no redundancy?",
725
- "What do we trust that we shouldn't?",
726
- "What could cascade into catastrophic failure?"
727
- ]
728
  }
729
 
730
- return json.dumps(analysis, indent=2)
731
 
732
- # Run the server
733
  if __name__ == "__main__":
734
  mcp.run()
 
1
  """
2
  MCP Server for Second Opinion AI Agent
3
  Provides tools for analyzing ideas, detecting biases, and generating alternatives
4
+ Tools use LLM to generate context-aware responses based on user input
5
  """
6
 
7
  from mcp.server.fastmcp import FastMCP
8
  from pydantic import BaseModel, Field
9
  from typing import List, Dict, Optional, Literal
10
  import json
11
+ import os
+ import sys
12
  from datetime import datetime
13
 
14
  # Initialize FastMCP server
15
  mcp = FastMCP("second-opinion-tools")
16
 
17
+ # =============================================================================
18
+ # LLM INTEGRATION FOR CONTEXTUAL ANALYSIS
19
+ # =============================================================================
20
+
21
+ def get_llm_client():
22
+ """Get an LLM client based on available API keys"""
23
+ # Try Google Gemini first (often has free tier)
24
+ google_key = os.environ.get("GOOGLE_API_KEY")
25
+ if google_key:
26
+ try:
27
+ import google.generativeai as genai
28
+ genai.configure(api_key=google_key)
29
+ return ("gemini", genai)
30
+ except ImportError:
31
+ pass
32
+
33
+ # Try OpenAI
34
+ openai_key = os.environ.get("OPENAI_API_KEY")
35
+ if openai_key:
36
+ try:
37
+ from openai import OpenAI
38
+ return ("openai", OpenAI(api_key=openai_key))
39
+ except ImportError:
40
+ pass
41
+
42
+ # Try Anthropic
43
+ anthropic_key = os.environ.get("ANTHROPIC_API_KEY")
44
+ if anthropic_key:
45
+ try:
46
+ import anthropic
47
+ return ("anthropic", anthropic.Anthropic(api_key=anthropic_key))
48
+ except ImportError:
49
+ pass
50
+
51
+ return (None, None)
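
get_llm_client() walks a fixed priority order (Gemini, then OpenAI, then Anthropic) and returns the first provider whose API key is set and whose SDK imports cleanly; with no usable provider it returns (None, None) and every tool falls back to its static template. A minimal sketch of checking which backend would be selected, assuming the file is importable as mcp_server and using a placeholder key:

```python
import os

# Hypothetical key, for illustration only; no API call is made by this check.
os.environ["OPENAI_API_KEY"] = "sk-placeholder"

from mcp_server import get_llm_client

provider, client = get_llm_client()
if provider is None:
    print("No LLM SDK/key available; tools will return their template fallbacks")
else:
    print(f"Provider selected: {provider}")  # "gemini", "openai", or "anthropic"
```
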
52
+
53
+
54
+ def call_llm(prompt: str, max_tokens: int = 2000) -> Optional[str]:
55
+ """Call the available LLM with a prompt"""
56
+ provider, client = get_llm_client()
57
+
58
+ if provider is None:
59
+ return None # No LLM available, will fall back to template
60
+
61
+ try:
62
+ if provider == "gemini":
63
+ model = client.GenerativeModel("gemini-2.0-flash-lite")
64
+ response = model.generate_content(prompt)
65
+ return response.text
66
+
67
+ elif provider == "openai":
68
+ response = client.chat.completions.create(
69
+ model="gpt-4o-mini",
70
+ messages=[{"role": "user", "content": prompt}],
71
+ max_tokens=max_tokens,
72
+ temperature=0.7
73
+ )
74
+ return response.choices[0].message.content
75
+
76
+ elif provider == "anthropic":
77
+ response = client.messages.create(
78
+ model="claude-haiku-4-5-20251001",
79
+ max_tokens=max_tokens,
80
+ messages=[{"role": "user", "content": prompt}]
81
+ )
82
+ return response.content[0].text
83
+
84
+ except Exception as e:
85
+ print(f"LLM call failed: {e}")
86
+ return None
87
+
88
+ return None
89
+
90
+
91
+ def generate_contextual_analysis(tool_name: str, idea: str, extra_context: str,
92
+ analysis_prompt: str, fallback_template: dict) -> str:
93
+ """
94
+ Generate contextual analysis using LLM, with fallback to template.
95
+
96
+ Args:
97
+ tool_name: Name of the tool for logging
98
+ idea: The user's idea to analyze
99
+ extra_context: Additional context provided by user
100
+ analysis_prompt: The specific prompt for this analysis type
101
+ fallback_template: Template to use if LLM is unavailable
102
+
103
+ Returns:
104
+ JSON string with analysis results
105
+ """
106
+ full_prompt = f"""{analysis_prompt}
107
+
108
+ IDEA TO ANALYZE:
109
+ {idea}
110
+
111
+ {f"ADDITIONAL CONTEXT: {extra_context}" if extra_context else ""}
112
+
113
+ Respond with a valid JSON object only. No markdown, no code blocks, just the JSON."""
114
+
115
+ llm_response = call_llm(full_prompt)
116
+
117
+ if llm_response:
118
+ # Try to parse as JSON, clean up if needed
119
+ try:
120
+ # Remove markdown code blocks if present
121
+ cleaned = llm_response.strip()
122
+ if cleaned.startswith("```"):
123
+ cleaned = cleaned.split("\n", 1)[1] # Remove first line
124
+ if cleaned.endswith("```"):
125
+ cleaned = cleaned.rsplit("```", 1)[0]
126
+ cleaned = cleaned.strip()
127
+
128
+ # Validate it's JSON
129
+ parsed = json.loads(cleaned)
130
+ parsed["_generated"] = "contextual"
131
+ parsed["timestamp"] = datetime.now().isoformat()
132
+ return json.dumps(parsed, indent=2)
133
+ except json.JSONDecodeError:
134
+ # If not valid JSON, wrap the response
135
+ return json.dumps({
136
+ "timestamp": datetime.now().isoformat(),
137
+ "_generated": "contextual",
138
+ "analysis": llm_response
139
+ }, indent=2)
140
+
141
+ # Fallback to template
142
+ fallback_template["_generated"] = "template"
143
+ fallback_template["timestamp"] = datetime.now().isoformat()
144
+ fallback_template["idea_analyzed"] = idea[:200] + "..." if len(idea) > 200 else idea
145
+ return json.dumps(fallback_template, indent=2)
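
Because the model is asked for bare JSON but may still wrap its answer in a Markdown code fence, generate_contextual_analysis() strips a surrounding fence before validating with json.loads and tagging the result. A small self-contained sketch of that same cleanup step (re-implemented here for illustration, not imported from the module):

```python
import json

def strip_code_fence(text: str) -> str:
    """Mirror the cleanup in generate_contextual_analysis: drop a wrapping Markdown fence."""
    cleaned = text.strip()
    if cleaned.startswith("```"):
        cleaned = cleaned.split("\n", 1)[1]    # drop the opening ```json line
    if cleaned.endswith("```"):
        cleaned = cleaned.rsplit("```", 1)[0]  # drop the closing fence
    return cleaned.strip()

raw = '```json\n{"idea_summary": "demo", "highest_risk_assumption": "none"}\n```'
parsed = json.loads(strip_code_fence(raw))
print(parsed["idea_summary"])  # -> demo
```
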
146
+
147
+
148
+ # =============================================================================
149
+ # MCP TOOLS
150
+ # =============================================================================
151
 
152
  @mcp.tool()
153
  def analyze_assumptions(idea: str, context: str = "") -> str:
 
162
  JSON string containing identified assumptions, their implications, and questions to verify them
163
  """
164
 
165
+ analysis_prompt = """You are an expert critical thinking analyst. Analyze the given idea to identify ALL assumptions - both explicit and hidden.
166
+
167
+ Your analysis must be specific to this exact idea. Identify:
168
+ 1. Explicit assumptions stated directly
169
+ 2. Implicit/hidden assumptions not stated but required for the idea to work
170
+ 3. Foundational beliefs the idea rests upon
171
+ 4. Contextual assumptions about timing, market, resources, etc.
172
+
173
+ For each assumption, explain:
174
+ - What the assumption is
175
+ - Why it matters
176
+ - What happens if it's wrong
177
+ - How to verify it
178
+
179
+ Return a JSON object with this structure:
180
+ {
181
+ "idea_summary": "brief summary of the idea",
182
+ "explicit_assumptions": [
183
+ {"assumption": "...", "importance": "high/medium/low", "verification": "how to test this"}
184
+ ],
185
+ "hidden_assumptions": [
186
+ {"assumption": "...", "why_hidden": "...", "risk_if_wrong": "..."}
187
+ ],
188
+ "foundational_beliefs": ["belief 1", "belief 2"],
189
+ "critical_questions": ["question 1", "question 2", "question 3"],
190
+ "highest_risk_assumption": "the assumption most likely to be wrong or cause failure"
191
+ }"""
192
+
193
+ fallback = {
194
+ "explicit_assumptions": ["Unable to analyze - LLM not available"],
195
+ "hidden_assumptions": ["Please check API key configuration"],
196
+ "foundational_beliefs": [],
197
+ "critical_questions": [],
198
+ "highest_risk_assumption": "Analysis unavailable"
199
  }
200
 
201
+ return generate_contextual_analysis(
202
+ "analyze_assumptions", idea, context, analysis_prompt, fallback
203
+ )
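
Each rewritten tool now returns a JSON string whose _generated field records whether the answer came from the LLM ("contextual") or from the static fallback ("template"), plus a timestamp. A hedged example of exercising analyze_assumptions directly, assuming the file is importable as mcp_server (the FastMCP tool decorator leaves the underlying function callable; otherwise invoke it through an MCP client):

```python
import json

from mcp_server import analyze_assumptions

raw = analyze_assumptions(
    idea="Introduce a paid tier for our note-taking app next quarter",
    context="All current users are on the free plan",
)
result = json.loads(raw)
print(result["_generated"])                   # "contextual" with an API key, else "template"
print(result.get("highest_risk_assumption"))  # present in both the LLM and fallback shapes
```
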
204
+
205
 
206
  @mcp.tool()
207
  def detect_cognitive_biases(idea: str, reasoning: str = "") -> str:
 
216
  JSON string containing detected biases, their descriptions, and mitigation strategies
217
  """
218
 
219
+ analysis_prompt = """You are a cognitive bias expert. Analyze the given idea and reasoning to detect specific cognitive biases that may be affecting the thinking.
220
+
221
+ Look for evidence of these common biases:
222
+ - Confirmation bias (seeking confirming evidence)
223
+ - Anchoring bias (over-relying on first information)
224
+ - Sunk cost fallacy (continuing due to past investment)
225
+ - Availability bias (overweighting recent/memorable events)
226
+ - Optimism bias (underestimating risks)
227
+ - Survivorship bias (only seeing successes)
228
+ - Dunning-Kruger effect (overestimating competence)
229
+ - Status quo bias (preferring current state)
230
+ - Bandwagon effect (following the crowd)
231
+ - Recency bias (overweighting recent events)
232
+
233
+ For each bias detected, provide SPECIFIC evidence from the idea/reasoning.
234
+
235
+ Return a JSON object with this structure:
236
+ {
237
+ "idea_summary": "brief summary",
238
+ "detected_biases": [
239
+ {
240
+ "bias_name": "name of bias",
241
+ "evidence": "specific quote or aspect that shows this bias",
242
+ "severity": "high/medium/low",
243
+ "how_it_distorts": "how this bias is affecting the decision"
 
 
 
 
 
 
 
 
 
 
 
244
  }
245
+ ],
246
+ "most_concerning_bias": "the bias most likely to lead to a bad decision",
247
+ "debiasing_strategies": [
248
+ "specific action to counter the biases found"
249
+ ],
250
+ "questions_to_ask": [
251
+ "question that would help overcome these biases"
252
+ ]
253
+ }"""
254
+
255
+ fallback = {
256
+ "detected_biases": [{"bias_name": "Analysis unavailable", "evidence": "LLM not configured", "severity": "unknown"}],
257
+ "most_concerning_bias": "Unable to analyze",
258
+ "debiasing_strategies": ["Check API configuration"],
259
+ "questions_to_ask": []
260
  }
261
 
262
+ return generate_contextual_analysis(
263
+ "detect_cognitive_biases", idea, reasoning, analysis_prompt, fallback
264
+ )
265
+
266
 
267
  @mcp.tool()
268
+ def generate_alternatives(idea: str, constraints: str = "", num_alternatives: int = 5) -> str:
269
  """
270
  Generates alternative approaches and solutions to consider.
271
 
 
278
  JSON string containing diverse alternative approaches with pros/cons analysis
279
  """
280
 
281
+ num_alternatives = max(1, min(10, num_alternatives))
282
+
283
+ analysis_prompt = f"""You are a creative strategist. Generate {num_alternatives} genuinely different alternatives to the proposed idea.
284
+
285
+ Don't just tweak the original - think of fundamentally different approaches that could achieve similar goals.
286
+
287
+ Consider:
288
+ - What if we did the opposite?
289
+ - What's the minimum viable version?
290
+ - What would a 10x version look like?
291
+ - How would different industries solve this?
292
+ - What if we removed a key constraint?
293
+
294
+ {f"CONSTRAINTS TO WORK WITHIN: {constraints}" if constraints else ""}
295
+
296
+ Return a JSON object with this structure:
297
+ {{
298
+ "original_idea_summary": "brief summary of original",
299
+ "goal_identified": "the underlying goal this idea is trying to achieve",
300
+ "alternatives": [
301
+ {{
302
+ "name": "descriptive name",
303
+ "description": "what this alternative involves",
304
+ "how_different": "how this differs from the original",
305
+ "pros": ["advantage 1", "advantage 2"],
306
+ "cons": ["disadvantage 1", "disadvantage 2"],
307
+ "feasibility": "high/medium/low",
308
+ "best_if": "scenario where this alternative would be best"
309
+ }}
310
+ ],
311
+ "recommended_alternative": "which alternative seems most promising and why",
312
+ "hybrid_suggestion": "how to combine elements from multiple alternatives"
313
+ }}"""
314
+
315
+ fallback = {
316
+ "original_idea_summary": "Analysis unavailable",
317
+ "alternatives": [{"name": "LLM not available", "description": "Please configure API keys"}],
318
+ "recommended_alternative": "Unable to analyze"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
319
  }
320
 
321
+ return generate_contextual_analysis(
322
+ "generate_alternatives", idea, constraints, analysis_prompt, fallback
323
+ )
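
Note that num_alternatives is clamped to the 1-10 range before it is interpolated into the prompt, so out-of-range requests degrade gracefully instead of producing odd instructions. The clamp is the usual min/max idiom:

```python
def clamp_alternatives(n: int) -> int:
    # Same expression as in generate_alternatives: keep the request between 1 and 10.
    return max(1, min(10, n))

for requested in (0, 3, 50):
    print(requested, "->", clamp_alternatives(requested))  # 0 -> 1, 3 -> 3, 50 -> 10
```
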
324
+
325
 
326
  @mcp.tool()
327
  def perform_premortem_analysis(idea: str, timeframe: str = "1 year") -> str:
 
336
  JSON string containing potential failure modes, warning signs, and preventive measures
337
  """
338
 
339
+ analysis_prompt = f"""You are a risk analyst performing a pre-mortem analysis. Imagine it's {timeframe} from now and this idea has COMPLETELY FAILED.
340
+
341
+ Your job is to work backwards and identify all the reasons why it failed. Be specific to THIS idea - don't give generic failure modes.
342
+
343
+ Consider failures in:
344
+ - Execution (team, skills, timeline)
345
+ - Market/External factors (competition, regulation, timing)
346
+ - Strategy (wrong problem, wrong solution)
347
+ - Resources (money, people, technology)
348
+ - Assumptions (what turned out to be wrong)
349
+
350
+ Return a JSON object with this structure:
351
+ {{
352
+ "scenario": "It's {timeframe} from now, and the idea has failed because...",
353
+ "primary_cause_of_failure": "the single biggest reason it failed",
354
+ "failure_modes": [
355
+ {{
356
+ "category": "execution/market/strategy/resources/assumptions",
357
+ "what_went_wrong": "specific failure",
358
+ "probability": "high/medium/low",
359
+ "impact": "catastrophic/major/moderate/minor"
360
+ }}
361
+ ],
362
+ "early_warning_signs": [
363
+ "specific signal that would indicate this failure is coming"
364
+ ],
365
+ "preventive_actions": [
366
+ {{
367
+ "action": "what to do now",
368
+ "prevents": "which failure mode this addresses"
369
+ }}
370
+ ],
371
+ "kill_criteria": "conditions under which you should abandon this idea",
372
+ "plan_b": "what to do if this fails"
373
+ }}"""
374
+
375
+ fallback = {
376
+ "scenario": f"Analysis for {timeframe} timeframe unavailable",
377
+ "failure_modes": [{"category": "unknown", "what_went_wrong": "LLM not configured"}],
378
+ "early_warning_signs": [],
379
+ "preventive_actions": []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
380
  }
381
 
382
+ return generate_contextual_analysis(
383
+ "perform_premortem_analysis", idea, timeframe, analysis_prompt, fallback
384
+ )
385
+
386
 
387
  @mcp.tool()
388
+ def identify_stakeholders_and_impacts(idea: str, organization_context: str = "") -> str:
 
 
 
389
  """
390
  Identifies all stakeholders and analyzes potential impacts on each group.
391
 
 
397
  JSON string containing stakeholder analysis with impacts, concerns, and engagement strategies
398
  """
399
 
400
+ analysis_prompt = """You are a stakeholder analysis expert. Identify ALL parties who will be affected by this idea - both obvious and non-obvious stakeholders.
401
+
402
+ For each stakeholder, analyze:
403
+ - How they'll be impacted (positively or negatively)
404
+ - What their likely concerns will be
405
+ - Whether they have power to help or block this
406
+ - How to engage them effectively
407
+
408
+ Don't forget often-overlooked stakeholders like:
409
+ - People who maintain/support this long-term
410
+ - Those whose workload changes
411
+ - Competitors and their customers
412
+ - Regulators or compliance teams
413
+ - Future employees/customers
414
+
415
+ Return a JSON object with this structure:
416
+ {
417
+ "idea_summary": "brief summary",
418
+ "stakeholders": [
419
+ {
420
+ "group": "stakeholder name",
421
+ "relationship": "how they relate to this idea",
422
+ "impact": "positive/negative/mixed",
423
+ "impact_description": "specific ways they're affected",
424
+ "likely_concerns": ["concern 1", "concern 2"],
425
+ "power_level": "high/medium/low",
426
+ "engagement_strategy": "how to work with them"
427
+ }
428
+ ],
429
+ "most_affected": "who has the most at stake",
430
+ "potential_blockers": ["stakeholders who might resist"],
431
+ "potential_champions": ["stakeholders who might advocate"],
432
+ "conflicts_to_manage": [
433
+ {
434
+ "between": "stakeholder A vs stakeholder B",
435
+ "conflict": "what they disagree about",
436
+ "resolution_approach": "how to address"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
437
  }
438
+ ],
439
+ "stakeholder_not_consulted": "who should be involved but often isn't"
440
+ }"""
441
+
442
+ fallback = {
443
+ "stakeholders": [{"group": "Analysis unavailable", "impact": "unknown"}],
444
+ "most_affected": "Unable to analyze",
445
+ "conflicts_to_manage": []
446
  }
447
 
448
+ return generate_contextual_analysis(
449
+ "identify_stakeholders_and_impacts", idea, organization_context, analysis_prompt, fallback
450
+ )
451
+
452
 
453
  @mcp.tool()
454
  def second_order_thinking(idea: str, time_horizon: str = "2-5 years") -> str:
 
463
  JSON string containing cascade of consequences and system-level effects
464
  """
465
 
466
+ analysis_prompt = f"""You are a systems thinker analyzing cascading consequences. For the given idea, think through what happens AFTER the immediate effects.
467
+
468
+ First-order effects are obvious. Your job is to find the second, third, and nth-order effects that aren't obvious.
469
+
470
+ Think about:
471
+ - How will people ADAPT to this change?
472
+ - What new behaviors will emerge?
473
+ - What feedback loops will be created?
474
+ - What becomes possible that wasn't before?
475
+ - What becomes impossible?
476
+ - What unintended consequences might occur?
477
+
478
+ Time horizon to consider: {time_horizon}
479
+
480
+ Return a JSON object with this structure:
481
+ {{
482
+ "idea_summary": "brief summary",
483
+ "first_order_effects": [
484
+ "immediate, obvious consequence 1",
485
+ "immediate, obvious consequence 2"
486
+ ],
487
+ "second_order_effects": [
488
+ {{
489
+ "effect": "what happens as a result of first-order effects",
490
+ "caused_by": "which first-order effect leads to this",
491
+ "timeline": "when this would manifest"
492
+ }}
493
+ ],
494
+ "third_order_effects": [
495
+ {{
496
+ "effect": "deeper consequence",
497
+ "chain": "first order -> second order -> this",
498
+ "probability": "high/medium/low"
499
+ }}
500
+ ],
501
+ "feedback_loops": [
502
+ {{
503
+ "type": "reinforcing/balancing",
504
+ "description": "what cycle gets created",
505
+ "implication": "why this matters"
506
+ }}
507
+ ],
508
+ "unintended_consequences": [
509
+ {{
510
+ "consequence": "what might happen unexpectedly",
511
+ "positive_or_negative": "positive/negative",
512
+ "how_to_monitor": "how to detect this early"
513
+ }}
514
+ ],
515
+ "what_becomes_possible": ["new opportunity 1"],
516
+ "what_becomes_impossible": ["closed door 1"],
517
+ "biggest_long_term_risk": "the consequence most likely to cause regret"
518
+ }}"""
519
+
520
+ fallback = {
521
+ "first_order_effects": ["Analysis unavailable - LLM not configured"],
522
+ "second_order_effects": [],
523
+ "third_order_effects": [],
524
+ "feedback_loops": [],
525
+ "unintended_consequences": []
 
526
  }
527
 
528
+ return generate_contextual_analysis(
529
+ "second_order_thinking", idea, time_horizon, analysis_prompt, fallback
530
+ )
531
+
532
 
533
  @mcp.tool()
534
+ def opportunity_cost_analysis(idea: str, resources: str = "", alternatives: str = "") -> str:
535
  """
536
  Analyzes opportunity costs: what you give up by choosing this path.
537
 
 
544
  JSON string containing opportunity cost analysis and trade-off framework
545
  """
546
 
547
+ extra_context = f"Resources available: {resources}\nAlternatives mentioned: {alternatives}" if resources or alternatives else ""
548
+
549
+ analysis_prompt = """You are an economist analyzing opportunity costs. For every choice, something is given up. Identify what's being sacrificed by pursuing this idea.
550
+
551
+ Consider opportunity costs across:
552
+ - Time (what else could this time be spent on?)
553
+ - Money (what else could this money fund?)
554
+ - Attention (what gets less focus?)
555
+ - Talent (what else could these people work on?)
556
+ - Reputation (what credibility is at stake?)
557
+ - Optionality (what future choices are foreclosed?)
558
+
559
+ Be specific to this idea - what are the ACTUAL trade-offs?
560
+
561
+ Return a JSON object with this structure:
562
+ {
563
+ "idea_summary": "brief summary",
564
+ "resource_commitments": {
565
+ "time": {
566
+ "amount": "estimated time commitment",
567
+ "opportunity_cost": "what else could be done with this time",
568
+ "is_worth_it": "yes/no/uncertain with reasoning"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
569
  },
570
+ "money": {
571
+ "amount": "estimated financial commitment",
572
+ "opportunity_cost": "alternative uses for this money",
573
+ "is_worth_it": "yes/no/uncertain with reasoning"
 
 
 
574
  },
575
+ "attention": {
576
+ "amount": "how much focus this requires",
577
+ "opportunity_cost": "what gets deprioritized",
578
+ "is_worth_it": "yes/no/uncertain with reasoning"
579
+ }
580
+ },
581
+ "doors_that_close": [
582
+ "option that becomes unavailable by choosing this"
583
+ ],
584
+ "hidden_costs": [
585
+ "cost that isn't obvious upfront"
586
+ ],
587
+ "reversibility": {
588
+ "is_reversible": "yes/partially/no",
589
+ "cost_to_reverse": "what it would take to undo this",
590
+ "point_of_no_return": "when does this become irreversible"
591
+ },
592
+ "better_uses_of_resources": [
593
+ {
594
+ "alternative": "what else you could do",
595
+ "expected_value": "potential outcome",
596
+ "why_not_doing_this": "reason this might not be chosen"
597
+ }
598
+ ],
599
+ "key_question": "the most important trade-off question to answer before proceeding"
600
+ }"""
601
+
602
+ fallback = {
603
+ "resource_commitments": {"time": {"opportunity_cost": "Analysis unavailable"}},
604
+ "doors_that_close": [],
605
+ "hidden_costs": [],
606
+ "reversibility": {"is_reversible": "unknown"}
607
  }
608
 
609
+ return generate_contextual_analysis(
610
+ "opportunity_cost_analysis", idea, extra_context, analysis_prompt, fallback
611
+ )
612
+
613
 
614
  @mcp.tool()
615
  def red_team_analysis(idea: str, attack_surface: str = "") -> str:
 
624
  JSON string containing attack vectors, vulnerabilities, and defensive measures
625
  """
626
 
627
+ analysis_prompt = """You are a red team analyst. Your job is to BREAK this idea. Think like an adversary, a competitor, a malicious user, or just Murphy's Law.
628
+
629
+ Attack from multiple angles:
630
+ - How could users game/exploit this?
631
+ - How could competitors undermine this?
632
+ - What technical/operational failures could occur?
633
+ - What edge cases break the model?
634
+ - How could this be weaponized or misused?
635
+ - What happens at 10x or 100x scale?
636
+
637
+ Be creative and ruthless. Find the weaknesses.
638
+
639
+ Return a JSON object with this structure:
640
+ {
641
+ "idea_summary": "brief summary",
642
+ "attack_vectors": [
643
+ {
644
+ "attack_name": "descriptive name",
645
+ "category": "gaming/competition/technical/scaling/misuse",
646
+ "how_attack_works": "step by step how this exploits the idea",
647
+ "likelihood": "high/medium/low",
648
+ "impact": "catastrophic/major/moderate/minor",
649
+ "example_scenario": "concrete example of this attack"
650
+ }
651
+ ],
652
+ "critical_vulnerabilities": [
653
+ {
654
+ "vulnerability": "what's weak",
655
+ "why_its_critical": "why this matters",
656
+ "fix": "how to address"
657
+ }
658
+ ],
659
+ "what_breaks_at_scale": [
660
+ "thing that works now but fails at 10x/100x"
661
+ ],
662
+ "worst_case_scenario": {
663
+ "scenario": "the absolute worst thing that could happen",
664
+ "probability": "high/medium/low",
665
+ "how_to_prevent": "what would stop this"
666
+ },
667
+ "defensive_recommendations": [
668
+ {
669
+ "defense": "what to implement",
670
+ "addresses": "which attacks/vulnerabilities this covers",
671
+ "priority": "immediate/soon/eventually"
672
+ }
673
+ ],
674
+ "monitoring_needed": [
675
+ "signal to watch for that indicates attack/failure"
676
+ ]
677
+ }"""
678
+
679
+ fallback = {
680
+ "attack_vectors": [{"attack_name": "Analysis unavailable", "how_attack_works": "LLM not configured"}],
681
+ "critical_vulnerabilities": [],
682
+ "worst_case_scenario": {"scenario": "Unable to analyze"},
683
+ "defensive_recommendations": []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
684
  }
685
 
686
+ return generate_contextual_analysis(
687
+ "red_team_analysis", idea, attack_surface, analysis_prompt, fallback
688
+ )
689
+
690
+
691
+ # =============================================================================
692
+ # RUN SERVER
693
+ # =============================================================================
694
 
 
695
  if __name__ == "__main__":
696
  mcp.run()
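
With these defaults, mcp.run() serves the tools over stdio, so an MCP-capable client can launch the script as a subprocess and call any of the tools. A sketch of a stdio round-trip using the MCP Python SDK client classes (the tool name and arguments are examples; pass through whichever provider key you actually have):

```python
import asyncio
import os

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def main() -> None:
    params = StdioServerParameters(
        command="python",
        args=["mcp_server.py"],    # path to this server script
        env=dict(os.environ),      # forwards GOOGLE_/OPENAI_/ANTHROPIC_API_KEY if set
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            print("tools:", [t.name for t in tools.tools])

            result = await session.call_tool(
                "red_team_analysis",
                {"idea": "Let any employee edit customer records to cut support load"},
            )
            print(result.content[0].text)  # JSON string produced by the tool

asyncio.run(main())
```
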