# Force redeploy trigger - version 1.5
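"""AI Life Coach: a Streamlit front end.

Talks to a remote Ollama instance exposed through an ngrok tunnel and falls
back to a Hugging Face model (via core.llm.LLMClient) when Ollama is
unreachable. Per-user conversation history comes from
core.memory.load_user_state.
"""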
import streamlit as st
from utils.config import config
import requests
import json
import os
from core.memory import load_user_state
from core.llm import LLMClient

# Set page config
st.set_page_config(page_title="AI Life Coach", page_icon="🧘", layout="centered")

# Initialize session state for ngrok URL
if "ngrok_url" not in st.session_state:
    st.session_state.ngrok_url = config.ollama_host

# Sidebar for user selection
st.sidebar.title("🧘 AI Life Coach")
user = st.sidebar.selectbox("Select User", ["Rob", "Sarah"])

# Ngrok URL input in sidebar
st.sidebar.markdown("---")
st.sidebar.subheader("Ollama Connection")
ngrok_input = st.sidebar.text_input("Ngrok URL", value=st.session_state.ngrok_url)
if st.sidebar.button("Update Ngrok URL"):
    st.session_state.ngrok_url = ngrok_input
    # Reset the model status so the new endpoint is re-checked; otherwise a
    # stale "unreachable" status would stick after the URL changes.
    st.session_state.model_status = "checking"
    st.sidebar.success("Ngrok URL updated!")
    st.rerun()

st.sidebar.markdown("---")

# Environment info: SPACE_ID is set automatically inside Hugging Face Spaces
BASE_URL = os.environ.get("SPACE_ID", "")
IS_HF_SPACE = bool(BASE_URL)

# Headers to skip ngrok browser warning
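# Free-tier ngrok tunnels serve an HTML interstitial to unrecognized clients;
# sending ngrok-skip-browser-warning (any value) makes ngrok forward the
# request straight through to the upstream Ollama server.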
NGROK_HEADERS = {
    "ngrok-skip-browser-warning": "true",
    "User-Agent": "AI-Life-Coach-App"
}

# Session state for model status
if "model_status" not in st.session_state:
    st.session_state.model_status = "checking"

if "available_models" not in st.session_state:
    st.session_state.available_models = []

# Fetch Ollama status
def get_ollama_status(ngrok_url):
    try:
        # Try to connect to the remote Ollama service directly
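        # GET /api/tags is Ollama's model-listing endpoint; a 200 response
        # with a non-empty "models" list means the server is reachable and
        # has at least one model pulled.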
        response = requests.get(
            f"{ngrok_url}/api/tags", 
            headers=NGROK_HEADERS,
            timeout=10
        )
        if response.status_code == 200:
            models = response.json().get("models", [])
            model_names = [m.get("name") for m in models]
            st.session_state.available_models = model_names
            
            if models:
                return {
                    "running": True, 
                    "model_loaded": models[0].get("name"),
                    "remote_host": ngrok_url,
                    "available_models": model_names
                }
            else:
                st.session_state.model_status = "no_models"
                return {
                    "running": False, 
                    "model_loaded": None,
                    "remote_host": ngrok_url,
                    "message": "Connected to Ollama but no models found"
                }
        else:
            # Surface non-200 responses (e.g. a 502 from ngrok while the
            # tunnel is up but Ollama is down) as an explicit error dict
            # instead of falling through and returning None, which would
            # crash the .get() calls on the result below.
            st.session_state.model_status = "unreachable"
            return {
                "running": False,
                "model_loaded": None,
                "error": f"HTTP {response.status_code}: {response.text[:100]}",
                "remote_host": ngrok_url
            }
    except Exception as e:
        st.session_state.model_status = "unreachable"
        # If direct connection fails, return error info
        return {
            "running": False, 
            "model_loaded": None,
            "error": str(e),
            "remote_host": ngrok_url
        }

# Poll for model availability
def poll_model_status(ngrok_url):
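    # Streamlit reruns this script top-to-bottom on every interaction, so
    # "polling" here is one extra status check per rerun, not a background
    # loop; settled statuses are left untouched.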
    if st.session_state.model_status in ["checking", "no_models"]:
        try:
            response = requests.get(
                f"{ngrok_url}/api/tags", 
                headers=NGROK_HEADERS,
                timeout=5
            )
            if response.status_code == 200:
                models = response.json().get("models", [])
                model_names = [m.get("name") for m in models]
                st.session_state.available_models = model_names
                
                if config.local_model_name in model_names:
                    st.session_state.model_status = "ready"
                elif models:
                    st.session_state.model_status = "different_models"
                else:
                    st.session_state.model_status = "no_models"
        except Exception:
            st.session_state.model_status = "unreachable"

# Load the selected user's saved conversation history
def get_conversation_history(user_id):
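    # Assumes load_user_state returns a dict whose "conversation" field is a
    # JSON-encoded list of {"role": ..., "content": ...} messages, matching
    # how the chat loop below consumes it.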
    user_state = load_user_state(user_id)
    if user_state and "conversation" in user_state:
        return json.loads(user_state["conversation"])
    return []

# Check Ollama status with the current ngrok URL
ollama_status = get_ollama_status(st.session_state.ngrok_url)

# Re-check model availability on this rerun (skipped once the status settles)
poll_model_status(st.session_state.ngrok_url)

# Pick a backend: fall back to Hugging Face when Ollama is unreachable or
# when config.use_fallback forces it
use_fallback = not ollama_status.get("running", False) or config.use_fallback

if use_fallback:
    st.sidebar.warning("🌐 Using Hugging Face fallback (Ollama not available)")
    if "error" in ollama_status:
        st.sidebar.caption(f"Error: {ollama_status['error'][:50]}...")
else:
    st.sidebar.success(f"🧠 Ollama Model: {ollama_status['model_loaded']}")
    st.sidebar.info(f"Connected to: {ollama_status['remote_host']}")

# Model status indicator
model_status_container = st.sidebar.empty()
if st.session_state.model_status == "ready":
    model_status_container.success("✅ Model Ready")
elif st.session_state.model_status == "checking":
    model_status_container.info("🔍 Checking model...")
elif st.session_state.model_status == "no_models":
    model_status_container.warning("⚠️ No models found")
elif st.session_state.model_status == "different_models":
    model_status_container.warning("⚠️ Different models available")
else:  # unreachable
    model_status_container.error("❌ Ollama unreachable")

# Main chat interface
st.title("🧘 AI Life Coach")
st.markdown("Talk to your personal development assistant.")

# Show detailed status
with st.expander("🔍 Connection Status"):
    st.write("Ollama Status:", ollama_status)
    st.write("Model Status:", st.session_state.model_status)
    st.write("Available Models:", st.session_state.available_models)
    st.write("Environment Info:")
    st.write("- Is HF Space:", IS_HF_SPACE)
    st.write("- Base URL:", BASE_URL or "Not in HF Space")
    st.write("- Configured Ollama Host:", config.ollama_host)
    st.write("- Current Ngrok URL:", st.session_state.ngrok_url)
    st.write("- Using Fallback:", use_fallback)

# Function to send message to Ollama
def send_to_ollama(user_input, conversation_history, ngrok_url):
    try:
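        # Ollama's /api/chat endpoint takes the full message list; with
        # "stream": False it returns a single JSON object whose reply text
        # lives under message.content.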
        payload = {
            "model": config.local_model_name,
            "messages": conversation_history,
            "stream": False
        }
        
        response = requests.post(
            f"{ngrok_url}/api/chat",
            json=payload,
            headers=NGROK_HEADERS,
            timeout=60
        )
        
        if response.status_code == 200:
            response_data = response.json()
            return response_data.get("message", {}).get("content", "")
        else:
            st.error(f"Ollama API error: {response.status_code}")
            st.error(response.text[:200])
            return None
    except Exception as e:
        st.error(f"Connection error: {e}")
        return None

# Function to send message to Hugging Face (fallback)
def send_to_hf(user_input, conversation_history):
    try:
        # Initialize LLM client for Hugging Face
        llm_client = LLMClient(provider="huggingface")
        
        # Format prompt for HF
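        # Flatten the chat into a plain-text "System:/Human:/Assistant:"
        # transcript; the trailing "Assistant:" cue prompts a completion-style
        # model to continue as the coach.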
        prompt = ""
        for msg in conversation_history:
            role = msg["role"]
            content = msg["content"]
            if role == "system":
                prompt += f"System: {content}\n"
            elif role == "user":
                prompt += f"Human: {content}\n"
            elif role == "assistant":
                prompt += f"Assistant: {content}\n"
        prompt += "Assistant:"
        
        response = llm_client.generate(prompt, max_tokens=500, stream=False)
        return response
    except Exception as e:
        st.error(f"Hugging Face API error: {e}")
        return None

# Display conversation history
conversation = get_conversation_history(user)
for msg in conversation:
    role = msg["role"].capitalize()
    content = msg["content"]
    st.markdown(f"**{role}:** {content}")

# Chat input
user_input = st.text_input("Your message...", key="input")
if st.button("Send"):
    if user_input.strip() == "":
        st.warning("Please enter a message.")
    else:
        # Display user message
        st.markdown(f"**You:** {user_input}")

        # Prepare conversation history
        conversation_history = [{"role": msg["role"], "content": msg["content"]} 
                              for msg in conversation[-5:]]  # Last 5 messages
        conversation_history.append({"role": "user", "content": user_input})
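        # Only the 5 most recent saved messages plus the new one are sent;
        # this bounds the prompt size but drops older context (and any system
        # persona) from the model's view.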
        
        # Send to appropriate backend
        with st.spinner("AI Coach is thinking..."):
            if use_fallback:
                ai_response = send_to_hf(user_input, conversation_history)
                backend_used = "Hugging Face"
            else:
                ai_response = send_to_ollama(user_input, conversation_history, st.session_state.ngrok_url)
                backend_used = "Ollama"
            
            if ai_response:
                st.markdown(f"**AI Coach ({backend_used}):** {ai_response}")
                # Note: In a production app, we'd save the conversation to Redis here
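                # A minimal sketch of that save step (save_user_state is a
                # hypothetical counterpart to load_user_state; this file only
                # imports the loader):
                #   conversation_history.append(
                #       {"role": "assistant", "content": ai_response})
                #   save_user_state(user, {
                #       "conversation": json.dumps(conversation_history)})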
            else:
                st.error(f"Failed to get response from {backend_used}.")