rdune71 committed
Commit: 5470458
Parent(s): c0ef6d4

Fix Ollama connection issues with enhanced error handling and updated ngrok URL

Files changed (2):
  1. app.py +36 -7
  2. test_ollama_connection.py +11 -2
app.py CHANGED
@@ -1,4 +1,4 @@
-# Force redeploy trigger - version 1.9
+# Force redeploy trigger - version 2.0
 import streamlit as st
 from utils.config import config
 import requests
@@ -87,13 +87,13 @@ st.sidebar.markdown("---")
 BASE_URL = os.environ.get("SPACE_ID", "")
 IS_HF_SPACE = bool(BASE_URL)
 
-# Fetch Ollama status
+# Fetch Ollama status with enhanced error handling
 def get_ollama_status(ngrok_url):
     try:
         response = requests.get(
             f"{ngrok_url}/api/tags",
             headers=NGROK_HEADERS,
-            timeout=10
+            timeout=15  # Increased timeout
         )
         if response.status_code == 200:
             models = response.json().get("models", [])
@@ -112,11 +112,30 @@ def get_ollama_status(ngrok_url):
             else:
                 st.session_state.model_status = "no_models"
                 return {
-                    "running": False,
+                    "running": True,  # Server is running but no models
                     "model_loaded": None,
                     "remote_host": ngrok_url,
                     "message": "Connected to Ollama but no models found"
                 }
+        elif response.status_code == 404:
+            # Server might be running but endpoint not available
+            response2 = requests.get(f"{ngrok_url}", headers=NGROK_HEADERS, timeout=10)
+            if response2.status_code == 200:
+                st.session_state.model_status = "checking"
+                return {
+                    "running": True,
+                    "model_loaded": "unknown",
+                    "remote_host": ngrok_url,
+                    "message": "Server running, endpoint check inconclusive"
+                }
+            else:
+                st.session_state.model_status = "unreachable"
+                return {
+                    "running": False,
+                    "model_loaded": None,
+                    "error": f"HTTP {response.status_code}",
+                    "remote_host": ngrok_url
+                }
         else:
             st.session_state.model_status = "unreachable"
             return {
@@ -125,6 +144,14 @@ def get_ollama_status(ngrok_url):
                 "error": f"HTTP {response.status_code}",
                 "remote_host": ngrok_url
             }
+    except requests.exceptions.Timeout:
+        st.session_state.model_status = "unreachable"
+        return {
+            "running": False,
+            "model_loaded": None,
+            "error": "Timeout - server not responding",
+            "remote_host": ngrok_url
+        }
     except Exception as e:
         st.session_state.model_status = "unreachable"
         return {
@@ -156,10 +183,12 @@ if ollama_status is None:
         "remote_host": st.session_state.ngrok_url
     }
 
-# Update model status
-if ollama_status.get("running", False):
-    if ollama_status.get("available_models"):
+# Update model status with better logic
+if ollama_status and ollama_status.get("running", False):
+    if ollama_status.get("available_models") and len(ollama_status.get("available_models", [])) > 0:
         st.session_state.model_status = "ready"
+    elif ollama_status.get("model_loaded") == "unknown":
+        st.session_state.model_status = "ready"  # Assume ready if server responds
     else:
         st.session_state.model_status = "no_models"
 else:
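
The heart of the app.py change is the 404 fallback: when /api/tags is missing, the app probes the tunnel root before declaring Ollama unreachable, and a dedicated Timeout handler separates "server not responding" from other failures. Below is a minimal standalone sketch of that probe pattern outside Streamlit; the tunnel URL is a placeholder, and NGROK_HEADERS is assumed to carry ngrok's skip-browser-warning header, since its definition is not shown in this diff.

import requests

# Placeholder tunnel URL; free-tier ngrok hostnames rotate on restart.
NGROK_URL = "https://example.ngrok-free.app"
# Assumption: mirrors app.py's NGROK_HEADERS, which this diff does not show.
NGROK_HEADERS = {"ngrok-skip-browser-warning": "true"}

def probe_ollama(base_url):
    """Check an Ollama server with the same tags-then-root fallback as app.py."""
    try:
        resp = requests.get(f"{base_url}/api/tags",
                            headers=NGROK_HEADERS, timeout=15)
        if resp.status_code == 200:
            # An empty model list still means the server itself is up.
            models = [m["name"] for m in resp.json().get("models", [])]
            return {"running": True, "models": models}
        if resp.status_code == 404:
            # /api/tags missing: probe the tunnel root to distinguish
            # "server up, wrong endpoint" from "server down".
            root = requests.get(base_url, headers=NGROK_HEADERS, timeout=10)
            if root.status_code == 200:
                return {"running": True, "models": None,
                        "message": "endpoint check inconclusive"}
        return {"running": False, "error": f"HTTP {resp.status_code}"}
    except requests.exceptions.Timeout:
        return {"running": False, "error": "Timeout - server not responding"}
    except requests.exceptions.RequestException as exc:
        return {"running": False, "error": str(exc)}

if __name__ == "__main__":
    print(probe_ollama(NGROK_URL))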
test_ollama_connection.py CHANGED
@@ -5,8 +5,9 @@ from dotenv import load_dotenv
 # Load environment variables
 load_dotenv()
 
-OLLAMA_HOST = os.getenv("OLLAMA_HOST", "https://ace32bd59aef.ngrok-free.app")
-MODEL_NAME = os.getenv("LOCAL_MODEL_NAME", "mistral:latest")
+# Use the currently active ngrok URL from your logs
+OLLAMA_HOST = os.getenv("OLLAMA_HOST", "https://f943b91f0a0c.ngrok-free.app")
+MODEL_NAME = os.getenv("LOCAL_MODEL_NAME", "mistral")
 
 print(f"Testing Ollama connection to: {OLLAMA_HOST}")
 print(f"Using model: {MODEL_NAME}")
@@ -30,6 +31,14 @@ try:
         print(f"Found {len(models)} models:")
         for model in models:
             print(f" - {model['name']} ({model.get('size', 'Unknown size')})")
+    elif response.status_code == 404:
+        print("⚠️ Endpoint not found - checking root endpoint...")
+        # Try basic connectivity
+        response2 = requests.get(f"{OLLAMA_HOST}", headers=headers, timeout=10)
+        if response2.status_code == 200:
+            print("✓ Server is running but /api/tags endpoint not available")
+        else:
+            print(f"✗ Server returned: {response2.status_code}")
     else:
         print(f"Error: {response.text}")
 except Exception as e:
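
Since free-tier ngrok hostnames change on every tunnel restart, the hard-coded default above will go stale again; OLLAMA_HOST should normally come from .env, which load_dotenv() picks up. A hypothetical guard placed just after load_dotenv() (a sketch, not part of this commit) would make that failure mode explicit instead of letting it surface as an opaque HTTP error:

import os

# Hypothetical guard: flag the stale-URL failure mode up front.
if "OLLAMA_HOST" not in os.environ:
    print("Warning: OLLAMA_HOST not set; falling back to the hard-coded "
          "ngrok URL, which rotates on every tunnel restart. "
          "Set OLLAMA_HOST in .env or the environment.")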