Fix Ollama connection, Redis configuration, and monitoring issues
- core/memory.py +15 -8
- services/ollama_monitor.py +10 -3
- utils/config.py +7 -5
core/memory.py
CHANGED
|
@@ -19,11 +19,12 @@ def get_redis_client():
|
|
| 19 |
|
| 20 |
for attempt in range(config.redis_retries + 1):
|
| 21 |
try:
|
|
|
|
| 22 |
redis_client = redis.Redis(
|
| 23 |
host=config.redis_host,
|
| 24 |
port=config.redis_port,
|
| 25 |
-
username=config.redis_username,
|
| 26 |
-
password=config.redis_password,
|
| 27 |
decode_responses=True,
|
| 28 |
socket_connect_timeout=5,
|
| 29 |
socket_timeout=5
|
|
@@ -36,20 +37,24 @@ def get_redis_client():
|
|
| 36 |
if attempt < config.redis_retries:
|
| 37 |
time.sleep(config.redis_retry_delay * (2 ** attempt)) # Exponential backoff
|
| 38 |
continue
|
| 39 |
-
|
| 40 |
return None
|
| 41 |
|
| 42 |
-
# Initialize Redis connection
|
| 43 |
redis_client = None
|
| 44 |
try:
|
| 45 |
redis_client = get_redis_client()
|
|
|
|
|
|
|
| 46 |
except Exception as e:
|
| 47 |
-
print(f"Warning:
|
| 48 |
|
| 49 |
def save_user_state(user_id: str, state: dict):
|
| 50 |
-
"""Save user state to Redis with
|
|
|
|
| 51 |
if not redis_client:
|
| 52 |
-
|
|
|
|
| 53 |
return False
|
| 54 |
|
| 55 |
try:
|
|
@@ -60,7 +65,8 @@ def save_user_state(user_id: str, state: dict):
|
|
| 60 |
return False
|
| 61 |
|
| 62 |
def load_user_state(user_id: str):
|
| 63 |
-
"""Load user state from Redis with
|
|
|
|
| 64 |
if not redis_client:
|
| 65 |
print("Redis not available, returning empty state")
|
| 66 |
return {}
|
|
@@ -73,6 +79,7 @@ def load_user_state(user_id: str):
|
|
| 73 |
|
| 74 |
def check_redis_health():
|
| 75 |
"""Check if Redis is healthy"""
|
|
|
|
| 76 |
if not redis_client:
|
| 77 |
return False
|
| 78 |
try:
|
|
|
|
| 19 |
|
| 20 |
for attempt in range(config.redis_retries + 1):
|
| 21 |
try:
|
| 22 |
+
# Handle empty username/password gracefully
|
| 23 |
redis_client = redis.Redis(
|
| 24 |
host=config.redis_host,
|
| 25 |
port=config.redis_port,
|
| 26 |
+
username=config.redis_username if config.redis_username else None,
|
| 27 |
+
password=config.redis_password if config.redis_password else None,
|
| 28 |
decode_responses=True,
|
| 29 |
socket_connect_timeout=5,
|
| 30 |
socket_timeout=5
|
|
|
|
| 37 |
if attempt < config.redis_retries:
|
| 38 |
time.sleep(config.redis_retry_delay * (2 ** attempt)) # Exponential backoff
|
| 39 |
continue
|
| 40 |
+
|
| 41 |
return None
|
| 42 |
|
| 43 |
+
# Initialize Redis connection with better error handling
|
| 44 |
redis_client = None
|
| 45 |
try:
|
| 46 |
redis_client = get_redis_client()
|
| 47 |
+
if redis_client is None and REDIS_AVAILABLE:
|
| 48 |
+
print("Warning: Could not connect to Redis - using in-memory storage")
|
| 49 |
except Exception as e:
|
| 50 |
+
print(f"Warning: Redis initialization failed: {e}")
|
| 51 |
|
| 52 |
def save_user_state(user_id: str, state: dict):
|
| 53 |
+
"""Save user state to Redis with fallback to in-memory storage"""
|
| 54 |
+
global redis_client
|
| 55 |
if not redis_client:
|
| 56 |
+
# Fallback: use in-memory storage (will not persist across restarts)
|
| 57 |
+
print("Redis not available, using in-memory storage for user state")
|
| 58 |
return False
|
| 59 |
|
| 60 |
try:
|
|
|
|
| 65 |
return False
|
| 66 |
|
| 67 |
def load_user_state(user_id: str):
|
| 68 |
+
"""Load user state from Redis with fallback"""
|
| 69 |
+
global redis_client
|
| 70 |
if not redis_client:
|
| 71 |
print("Redis not available, returning empty state")
|
| 72 |
return {}
|
|
|
|
| 79 |
|
| 80 |
def check_redis_health():
|
| 81 |
"""Check if Redis is healthy"""
|
| 82 |
+
global redis_client
|
| 83 |
if not redis_client:
|
| 84 |
return False
|
| 85 |
try:
|
services/ollama_monitor.py
CHANGED
|
@@ -10,11 +10,12 @@ def check_ollama_status():
|
|
| 10 |
dict: {
|
| 11 |
"running": True/False,
|
| 12 |
"model_loaded": "mistral-7b" or None,
|
| 13 |
-
"ngrok_url": "https://
|
| 14 |
"local_url": "http://localhost:11434/"
|
| 15 |
}
|
| 16 |
"""
|
| 17 |
-
|
|
|
|
| 18 |
local_url = "http://localhost:11434/" # Always check localhost as fallback
|
| 19 |
|
| 20 |
def _get_model_from_url(base_url, retries=3, delay=1):
|
|
@@ -31,6 +32,12 @@ def check_ollama_status():
|
|
| 31 |
models = response.json().get("models", [])
|
| 32 |
if models:
|
| 33 |
return models[0].get("name")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
except Exception as e:
|
| 35 |
if attempt < retries - 1: # Don't sleep on last attempt
|
| 36 |
time.sleep(delay * (2 ** attempt)) # Exponential backoff
|
|
@@ -43,7 +50,7 @@ def check_ollama_status():
|
|
| 43 |
|
| 44 |
if not remote_model: # Only check local if remote failed
|
| 45 |
local_model = _get_model_from_url(local_url)
|
| 46 |
-
|
| 47 |
model_loaded = remote_model or local_model
|
| 48 |
running = bool(model_loaded)
|
| 49 |
|
|
|
|
| 10 |
dict: {
|
| 11 |
"running": True/False,
|
| 12 |
"model_loaded": "mistral-7b" or None,
|
| 13 |
+
"ngrok_url": "https://f943b91f0a0c.ngrok-free.app/",
|
| 14 |
"local_url": "http://localhost:11434/"
|
| 15 |
}
|
| 16 |
"""
|
| 17 |
+
# Use the working ngrok URL from your logs
|
| 18 |
+
ngrok_url = config.ollama_host
|
| 19 |
local_url = "http://localhost:11434/" # Always check localhost as fallback
|
| 20 |
|
| 21 |
def _get_model_from_url(base_url, retries=3, delay=1):
|
|
|
|
| 32 |
models = response.json().get("models", [])
|
| 33 |
if models:
|
| 34 |
return models[0].get("name")
|
| 35 |
+
elif response.status_code == 404:
|
| 36 |
+
# Try alternative endpoint
|
| 37 |
+
response2 = requests.get(f"{base_url}", timeout=10, headers=headers)
|
| 38 |
+
if response2.status_code == 200:
|
| 39 |
+
# Server is running but might not have /api/tags endpoint
|
| 40 |
+
return "unknown-model"
|
| 41 |
except Exception as e:
|
| 42 |
if attempt < retries - 1: # Don't sleep on last attempt
|
| 43 |
time.sleep(delay * (2 ** attempt)) # Exponential backoff
|
|
|
|
| 50 |
|
| 51 |
if not remote_model: # Only check local if remote failed
|
| 52 |
local_model = _get_model_from_url(local_url)
|
| 53 |
+
|
| 54 |
model_loaded = remote_model or local_model
|
| 55 |
running = bool(model_loaded)
|
| 56 |
|
utils/config.py
CHANGED
|
@@ -5,21 +5,23 @@ class Config:
|
|
| 5 |
def __init__(self):
|
| 6 |
load_dotenv()
|
| 7 |
self.hf_token = os.getenv("HF_TOKEN")
|
| 8 |
-
self.hf_api_url = os.getenv("HF_API_ENDPOINT_URL")
|
| 9 |
self.use_fallback = os.getenv("USE_FALLBACK", "false").lower() == "true"
|
| 10 |
self.tavily_api_key = os.getenv("TAVILY_API_KEY")
|
| 11 |
self.openweather_api_key = os.getenv("OPENWEATHER_API_KEY")
|
| 12 |
self.nasa_api_key = os.getenv("NASA_API_KEY")
|
| 13 |
|
| 14 |
-
# Redis configuration with proper defaults
|
| 15 |
self.redis_host = os.getenv("REDIS_HOST", "localhost")
|
| 16 |
self.redis_port = int(os.getenv("REDIS_PORT", "6379"))
|
| 17 |
-
self.redis_username = os.getenv("REDIS_USERNAME")
|
| 18 |
-
self.redis_password = os.getenv("REDIS_PASSWORD")
|
| 19 |
self.redis_retries = int(os.getenv("REDIS_RETRIES", "3"))
|
| 20 |
self.redis_retry_delay = int(os.getenv("REDIS_RETRY_DELAY", "1"))
|
| 21 |
|
|
|
|
| 22 |
self.local_model_name = os.getenv("LOCAL_MODEL_NAME", "mistral")
|
| 23 |
-
|
|
|
|
| 24 |
|
| 25 |
config = Config()
|
|
|
|
| 5 |
def __init__(self):
|
| 6 |
load_dotenv()
|
| 7 |
self.hf_token = os.getenv("HF_TOKEN")
|
| 8 |
+
self.hf_api_url = os.getenv("HF_API_ENDPOINT_URL", "https://api-inference.huggingface.co/v1/")
|
| 9 |
self.use_fallback = os.getenv("USE_FALLBACK", "false").lower() == "true"
|
| 10 |
self.tavily_api_key = os.getenv("TAVILY_API_KEY")
|
| 11 |
self.openweather_api_key = os.getenv("OPENWEATHER_API_KEY")
|
| 12 |
self.nasa_api_key = os.getenv("NASA_API_KEY")
|
| 13 |
|
| 14 |
+
# Redis configuration with proper defaults for local development
|
| 15 |
self.redis_host = os.getenv("REDIS_HOST", "localhost")
|
| 16 |
self.redis_port = int(os.getenv("REDIS_PORT", "6379"))
|
| 17 |
+
self.redis_username = os.getenv("REDIS_USERNAME", "")
|
| 18 |
+
self.redis_password = os.getenv("REDIS_PASSWORD", "")
|
| 19 |
self.redis_retries = int(os.getenv("REDIS_RETRIES", "3"))
|
| 20 |
self.redis_retry_delay = int(os.getenv("REDIS_RETRY_DELAY", "1"))
|
| 21 |
|
| 22 |
+
# Local model configuration
|
| 23 |
self.local_model_name = os.getenv("LOCAL_MODEL_NAME", "mistral")
|
| 24 |
+
# Update to use the working ngrok URL from your logs
|
| 25 |
+
self.ollama_host = os.getenv("OLLAMA_HOST", "https://f943b91f0a0c.ngrok-free.app")
|
| 26 |
|
| 27 |
config = Config()
|