Elliot89 committed
Commit 9e1546b · verified · 1 Parent(s): 7e5fe1b

Upload 2 files

Files changed (2):
  1. app.py +58 -217
  2. requirements.txt +18 -21
app.py CHANGED
@@ -1,251 +1,121 @@
  import os
  import datetime
  import logging
+ import io
  import base64
  import uuid
+
  import cv2
  import pandas as pd
  import numpy as np
  import librosa
  import torch
  from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor
+ from deepface import DeepFace
+
  from flask import Flask, request, jsonify, render_template
- from pydub import AudioSegment

- # Initialize Flask app
+ # --- App & Logger Setup ---
  app = Flask(__name__)
-
- # Configure logging
  logging.basicConfig(
      level=logging.INFO,
      format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
  )

- # File paths
+ # --- Constants & Directory Setup ---
  LOG_FILE = "wellbeing_logs.csv"
  CAPTURED_IMAGE_DIR = "captured_images"
  TEMP_AUDIO_DIR = "temp_audio"

- # Create directories
  os.makedirs(CAPTURED_IMAGE_DIR, exist_ok=True)
  os.makedirs(TEMP_AUDIO_DIR, exist_ok=True)

- # Configure FFmpeg for PythonAnywhere
- AudioSegment.converter = "/usr/bin/ffmpeg"
- AudioSegment.ffprobe = "/usr/bin/ffprobe"
-
- # Global model variables
+ # --- Caching the Model ---
  voice_model = None
  voice_feature_extractor = None

  def load_voice_emotion_model():
-     """Load voice emotion recognition model"""
      global voice_model, voice_feature_extractor
      if voice_model is None:
-         logging.info("Loading voice emotion model...")
+         logging.info("Loading voice emotion model for the first time...")
          model_name = "superb/wav2vec2-base-superb-er"
          voice_model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name)
          voice_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_name)
-         logging.info("Voice emotion model loaded successfully.")
+         logging.info("Voice emotion model loaded.")
      return voice_model, voice_feature_extractor

- def expand_voice_emotion_label(short_label):
-     """Expand abbreviated emotion labels"""
-     voice_label_mapping = {
-         'sad': 'Sadness',
-         'ang': 'Anger',
-         'hap': 'Happiness',
-         'neu': 'Neutral',
-         'fea': 'Fear',
-         'dis': 'Disgust',
-         'sur': 'Surprise',
-         'calm': 'Calm',
-         'anxious': 'Anxious',
-         'stressed': 'Stressed'
-     }
-     return voice_label_mapping.get(short_label.lower(), short_label.title())
-
+ # --- Analysis Functions ---
  def analyze_voice_emotion(audio_file_path):
-     """Analyze emotion from audio file"""
      try:
          model, feature_extractor = load_voice_emotion_model()
          y, sr = librosa.load(audio_file_path, sr=16000, mono=True)
-
          if y.shape[0] == 0:
              logging.warning(f"Audio file {audio_file_path} was empty.")
              return "Error: Invalid or empty audio"
-
          inputs = feature_extractor(y, sampling_rate=sr, return_tensors="pt", padding=True)
-
          with torch.no_grad():
              logits = model(**inputs).logits
-         predicted_id = torch.argmax(logits, dim=-1).item()
-
-         raw_emotion = model.config.id2label[predicted_id]
-         return expand_voice_emotion_label(raw_emotion)
-
+         predicted_id = torch.argmax(logits, dim=-1).item()
+         return model.config.id2label[predicted_id]
      except Exception as e:
-         logging.exception(f"Voice emotion analysis failed: {e}")
+         logging.exception(f"Voice emotion analysis failed for file {audio_file_path}: {e}")
          return "Error: Voice analysis failed"

- def analyze_emotion_from_data(image_bytes, detector_backend="opencv"):
-     """Analyze emotion from image data using OpenCV (lightweight)"""
+ def analyze_emotion_from_data(image_bytes, detector_backend="retinaface"):
      try:
-         if not image_bytes or len(image_bytes) == 0:
-             logging.error("Empty image data received")
-             return "Error: Empty image data"
-
          nparr = np.frombuffer(image_bytes, np.uint8)
          img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
-
          if img_np is None:
-             logging.error("Could not decode image")
              return "Error: Could not decode image"

-         # Use DeepFace for emotion detection
-         from deepface import DeepFace
-
-         result = DeepFace.analyze(
-             img_path=img_np,
-             actions=['emotion'],
-             detector_backend=detector_backend,
-             enforce_detection=False
-         )
-
+         # Use a fallback detector if the selected one fails
+         try:
+             result = DeepFace.analyze(
+                 img_path=img_np, actions=['emotion'],
+                 detector_backend=detector_backend, enforce_detection=False
+             )
+         except Exception as detector_error:
+             logging.warning(f"Detector '{detector_backend}' failed: {detector_error}. Falling back to 'opencv'.")
+             result = DeepFace.analyze(
+                 img_path=img_np, actions=['emotion'],
+                 detector_backend='opencv', enforce_detection=False
+             )
+
          if isinstance(result, list) and len(result) > 0:
              return result[0].get("dominant_emotion", "No face detected")
-         return "No face detected"
-
+         else:
+             return "No face detected"
      except Exception as e:
-         logging.exception(f"Face emotion analysis failed: {e}")
+         logging.exception(f"Face emotion analysis failed with backend {detector_backend}: {e}")
          return "Error: Face analysis failed"

  def assess_stress_enhanced(face_emotion, sleep_hours, activity_level, voice_emotion):
-     """Calculate stress score and generate feedback"""
      activity_map = {"Very Low": 3, "Low": 2, "Moderate": 1, "High": 0}
-     emotion_map = {
-         "angry": 2, "disgust": 2, "fear": 2, "sad": 2,
-         "neutral": 1, "surprise": 1, "happy": 0
-     }
-
+     emotion_map = {"angry": 2, "disgust": 2, "fear": 2, "sad": 2, "neutral": 1, "surprise": 1, "happy": 0}
      face_emotion_score = emotion_map.get(str(face_emotion).lower(), 1)
      voice_emotion_score = emotion_map.get(str(voice_emotion).lower(), 1)
      emotion_score = round((face_emotion_score + voice_emotion_score) / 2) if voice_emotion != "N/A" else face_emotion_score
      activity_score = activity_map.get(str(activity_level), 1)
-
      try:
          sleep_hours = float(sleep_hours)
          sleep_score = 0 if sleep_hours >= 7 else (1 if sleep_hours >= 5 else 2)
      except (ValueError, TypeError):
          sleep_score, sleep_hours = 2, 0
-
      stress_score = emotion_score + activity_score + sleep_score
-
-     feedback = f"""
-     <div class="assessment-container">
-         <div class="assessment-header">
-             <h3>Your Wellbeing Assessment</h3>
-             <div class="stress-level-indicator level-{min(stress_score, 6)}">
-                 Stress Level: <strong>{stress_score}/8</strong>
-             </div>
-         </div>
-         <div class="assessment-breakdown">
-             <div class="factor-row">
-                 <span class="factor-label">Facial Expression:</span>
-                 <span class="factor-value">{face_emotion}</span>
-                 <span class="factor-score">({face_emotion_score} pts)</span>
-             </div>
-             <div class="factor-row">
-                 <span class="factor-label">Voice Tone:</span>
-                 <span class="factor-value">{voice_emotion}</span>
-                 <span class="factor-score">({voice_emotion_score} pts)</span>
-             </div>
-             <div class="factor-row">
-                 <span class="factor-label">Sleep Duration:</span>
-                 <span class="factor-value">{sleep_hours} hours</span>
-                 <span class="factor-score">({sleep_score} pts)</span>
-             </div>
-             <div class="factor-row">
-                 <span class="factor-label">Activity Level:</span>
-                 <span class="factor-value">{activity_level}</span>
-                 <span class="factor-score">({activity_score} pts)</span>
-             </div>
-         </div>
-     """
-
+     feedback = f"**Your potential stress score is {stress_score} (lower is better).**\n\n**Breakdown:**\n"
+     feedback += f"- Face Emotion: {face_emotion} (score: {face_emotion_score})\n"
+     feedback += f"- Voice Emotion: {voice_emotion} (score: {voice_emotion_score})\n"
+     feedback += f"- Sleep: {sleep_hours} hours (score: {sleep_score})\n"
+     feedback += f"- Activity: {activity_level} (score: {activity_score})\n"
      if stress_score <= 2:
-         feedback += '<div class="assessment-message positive">🌟 Great! You\'re in a good mental space.</div>'
+         feedback += "\nGreat job! You seem to be in a good space."
      elif stress_score <= 4:
-         feedback += '<div class="assessment-message neutral">😌 You\'re managing well. Consider mindfulness practices.</div>'
+         feedback += "\nYou're doing okay, but remember to be mindful of your rest and mood."
      else:
-         feedback += '<div class="assessment-message concerning">🤗 Consider self-care activities to reduce stress.</div>'
-
-     feedback += "</div>"
+         feedback += "\nConsider taking some time for self-care. Improving sleep or gentle activity might help."
      return feedback, stress_score

- def generate_ai_insights(face_emotion, voice_emotion, sleep_hours, activity_level, stress_score):
-     """Generate personalized AI insights"""
-     insights = []
-
-     if 'sad' in str(face_emotion).lower() or 'angry' in str(face_emotion).lower():
-         insights.append("Try mood-lifting activities like listening to music or spending time in nature")
-
-     if str(voice_emotion).lower() in ['sadness', 'anger', 'stressed']:
-         insights.append("Practice deep breathing to release vocal tension")
-
-     if float(sleep_hours) < 6:
-         insights.append("Prioritize sleep hygiene for better emotional regulation")
-
-     if str(activity_level).lower() == 'very low':
-         insights.append("Gentle movement like stretching can help reduce stress")
-
-     if stress_score >= 5:
-         insights.append("Connect with supportive people in your life")
-
-     if not insights:
-         insights.append("Keep up your healthy habits! You're doing great.")
-
-     return insights[:3]
-
- def get_crisis_resources():
-     """Return crisis support resources"""
-     return [
-         {"name": "Crisis Text Line", "number": "Text HOME to 741741", "description": "24/7 crisis support via text"},
-         {"name": "National Suicide Prevention Lifeline", "number": "988", "description": "Free and confidential support 24/7"},
-         {"name": "Veterans Crisis Line", "number": "1-800-273-8255", "description": "Support for veterans"},
-         {"name": "Emergency Services", "number": "911", "description": "Immediate emergency assistance"}
-     ]
-
- def get_coping_techniques():
-     """Return coping techniques"""
-     return [
-         {
-             "name": "Deep Breathing",
-             "description": "4-7-8 breathing pattern to reduce anxiety",
-             "instructions": "Breathe in for 4 counts, hold for 7, exhale for 8. Repeat 4 times.",
-             "duration": "2-3 minutes"
-         },
-         {
-             "name": "Grounding Exercise",
-             "description": "5-4-3-2-1 sensory technique",
-             "instructions": "Name 5 things you see, 4 you can touch, 3 you hear, 2 you smell, 1 you taste.",
-             "duration": "3-5 minutes"
-         },
-         {
-             "name": "Progressive Relaxation",
-             "description": "Tense and release muscle groups",
-             "instructions": "Start with your toes, tense for 5 seconds, then relax. Move up through your body.",
-             "duration": "10-15 minutes"
-         },
-         {
-             "name": "Mindful Meditation",
-             "description": "Focus on present moment awareness",
-             "instructions": "Sit quietly, focus on your breath, notice thoughts without judgment.",
-             "duration": "5-20 minutes"
-         }
-     ]
-
- # Flask Routes
+ # --- Flask Routes ---
  @app.route('/')
  def index():
      return render_template('index.html')
@@ -253,17 +123,15 @@ def index():
  @app.route('/analyze_face', methods=['POST'])
  def analyze_face_endpoint():
      data = request.json
-     detector = data.get('detector', 'opencv')
+     detector = data.get('detector', 'retinaface')
      image_data = base64.b64decode(data['image'].split(',')[1])
      emotion = analyze_emotion_from_data(image_data, detector_backend=detector)
-
      image_path = "N/A"
-     if not emotion.startswith("Error:") and emotion != "No face detected":
+     if not emotion.startswith("Error:") and not emotion == "No face detected":
          filename = f"face_{uuid.uuid4()}.jpg"
          image_path = os.path.join(CAPTURED_IMAGE_DIR, filename)
          with open(image_path, "wb") as f:
              f.write(image_data)
-
      return jsonify({'emotion': emotion, 'image_path': image_path})

  @app.route('/analyze_voice', methods=['POST'])
@@ -271,27 +139,15 @@ def analyze_voice_endpoint():
      audio_file = request.files.get('audio')
      if not audio_file:
          return jsonify({'error': 'No audio file provided'}), 400
-
-     webm_filename = f"{uuid.uuid4()}.webm"
-     wav_filename = webm_filename.replace(".webm", ".wav")
-
-     webm_filepath = os.path.join(TEMP_AUDIO_DIR, webm_filename)
-     wav_filepath = os.path.join(TEMP_AUDIO_DIR, wav_filename)
-
+     temp_filename = f"{uuid.uuid4()}.webm"
+     temp_filepath = os.path.join(TEMP_AUDIO_DIR, temp_filename)
      try:
-         audio_file.save(webm_filepath)
-         sound = AudioSegment.from_file(webm_filepath, format="webm")
-         sound.export(wav_filepath, format="wav")
-         emotion = analyze_voice_emotion(wav_filepath)
-         return jsonify({'voice_emotion': emotion})
-     except Exception as e:
-         logging.exception(f"Error in voice pipeline: {e}")
-         return jsonify({'error': str(e)}), 500
+         audio_file.save(temp_filepath)
+         emotion = analyze_voice_emotion(temp_filepath)
      finally:
-         if os.path.exists(webm_filepath):
-             os.remove(webm_filepath)
-         if os.path.exists(wav_filepath):
-             os.remove(wav_filepath)
+         if os.path.exists(temp_filepath):
+             os.remove(temp_filepath)
+     return jsonify({'voice_emotion': emotion})

  @app.route('/log_checkin', methods=['POST'])
  def log_checkin_endpoint():
@@ -299,7 +155,7 @@ def log_checkin_endpoint():
      feedback, stress_score = assess_stress_enhanced(
          data['emotion'], data['sleep_hours'], data['activity_level'], data['voice_emotion']
      )
-
+     # *** FIX: Format timestamp as a consistent string BEFORE saving ***
      new_log_entry = {
          "timestamp": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
          "face_emotion": data['emotion'],
@@ -307,26 +163,14 @@ def log_checkin_endpoint():
          "sleep_hours": data['sleep_hours'],
          "activity_level": data['activity_level'],
          "stress_score": stress_score,
-         "detector_backend": data.get('detector', 'opencv'),
-         "image_path": data.get('image_path', 'N/A'),
-         "mood": data.get('mood', 'Not specified')
+         "detector_backend": data.get('detector', 'retinaface'),
+         "image_path": data.get('image_path', 'N/A')
      }
-
-     ai_insights = generate_ai_insights(
-         data['emotion'], data.get('voice_emotion', 'N/A'),
-         data['sleep_hours'], data['activity_level'], stress_score
-     )
-
      try:
          header = not os.path.exists(LOG_FILE)
          df_new = pd.DataFrame([new_log_entry])
          df_new.to_csv(LOG_FILE, mode='a', header=header, index=False)
-         return jsonify({
-             'feedback': feedback,
-             'stress_score': stress_score,
-             'ai_insights': ai_insights,
-             'status': 'success'
-         })
+         return jsonify({'feedback': feedback, 'stress_score': stress_score, 'status': 'success'})
      except Exception as e:
          logging.exception(f"Could not save log: {e}")
          return jsonify({'error': f'Could not save log: {e}'}), 500
@@ -337,6 +181,7 @@ def get_logs_endpoint():
          return jsonify({'data': [], 'columns': []})
      try:
          df = pd.read_csv(LOG_FILE)
+         # *** FIX: No need to parse/reformat timestamps. They are already correct strings. ***
          return jsonify({
              'data': df.to_dict(orient='records'),
              'columns': df.columns.tolist()
@@ -356,15 +201,11 @@ def clear_logs_endpoint():
          if os.path.exists(directory):
              for f in os.listdir(directory):
                  os.remove(os.path.join(directory, f))
-         return jsonify({'status': 'success', 'message': 'All logs cleared.'})
+         return jsonify({'status': 'success', 'message': 'All logs and images cleared.'})
      except Exception as e:
          logging.exception(f"Error clearing logs: {e}")
          return jsonify({'status': 'error', 'message': str(e)}), 500

- @app.route('/get_crisis_resources', methods=['GET'])
- def get_crisis_resources_endpoint():
-     return jsonify(get_crisis_resources())
-
- @app.route('/get_coping_techniques', methods=['GET'])
- def get_coping_techniques_endpoint():
-     return jsonify(get_coping_techniques())
+ if __name__ == '__main__':
+     load_voice_emotion_model()
+     app.run(debug=True, host='0.0.0.0')
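For reference, a minimal client sketch exercising the three endpoints in the new app.py. Nothing here is part of the commit: it assumes the app is served locally on Flask's default port 5000, and selfie.jpg / clip.webm are hypothetical test files.

import base64
import requests

BASE_URL = "http://localhost:5000"  # assumption: local dev server on Flask's default port

# /analyze_face expects JSON with a data-URL-style base64 image; the optional
# 'detector' field now defaults to 'retinaface' on the server.
with open("selfie.jpg", "rb") as f:  # hypothetical test image
    image_b64 = base64.b64encode(f.read()).decode()
face = requests.post(f"{BASE_URL}/analyze_face", json={
    "image": "data:image/jpeg;base64," + image_b64,
    "detector": "retinaface",
}).json()

# /analyze_voice expects a multipart upload under the 'audio' field.
with open("clip.webm", "rb") as f:  # hypothetical browser recording
    voice = requests.post(f"{BASE_URL}/analyze_voice", files={"audio": f}).json()

# /log_checkin combines both results with self-reported sleep and activity.
checkin = requests.post(f"{BASE_URL}/log_checkin", json={
    "emotion": face["emotion"],
    "voice_emotion": voice.get("voice_emotion", "N/A"),
    "sleep_hours": 7,
    "activity_level": "Moderate",
    "detector": "retinaface",
    "image_path": face.get("image_path", "N/A"),
}).json()
print(checkin["feedback"])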
requirements.txt CHANGED
@@ -1,22 +1,19 @@
- Flask==3.0.0
- gunicorn==21.2.0
- pandas==2.1.4
- numpy==1.26.2
- opencv-python-headless==4.8.1.78
- librosa==0.10.1
- soundfile==0.12.1
- torch==2.1.1
- torchvision==0.16.1
- torchaudio==2.1.1
- transformers==4.36.2
- pydub==0.25.1
- Pillow==10.1.0
- audioread==3.0.1
+ # requirements.txt

- # --- CRITICAL: TensorFlow and Keras Compatibility Fix ---
- tensorflow==2.15.1
- tf-keras==2.15.1 # This is the key fix!
- deepface==0.0.92
- mtcnn==0.1.1
- retina-face==0.0.13
- # -------------------------------------------------------------
+ Flask
+ pandas
+ numpy
+ opencv-python
+ deepface
+ librosa
+ torch
+ transformers
+ soundfile
+ gunicorn
+ tf-keras
+ accelerate
+ safetensors
+ mediapipe
+ sentencepiece
+ scipy
+ requests
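Since the rewritten requirements.txt drops every version pin, what pip resolves will drift between installs. A small post-install smoke test (a sketch, not part of the commit) can record which versions were actually pulled in:

# Print the versions pip resolved for the now-unpinned dependencies.
import importlib.metadata as md

for dist in ("Flask", "deepface", "torch", "transformers", "tf-keras", "mediapipe"):
    try:
        print(dist, md.version(dist))
    except md.PackageNotFoundError:
        print(dist, "not installed")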