import gradio as gr
from transformers import pipeline
from PIL import Image

# ---------- 1. Load model ----------
# Vision Transformer fine-tuned for facial-expression classification.
# Loaded once at import time so every request reuses the same pipeline.
emotion_model = pipeline(
    task="image-classification",
    model="trpakov/vit-face-expression",
)

# ---------- 2. Spotify-only playlists ----------
# Maps each emotion label the model can emit to a list of human-readable
# song suggestions.  Each entry embeds a Spotify *search* URL, so no
# Spotify API key is needed.
mood_to_songs = {
    "happy": [
        "Happy – Pharrell Williams (https://open.spotify.com/search/Happy%20Pharrell%20Williams)",
        "Blinding Lights – The Weeknd (https://open.spotify.com/search/Blinding%20Lights%20The%20Weeknd)",
        "Good as Hell – Lizzo (https://open.spotify.com/search/Good%20as%20Hell%20Lizzo)",
    ],
    "sad": [
        "Someone Like You – Adele (https://open.spotify.com/search/Someone%20Like%20You%20Adele)",
        "Happier – Ed Sheeran (https://open.spotify.com/search/Happier%20Ed%20Sheeran)",
        "The Night We Met – Lord Huron (https://open.spotify.com/search/The%20Night%20We%20Met%20Lord%20Huron)",
    ],
    "angry": [
        "Lose Yourself – Eminem (https://open.spotify.com/search/Lose%20Yourself%20Eminem)",
        "Smells Like Teen Spirit – Nirvana (https://open.spotify.com/search/Smells%20Like%20Teen%20Spirit%20Nirvana)",
    ],
    "neutral": [
        "Lo-Fi Beats – playlist (https://open.spotify.com/search/lofi%20beats%20playlist)",
        "Chill Vibes – playlist (https://open.spotify.com/search/chill%20vibes%20playlist)",
    ],
    "surprise": [
        "Can’t Stop the Feeling! – Justin Timberlake (https://open.spotify.com/search/Can%27t%20Stop%20the%20Feeling%20Justin%20Timberlake)",
        "On Top of the World – Imagine Dragons (https://open.spotify.com/search/On%20Top%20of%20the%20World%20Imagine%20Dragons)",
    ],
    "fear": [
        "Calm Piano – playlist (https://open.spotify.com/search/calm%20piano%20playlist)",
        "Deep Focus – playlist (https://open.spotify.com/search/deep%20focus%20playlist)",
    ],
    "disgust": [
        "Feel Good – playlist (https://open.spotify.com/search/feel%20good%20playlist)",
        "Happy Hits – playlist (https://open.spotify.com/search/happy%20hits%20playlist)",
    ],
}
# ---------- 3. AI logic ----------
def analyze_image(image, min_confidence: float = 0.40):
    """Classify the facial expression in *image* and return markdown.

    Parameters
    ----------
    image : numpy.ndarray | None
        Image array handed over by the Gradio ``Image`` component
        (``type="numpy"``); ``None`` when no photo was uploaded.
    min_confidence : float, optional
        Minimum top-1 score required to trust the predicted label.
        Below this threshold (or for labels missing from
        ``mood_to_songs``) the mood falls back to ``"neutral"``.

    Returns
    -------
    str
        Markdown with the detected mood, its confidence, the Spotify
        recommendations for that mood, and the raw model scores.
    """
    if image is None:
        return "Please upload a photo 🙂"

    # The classifier expects a PIL RGB image.
    img = Image.fromarray(image).convert("RGB")
    results = emotion_model(img)

    # Pipeline output is sorted by score; results[0] is the top prediction.
    top = results[0]
    raw_label = top["label"].lower().strip()
    score = float(top["score"])

    # Unknown labels or low-confidence predictions degrade to "neutral".
    mood = (
        raw_label
        if raw_label in mood_to_songs and score >= min_confidence
        else "neutral"
    )
    songs = mood_to_songs[mood]

    text = f"""
### 🎵 Detected mood: **{mood.capitalize()}**
**Confidence:** {score:.2f}

---

### 🔊 Spotify Recommendations:
"""
    for s in songs:
        text += f"- {s}\n"

    # NOTE(review): this trailer was garbled in the extracted source;
    # reconstructed as a markdown sub-section listing every raw score.
    text += "\n#### Raw model predictions\n"
    for r in results:
        text += f"- **{r['label']}** — {r['score']:.2f}\n"
    text += "\n"

    return text


# ---------- 4. UI with TRUE Spotify Dark Mode ----------
# NOTE(review): the original HTML/CSS inside the gr.HTML("""...""") blocks
# was stripped of its tags during extraction — only the visible text
# survived.  The markup below is a minimal, hedged reconstruction of that
# text in a Spotify-dark palette; restore the original markup if it can
# be recovered.
with gr.Blocks() as demo:
    # HEADER CARD
    gr.HTML(
        """
        <div style="text-align:center; padding:18px; border-radius:14px;
                    background:#121212;">
          <h1 style="color:#1DB954; margin:0;">🌊 MoodWave</h1>
          <h3 style="color:#ffffff; margin:6px 0;">Spotify Mood-Based Recommendations</h3>
          <p style="color:#b3b3b3; margin:0;">
            Upload a face photo. MoodWave detects your emotion and plays the right vibe.
          </p>
        </div>
        """
    )

    with gr.Row():
        # LEFT: upload
        with gr.Column():
            gr.HTML(
                """
                <div style="background:#181818; padding:12px; border-radius:10px;">
                  <h3 style="color:#ffffff; margin:0;">📸 Upload a photo</h3>
                  <p style="color:#b3b3b3; margin:4px 0 0;">Use a clear face photo for best accuracy.</p>
                </div>
                """
            )
            image_input = gr.Image(type="numpy", height=280, label="")
            submit_btn = gr.Button("🎧 Analyze Mood", elem_id="mw-btn")

        # RIGHT: result
        with gr.Column():
            gr.HTML(
                """
                <div style="background:#181818; padding:12px; border-radius:10px;">
                  <h3 style="color:#ffffff; margin:0;">🎶 Your Mood & Songs</h3>
                  <p style="color:#b3b3b3; margin:4px 0 0;">Spotify playlist suggestions will appear here.</p>
                </div>
                """
            )
            output_md = gr.Markdown("Waiting for an image...", elem_id="mw-output")

    # FOOTER CARD
    gr.HTML(
        """
        <div style="background:#181818; padding:12px; border-radius:10px;">
          <h3 style="color:#ffffff; margin:0;">🧠 How it works</h3>
          <ul style="color:#b3b3b3;">
            <li>Vision Transformer model (<code>trpakov/vit-face-expression</code>)</li>
            <li>Hugging Face <code>transformers</code> pipeline</li>
            <li>Custom rule-based Spotify recommender system</li>
            <li>Fully deployed on Hugging Face Spaces</li>
          </ul>
        </div>
        """
    )

    # Make button Spotify-green and circular
    # NOTE(review): the original <style> content was empty in the extracted
    # source; this rule targets the button's elem_id per the comment above.
    gr.HTML(
        """
        <style>
          #mw-btn {
            background: #1DB954 !important;
            color: #000000 !important;
            border-radius: 999px !important;
          }
        </style>
        """
    )

    # Wire the button: image in, markdown recommendations out.
    submit_btn.click(analyze_image, image_input, output_md)


if __name__ == "__main__":
    demo.launch()