import gradio as gr
import pandas as pd
import os
from datetime import datetime
from argparse import ArgumentParser
from pathlib import Path
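
# A simple A/B preference test for voice conversion outputs, built with Gradio.
# Each annotator writes to their own results CSV under ./evaluation_results/,
# with one row per judged pair: the task "id", the chosen column name, and the
# annotator's name. The task CSV is expected to provide the columns read in
# get_current_task(): "id", "source_audio", "target_audio", and either
# "audio1"/"audio2" or "coqui_vc_audio"/"rvc_audio".
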
class VoiceConversionEvaluator:
def __init__(self, csv_file="voice_conversion_final.csv"):
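        """Load the evaluation tasks from ``csv_file``; results are written per annotator."""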
self.csv_file = Path(csv_file)
        self.data = pd.read_csv(self.csv_file)  # tasks are presented in file order (no shuffling)
        self.annotator_name = None
        self.results_file = None  # set once the annotator provides their name
        self.current_index = 0
        # The per-annotator results file is created in set_annotator_name().
def set_annotator_name(self, name):
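        """Store the annotator's name and create ./evaluation_results/<name>.csv if it does not exist."""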
if not name.strip():
return False, "Please enter a valid name."
self.annotator_name = name.strip()
self.results_file = f"./evaluation_results/{self.annotator_name}.csv"
if not os.path.exists(self.results_file):
os.makedirs(os.path.dirname(self.results_file), exist_ok=True)
results_df = pd.DataFrame(columns=["id", "submitted_res", "annotator_name"])
results_df.to_csv(self.results_file, index=False)
return True, f"Welcome, {self.annotator_name}! Starting evaluations."
def get_current_task(self):
"""Get the current task data"""
if self.current_index >= len(self.data):
return None
row = self.data.iloc[self.current_index]
        # Samples are shown in file order; fall back to the model-specific column
        # names if the generic "audio1"/"audio2" columns are not present.
        column1 = "audio1" if "audio1" in row else "coqui_vc_audio"
        column2 = "audio2" if "audio2" in row else "rvc_audio"
        displayed_audio1 = row[column1]
        displayed_audio2 = row[column2]
        # Maps the displayed labels back to the underlying column names for logging.
        audio_mapping = {"Audio A": column1, "Audio B": column2}
return {
"id": row["id"],
"source_audio": row["source_audio"],
"target_audio": row["target_audio"],
"displayed_audio1": displayed_audio1,
"displayed_audio2": displayed_audio2,
"audio_mapping": audio_mapping,
"progress": f"Task {self.current_index + 1} of {len(self.data)}",
}
def submit_result(self, choice, task_instruction):
"""Submit the evaluation result"""
if self.current_index >= len(self.data):
return "All tasks completed!", "", "", "", "", gr.update(visible=False)
task = self.get_current_task()
if task is None:
return "All tasks completed!", "", "", "", "", gr.update(visible=False)
        # Map the displayed label ("Audio A"/"Audio B") back to the underlying column name
original_choice = task["audio_mapping"][choice]
# Save result
results_df = pd.read_csv(self.results_file)
new_result = pd.DataFrame(
{
"id": [task["id"]],
"submitted_res": [original_choice],
"annotator_name": [self.annotator_name],
}
)
results_df = pd.concat([results_df, new_result], ignore_index=True)
results_df.to_csv(self.results_file, index=False)
# Move to next task
self.current_index += 1
# Get next task
next_task = self.get_current_task()
if next_task is None:
return (
"🎉 All tasks completed! Thank you for your evaluation.",
"",
"",
"",
"",
gr.update(visible=False),
)
return (
f"✅ Submitted! {next_task['progress']}",
next_task["source_audio"],
next_task["target_audio"],
next_task["displayed_audio1"],
next_task["displayed_audio2"],
gr.update(visible=True),
)
def get_initial_task(self):
"""Get the first task to display"""
task = self.get_current_task()
if task is None:
return "No tasks available", "", "", "", ""
return (
task["progress"],
task["source_audio"],
task["target_audio"],
task["displayed_audio1"],
task["displayed_audio2"],
)
def jump_to_index(self, index):
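        """Jump to the task at the given 1-based index."""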
if 1 <= index <= len(self.data):
self.current_index = index - 1 # Index starts from 0 internally
return True, f"Jumped to task {index}."
return False, "Invalid index."
def go_to_previous(self):
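        """Step back to the previous task, if not already at the first one."""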
if self.current_index > 0:
self.current_index -= 1
return True, f"Jumped to previous task {self.current_index + 1}."
return False, "Already at the first task."
def create_interface():
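    """Build the Gradio Blocks UI and wire its events to a VoiceConversionEvaluator."""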
evaluator = VoiceConversionEvaluator("./audio_pairs.csv")
with gr.Blocks(
title="Voice Conversion Evaluation", theme=gr.themes.Soft()
) as interface:
gr.Markdown("# 🎵 Voice Conversion Evaluation")
gr.Markdown(
"Please enter your name to start the evaluation. 請輸入您的名字以開始評估工作。"
)
with gr.Row():
annotator_name_input = gr.Textbox(
label="Your Name",
placeholder="Enter your full name 輸入您的全名",
lines=1,
)
with gr.Row():
start_btn = gr.Button(
"Start Evaluation 開始評估工作", variant="primary", size="lg"
)
start_status = gr.Textbox(label="Status", interactive=False, value="")
gr.Markdown("---")
with gr.Group(visible=False) as task_group:
gr.Markdown("## 📋 Task Instructions 任務說明")
task_instruction = gr.Textbox(
label="Task Description",
value="""Voice conversion is a technology that transforms a spoken audio clip (source audio) from one speaker so that it sounds like it was spoken by another person (the target speaker), while keeping the original linguistic content unchanged. In other words, the words and intonation stay the same, but the voice identity is changed.
In this task, you will listen to two audio samples (A and B) generated by different voice conversion models. Your job is to choose which one sounds more like the intended target speaker. Both samples you will hear are generated from the same source audio and aim to mimic the voice of the same target speaker. Please listen carefully and choose the one that sounds more similar to the target voice.
Focus on the overall similarity in speaker identity, including voice quality, tone, and naturalness.
Please ignore the background noise of the audio and just judge which one better resembles the same person speaking.
語音轉換是一種技術,可以將某位說話者的語音片段(來源語音)轉換成聽起來像是由另一個人(目標說話者)所說的聲音,同時保持原本的語言內容不變。換句話說,詞句與語調維持相同,但聲音的身份(音色)會改變。
在這個任務中,你將會聆聽兩個由不同語音轉換模型生成的音訊樣本(A 與 B)。你的任務是選出哪一個聽起來更像目標說話者的聲音。這兩個樣本都是由相同的來源語音生成,並且都試圖模仿同一位目標說話者的聲音。請仔細聆聽並選擇與目標聲音更相似的一個。
請專注於整體說話者身份的相似度,包括聲音品質、音色與自然度。請忽略音訊中的背景雜音,只需判斷哪一個更像是同一個人說話。""",
lines=3,
interactive=True,
)
with gr.Row():
with gr.Column():
gr.Markdown("### 🎯 Source Audio 來源語音")
source_audio = gr.Audio(
label="Source Audio 來源語音", interactive=False
)
with gr.Column():
gr.Markdown("### 🎯 Target Audio 目標說話者的聲音")
target_audio = gr.Audio(
label="Target Audio 目標說話者的聲音", interactive=False
)
gr.Markdown("
")
gr.Markdown("---")
gr.Markdown("
")
with gr.Group():
gr.Markdown("## 🔊 Audio Comparison")
gr.Markdown(
"Please listen to both audio samples and choose which one sounds closer to target speaker's voice. 請聆聽兩個音訊樣本,並選擇哪一個聽起來更接近目標說話者的聲音。"
)
with gr.Row():
with gr.Column():
gr.Markdown("### Audio A")
audio1 = gr.Audio(label="Audio A", interactive=False)
with gr.Column():
gr.Markdown("### Audio B")
audio2 = gr.Audio(label="Audio B", interactive=False)
with gr.Row():
choice = gr.Radio(
choices=["Audio A", "Audio B"],
label="Which audio sounds closer to the target speaker's voice? 哪個音訊聽起來更接近目標說話者的聲音?",
value=None,
)
with gr.Row():
submit_btn = gr.Button(
"Submit Evaluation", variant="primary", size="lg"
)
with gr.Row():
index_dropdown = gr.Dropdown(
label="Jump to Task Index 跳到任務索引",
choices=[str(i) for i in range(1, len(evaluator.data) + 1)],
value=None,
)
jump_btn = gr.Button("Jump", variant="secondary")
previous_btn = gr.Button("Previous", variant="secondary")
# Event handlers
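        # Each handler returns one value per component in its listener's outputs list;
        # gr.update() with no arguments leaves the corresponding component unchanged.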
def on_start(name):
if not name.strip():
return (
"Please enter a valid name.",
gr.update(),
gr.update(),
gr.update(),
"",
"",
"",
"",
"",
)
success, message = evaluator.set_annotator_name(name)
if success:
task = evaluator.get_initial_task()
return (
message,
gr.update(visible=False), # Hide start components
gr.update(visible=False),
gr.update(visible=True), # Show task group
task[0],
task[1],
task[2],
task[3],
task[4],
)
else:
return (
message,
gr.update(),
gr.update(),
gr.update(),
"",
"",
"",
"",
"",
)
start_btn.click(
fn=on_start,
inputs=[annotator_name_input],
outputs=[
start_status,
annotator_name_input,
start_btn,
task_group,
start_status,
source_audio,
target_audio,
audio1,
audio2,
],
)
def on_submit(choice_value, instruction):
if choice_value is None:
gr.Warning("Please select an audio choice before submitting.")
return (
gr.update(),
gr.update(),
gr.update(),
gr.update(),
gr.update(),
gr.update(),
)
return evaluator.submit_result(choice_value, instruction)
submit_btn.click(
fn=on_submit,
inputs=[choice, task_instruction],
outputs=[
start_status,
source_audio,
target_audio,
audio1,
audio2,
submit_btn,
],
)
def on_jump(selected_index):
if selected_index is None:
return (
"Please select an index.",
gr.update(),
gr.update(),
gr.update(),
gr.update(),
gr.update(),
)
index = int(selected_index)
success, message = evaluator.jump_to_index(index)
if success:
task = evaluator.get_current_task()
if task:
return (
message,
task["progress"],
task["source_audio"],
task["target_audio"],
task["displayed_audio1"],
task["displayed_audio2"],
)
else:
return message, "No tasks available", "", "", "", ""
return (
message,
gr.update(),
gr.update(),
gr.update(),
gr.update(),
gr.update(),
)
jump_btn.click(
fn=on_jump,
inputs=[index_dropdown],
outputs=[
start_status,
start_status,
source_audio,
target_audio,
audio1,
audio2,
],
)
def on_previous():
success, message = evaluator.go_to_previous()
if success:
task = evaluator.get_current_task()
return (
message,
task["progress"],
task["source_audio"],
task["target_audio"],
task["displayed_audio1"],
task["displayed_audio2"],
)
return (
message,
gr.update(),
gr.update(),
gr.update(),
gr.update(),
gr.update(),
)
previous_btn.click(
fn=on_previous,
inputs=[],
outputs=[
start_status,
start_status,
source_audio,
target_audio,
audio1,
audio2,
],
)
return interface
if __name__ == "__main__":
interface = create_interface()
interface.launch(
server_name="0.0.0.0",
# server_port=7860,
# share=True,
debug=True,
)