Alexandra Zapko-Willmes
committed on
Update app.py
app.py CHANGED
@@ -1,64 +1,43 @@
import gradio as gr
from transformers import pipeline
import pandas as pd
-import io

-
-
+MODEL_MAP = {
+    "MoritzLaurer/deberta-v3-large-zeroshot-v2.0": "MoritzLaurer/deberta-v3-large-zeroshot-v2.0",
+    "MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7": "MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7",
+    "joeddav/xlm-roberta-large-xnli": "joeddav/xlm-roberta-large-xnli"
+}

-
+def classify_items(model_name, items_text, labels_text):
+    classifier = pipeline("zero-shot-classification", model=MODEL_MAP[model_name])
+    items = [item.strip() for item in items_text.split("\n") if item.strip()]
+    labels = [label.strip() for label in labels_text.split(",") if label.strip()]

-
-
-
-
-
-
+    results = []
+    for item in items:
+        out = classifier(item, labels, multi_label=True)
+        scores = {label: prob for label, prob in zip(out["labels"], out["scores"])}
+        scores["item"] = item
+        results.append(scores)

-
-
-    output_lines = []
+    df = pd.DataFrame(results).fillna(0)
+    return df, gr.File.update(value=df.to_csv(index=False), visible=True)

-    for i, question in enumerate(questions, 1):
-        result = classifier(question, labels, multi_label=False)
-        probs = dict(zip(result['labels'], result['scores']))
-
-        output_lines.append(f"{i}. {question}")
-        for label in labels:
-            output_lines.append(f"→ {label}: {round(probs.get(label, 0.0), 3)}")
-        output_lines.append("")
-
-        row = {"Item #": i, "Item": question}
-        row.update({label: round(probs.get(label, 0.0), 3) for label in labels})
-        response_table.append(row)
-
-    return "\n".join(output_lines), None
-
-def download_csv():
-    global response_table
-    if not response_table:
-        return None
-    df = pd.DataFrame(response_table)
-    csv_buffer = io.StringIO()
-    df.to_csv(csv_buffer, index=False)
-    return csv_buffer.getvalue()
-
-# Gradio UI
with gr.Blocks() as demo:
-    gr.Markdown("
-    gr.Markdown("Paste questionnaire items (one per line), and provide your own response labels (comma-separated).")
+    gr.Markdown("## 🧠 Zero-Shot Questionnaire Classifier")

    with gr.Row():
-
-
-
-
-
-
-
-
+        model_choice = gr.Dropdown(choices=list(MODEL_MAP.keys()), label="Choose a zero-shot model")
+
+    item_input = gr.Textbox(label="Enter questionnaire items (one per line)", lines=6, placeholder="I enjoy social gatherings.\nI prefer planning over spontaneity.")
+    label_input = gr.Textbox(label="Enter response options (comma-separated)", placeholder="Strongly disagree, Disagree, Neutral, Agree, Strongly agree")
+
+    run_button = gr.Button("Classify")
+    output_table = gr.Dataframe(label="Classification Results")
+    download_csv = gr.File(label="Download CSV", visible=False)

-
-
+    run_button.click(fn=classify_items,
+                     inputs=[model_choice, item_input, label_input],
+                     outputs=[output_table, download_csv])

demo.launch()
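For reference, here is a minimal sketch (not part of the commit) of the zero-shot call that classify_items() wraps, showing the output shape the function consumes. The model name is one of the entries in MODEL_MAP; running the snippet downloads the model weights from the Hub.

# Illustrative only: what classifier(item, labels, multi_label=True) returns.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-classification",
    model="MoritzLaurer/deberta-v3-large-zeroshot-v2.0",
)

out = classifier(
    "I enjoy social gatherings.",
    ["Strongly disagree", "Disagree", "Neutral", "Agree", "Strongly agree"],
    multi_label=True,  # each label is scored independently
)

# out is a dict with keys "sequence", "labels", and "scores"; "labels" is
# sorted by descending score, so zip(out["labels"], out["scores"]) pairs each
# response option with its probability, just as classify_items() does.
print(dict(zip(out["labels"], out["scores"])))

The CSV download is wired through gr.File.update(value=df.to_csv(index=False), visible=True), the Gradio 3.x update idiom. Since a gr.File component generally takes a file path as its value, a hedged alternative (an assumption for illustration, not the author's code) is to write the CSV to a temporary file and pass that path instead:

import tempfile

def df_to_csv_path(df):
    # Write the results table to a temporary .csv file and return its path,
    # which is the kind of value a gr.File component expects.
    tmp = tempfile.NamedTemporaryFile(mode="w", suffix=".csv", delete=False)
    df.to_csv(tmp, index=False)
    tmp.close()
    return tmp.name

# e.g. return df, gr.File.update(value=df_to_csv_path(df), visible=True)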