init
.env
ADDED
OPENROUTER_API_KEY="sk-or-v1-..."  # use your own key; never commit a live key
BASE_URL="https://openrouter.ai/api/v1"
DATA="data/data.csv"
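On a public Space these values are better supplied as repository secrets than as a committed .env file; load_dotenv() does not override variables that are already set, so the same code works either way. A quick sanity check (a hypothetical snippet, not part of the commit):

from dotenv import load_dotenv
import os

load_dotenv()  # harmless when the variables already come from Space secrets
for var in ("OPENROUTER_API_KEY", "BASE_URL", "DATA"):
    assert os.getenv(var), f"{var} is not set"
print("config OK")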
app.py
ADDED

from models import *
import gradio as gr

# OpenRouter model ids. Note that the "ChatGPT" option is actually served by
# DeepSeek's free chat model; there is no free GPT-4 endpoint on OpenRouter.
GPT_4 = "deepseek/deepseek-chat-v3-0324:free"
PHI_4 = "microsoft/phi-4"
PHI_3 = "microsoft/phi-3-medium-128k-instruct:free"
GEMMA_3_27B = "google/gemma-3-27b-it:free"
GEMINI_FLASH_THINK = "google/gemini-2.0-flash-thinking-exp:free"
GEMINI_FLASH = "google/gemini-flash-1.5-8b-exp"
QWEN_32B = "qwen/qwq-32b:free"
QWEN_25 = "qwen/qwen2.5-vl-72b-instruct:free"  # noted as error-prone by the author
DEEPSEEK_R1 = "deepseek/deepseek-r1:free"
DEEPSEEK_R1_ZERO = "deepseek/deepseek-r1-zero:free"
META_LLAMA_MODEL = "meta-llama/Llama-3.3-70B-Instruct:free"
MISTRAL_SMALL_MODEL = "mistralai/mistral-small-3.1-24b-instruct:free"
MISTRAL_NEMO = "mistralai/mistral-nemo:free"
ZEPHYR = "huggingfaceh4/zephyr-7b-beta:free"
OLYMPIC_CODER = "open-r1/olympiccoder-32b:free"
LEARN = "google/learnlm-1.5-pro-experimental:free"
REKA_FLASH = "rekaai/reka-flash-3:free"
OPEN_CHAT = "openchat/openchat-7b:free"
TOPPY = "undi95/toppy-m-7b:free"
MOONLIGHT = "moonshotai/moonlight-16b-a3b-instruct:free"

CONCISE_ENGLISH_PROMPT = "Answer in short and precise English sentences."

# Single source of truth mapping each Radio label to its OpenRouter model id.
MODEL_MAP = {
    "ChatGPT": GPT_4,
    "Phi-4": PHI_4,
    "Phi-3": PHI_3,
    "Gemma-3": GEMMA_3_27B,
    "Gemini-2-Flash-Think": GEMINI_FLASH_THINK,
    "Gemini-Flash": GEMINI_FLASH,
    "QwQ-32B": QWEN_32B,
    "Qwen2.5": QWEN_25,
    "DeepSeek-R1": DEEPSEEK_R1,
    "DeepSeek-R1-Zero": DEEPSEEK_R1_ZERO,
    "Llama-3.3": META_LLAMA_MODEL,
    "Mistral-Small": MISTRAL_SMALL_MODEL,
    "Mistral-Nemo": MISTRAL_NEMO,
    "Zephyr": ZEPHYR,
    "Olympic-Coder": OLYMPIC_CODER,
    "LearnLM": LEARN,
    "Reka-Flash": REKA_FLASH,
    "OpenChat": OPEN_CHAT,
    "Toppy": TOPPY,
    "MoonLight": MOONLIGHT,
}

def get_model(title, dev, model, name, user_input, system_prompt):
    # Admin shortcuts: entering "data" or "text" as the query returns the saved log.
    if user_input.lower() == "data":
        return get_data()
    if user_input.lower() == "text":
        return get_text()

    if not name:
        return "Enter Your Name!"
    if not model:
        return "Select an AI Model!"

    chain = ModelChain()
    prompt = system_prompt + " " + CONCISE_ENGLISH_PROMPT

    # Map the selected label to its model id and dispatch.
    model_id = MODEL_MAP.get(model)
    if model_id is None:
        return "Invalid Model Name: " + model
    return chain.generate_response(model_id, name, user_input, prompt)

def main():
    gr.Interface(
        fn=get_model,
        inputs=[
            gr.Markdown("# Switch AI"),
            gr.Markdown("### by Kalash"),
            gr.Radio(list(MODEL_MAP), label="Choose AI Model", value="ChatGPT"),
            gr.Textbox(label="Your Name", placeholder="Enter Your Name"),
            gr.Textbox(label="Your Query", placeholder="Enter Your Question"),
            gr.Textbox(label="System Prompt", placeholder="Enter Custom System Prompt (Optional)"),
        ],
        outputs=[gr.Textbox(label="AI Response", lines=25)],
        flagging_mode="never",
    ).launch(share=True)
    # ).launch(share=False, server_port=54321)

if __name__ == '__main__':
    main()
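The pipeline can be smoke-tested without the UI by calling get_model directly; the first two parameters absorb the two Markdown components that gr.Interface passes through as inputs. A hypothetical local check, assuming the .env above is in place:

from app import get_model

# title and dev correspond to the two Markdown inputs and are unused
reply = get_model("", "", "Mistral-Nemo", "Kalash",
                  "Explain overfitting in two sentences.",
                  "You are a concise tutor.")
print(reply)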
models.py
ADDED

from dotenv import load_dotenv
import os
import openai
import pandas as pd

load_dotenv()
path = os.getenv("DATA")  # CSV log location, e.g. data/data.csv

def add_new(model, name, query, prompt, answer):
    """Append one interaction to the CSV log, creating the file on first use."""
    data = {"name": name, "model": model, "query": query, "prompt": prompt, "answer": answer}
    row = pd.DataFrame([data])
    if not os.path.isfile(path):
        os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
        with open(path, 'w') as file:
            file.write("name,model,query,prompt,answer\n")
        print("File Created")
    old_df = pd.read_csv(path)
    new_df = pd.concat([old_df, row], ignore_index=True)
    new_df.to_csv(path, index=False)
    print("Saved.")

def get_data():
    """Return the log rendered as a plain-text table."""
    df = pd.read_csv(path)
    return df.to_string()

def get_text():
    """Return the raw contents of the CSV log."""
    with open(path, 'r', encoding='utf-8') as file:
        return file.read()

class ModelChain:
    def __init__(self):
        self.client = self.generate_client(os.getenv("OPENROUTER_API_KEY"), os.getenv("BASE_URL"))
        # Reserved for per-model conversation history; not used yet.
        self.deepseek_messages = []
        self.gemini_messages = []

    def generate_client(self, api_key, url):
        # OpenRouter exposes an OpenAI-compatible API, so the stock OpenAI
        # client works once it is pointed at the OpenRouter base URL.
        return openai.OpenAI(api_key=api_key, base_url=url)

    def generate_response(self, model, name, query, prompt):
        messages = [{"role": "system", "content": prompt},
                    {"role": "user", "content": query}]
        try:
            result = self.client.chat.completions.create(
                model=model,
                messages=messages,
            )
            answer = result.choices[0].message.content
            add_new(model, name, query, prompt, answer)
            return answer
        except Exception as e:
            print(f"Response Error: {e}")
            # Overloaded free-tier models tend to surface this exact message.
            if str(e) == "'NoneType' object is not subscriptable":
                return "This AI model might be busy at the moment; try another model."
            return f"Response Error: {e}"

def main():
    ModelChain()  # smoke test: instantiating verifies the client configuration

if __name__ == "__main__":
    main()
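ModelChain also doubles as a minimal OpenRouter client on its own; every successful call appends a row to the CSV log via add_new. A sketch, assuming the .env above is configured:

from models import ModelChain

chain = ModelChain()
answer = chain.generate_response(
    "mistralai/mistral-nemo:free",  # any OpenRouter model id
    "tester",                       # name recorded in the log
    "Say hello in one sentence.",   # user query
    "Answer in short and precise English sentences.",  # system prompt
)
print(answer)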
requirements.txt
ADDED

huggingface_hub==0.25.2
gradio
openai
python-dotenv
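Note that python-dotenv, not the unrelated dotenv distribution on PyPI, is the package providing the "from dotenv import load_dotenv" import used in models.py. Only huggingface_hub is pinned; pinning gradio and openai as well would make Space builds more reproducible. To run locally: pip install -r requirements.txt, then python app.py.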