Update app.py
app.py CHANGED

@@ -7,54 +7,54 @@ app = FastAPI()
 
 # Define available models (you can expand this list)
 AVAILABLE_MODELS = {
-    … (46 removed entries; the previous model list is not preserved in this view)
+    "openai/gpt-4.1": "OpenAI GPT-4.1",
+    "openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
+    "openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
+    "openai/gpt-4o": "OpenAI GPT-4o",
+    "openai/gpt-4o-mini": "OpenAI GPT-4o mini",
+    "openai/o4-mini": "OpenAI o4-mini",
+    "microsoft/MAI-DS-R1": "MAI-DS-R1",
+    "microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
+    "microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
+    "microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
+    "microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
+    "microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
+    "microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
+    "microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
+    "microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
+    "microsoft/Phi-4": "Phi-4",
+    "microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
+    "microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
+    "ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
+    "ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
+    "mistral-ai/Codestral-2501": "Codestral 25.01",
+    "cohere/Cohere-command-r": "Cohere Command R",
+    "cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
+    "cohere/Cohere-command-r-plus": "Cohere Command R+",
+    "cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
+    "deepseek/DeepSeek-R1": "DeepSeek-R1",
+    "deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
+    "meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
+    "meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
+    "meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
+    "meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
+    "meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
+    "meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
+    "meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
+    "meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
+    "meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
+    "meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
+    "mistral-ai/Ministral-3B": "Ministral 3B",
+    "mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
+    "mistral-ai/Mistral-Nemo": "Mistral Nemo",
+    "mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
+    "mistral-ai/Mistral-small": "Mistral Small",
+    "cohere/cohere-command-a": "Cohere Command A",
+    "core42/jais-30b-chat": "JAIS 30b Chat",
+    "mistral-ai/mistral-small-2503": "Mistral Small 3.1"
 }
 
+
 async def generate_ai_response(prompt: str, model: str):
     # Configuration for unofficial GitHub AI endpoint
     token = os.getenv("GITHUB_TOKEN")
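The hunk ends just as generate_ai_response begins, so only the GITHUB_TOKEN lookup is visible. For context, here is a minimal sketch of how such a handler could validate the requested model against AVAILABLE_MODELS and call the token-authenticated endpoint. The base URL, payload shape, and error handling are assumptions for illustration (the diff only calls it an "unofficial GitHub AI endpoint"); GitHub Models-style services generally expose an OpenAI-compatible chat-completions API, but the app's actual request code is not shown here.

# Sketch only: GITHUB_AI_ENDPOINT and the request/response shape are
# assumptions; AVAILABLE_MODELS is the dict defined above in app.py.
import os

import httpx
from fastapi import HTTPException

GITHUB_AI_ENDPOINT = "https://models.inference.ai.azure.com"  # assumed base URL


async def generate_ai_response(prompt: str, model: str) -> str:
    # Reject model ids that are not in the registry defined above.
    if model not in AVAILABLE_MODELS:
        raise HTTPException(status_code=400, detail=f"Unknown model: {model}")

    token = os.getenv("GITHUB_TOKEN")
    if not token:
        raise HTTPException(status_code=500, detail="GITHUB_TOKEN is not set")

    # OpenAI-compatible chat-completions request, authenticated with the token.
    async with httpx.AsyncClient(timeout=60.0) as client:
        resp = await client.post(
            f"{GITHUB_AI_ENDPOINT}/chat/completions",
            headers={"Authorization": f"Bearer {token}"},
            json={
                "model": model,
                "messages": [{"role": "user", "content": prompt}],
            },
        )
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]

Keeping the model registry as a plain id-to-display-name dict also makes it cheap for a route to expose the list to clients (for example, returning AVAILABLE_MODELS from a GET endpoint), which may be why the commit expands it rather than hard-coding a single model.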