NIRAJz committed on
Commit
ebd1770
·
verified ·
1 Parent(s): 68d56ed

Update config.py

Browse files
Files changed (1) hide show
  1. config.py +43 -46
config.py CHANGED
@@ -1,47 +1,44 @@
1
import os

from dotenv import load_dotenv
from pydantic_settings import BaseSettings

# Pull variables from a local .env file into the process environment
# (silently does nothing when the file is missing).
load_dotenv()


class Settings(BaseSettings):
    """Central application configuration, read from the environment with defaults."""

    # API Keys
    GROQ_API_KEY: str = os.getenv("GROQ_API_KEY", "")
    OPENAI_API_KEY: str = os.getenv("OPENAI_API_KEY", "")

    # Default models
    DEFAULT_GROQ_MODEL: str = os.getenv("DEFAULT_GROQ_MODEL", "openai/gpt-oss-20b")
    DEFAULT_OPENAI_MODEL: str = os.getenv("DEFAULT_OPENAI_MODEL", "gpt-4o")

    # Available models
    AVAILABLE_GROQ_MODELS: list = [
        "openai/gpt-oss-20b",
        "openai/gpt-oss-120b",
        "llama-3.3-70b-versatile",
        "llama-3.1-8b-instant",
        "meta-llama/llama-guard-4-12b",
    ]

    AVAILABLE_OPENAI_MODELS: list = [
        "gpt-4o",
        "gpt-4-turbo",
        "gpt-4",
        "gpt-3.5-turbo",
    ]

    # Evaluation settings
    DEFAULT_METRICS: list = ["accuracy", "faithfulness", "relevance", "toxicity"]
    # Any value other than (case-insensitive) "true" disables the cache.
    CACHE_ENABLED: bool = os.getenv("CACHE_ENABLED", "True").lower() == "true"
    CACHE_DIR: str = "./.cache"

    # LangGraph settings
    MAX_CONCURRENT: int = 5
    TIMEOUT: int = 30

    # API Provider
    DEFAULT_API_PROVIDER: str = os.getenv("DEFAULT_API_PROVIDER", "groq")

    class Config:
        env_file = ".env"


# Module-level singleton imported by the rest of the application.
settings = Settings()
 
1
import os

from dotenv import load_dotenv
from pydantic_settings import BaseSettings

# Pull variables from a local .env file into the process environment
# (silently does nothing when the file is missing).
load_dotenv()


class Settings(BaseSettings):
    """Central application configuration, read from the environment with defaults.

    Values are resolved once at import time via ``os.getenv``; pydantic's own
    env handling (``Config.env_file``) remains as a fallback layer.
    """

    # Groq API
    GROQ_API_KEY: str = os.getenv("GROQ_API_KEY", "")
    OPENAI_API_KEY: str = os.getenv("OPENAI_API_KEY", "")
    DEFAULT_MODEL: str = os.getenv("DEFAULT_MODEL", "openai/gpt-oss-20b")

    # Available Groq models
    # NOTE: parametrized as list[str] so pydantic validates element types
    # instead of accepting any list.
    AVAILABLE_GROQ_MODELS: list[str] = [
        "openai/gpt-oss-20b",
        "openai/gpt-oss-120b",
        "llama-3.3-70b-versatile",
        "llama-3.1-8b-instant",
        "meta-llama/llama-guard-4-12b",
    ]

    AVAILABLE_OPENAI_MODELS: list[str] = [
        "gpt-4o",
        "gpt-4-turbo",
        "gpt-4",
        "gpt-3.5-turbo",
    ]

    # Evaluation settings - Use /tmp for cache in Spaces
    DEFAULT_METRICS: list[str] = ["accuracy", "faithfulness", "relevance", "toxicity"]
    # Any value other than (case-insensitive) "true" disables the cache.
    CACHE_ENABLED: bool = os.getenv("CACHE_ENABLED", "True").lower() == "true"
    # Prefer a writable /tmp cache (Hugging Face Spaces has a read-only app
    # dir); fall back to a local directory, and allow a CACHE_DIR override.
    CACHE_DIR: str = os.getenv(
        "CACHE_DIR", "/tmp/.cache" if os.path.exists("/tmp") else "./.cache"
    )

    # API Provider
    DEFAULT_API_PROVIDER: str = os.getenv("DEFAULT_API_PROVIDER", "groq")

    # LangGraph settings
    MAX_CONCURRENT: int = 5
    TIMEOUT: int = 30

    class Config:
        env_file = ".env"


# Module-level singleton imported by the rest of the application.
settings = Settings()