shoom013 committed (verified)
Commit f19c02a · Parent(s): 9717d8b

Update app.py

Files changed (1): app.py +4 -4
app.py CHANGED
@@ -25,8 +25,8 @@ from io import StringIO
 last = 0
 CHROMA_DATA_PATH = "chroma_data/"
 EMBED_MODEL = "sentence-transformers/all-MiniLM-L6-v2" #"BAAI/bge-m3"
-#LLM_NAME = "mistralai/Mistral-Nemo-Instruct-2407"
-LLM_NAME = "W4D/YugoGPT-7B-Instruct-GGUF"
+LLM_NAME = "mistralai/Mistral-Nemo-Instruct-2407"
+#LLM_NAME = "W4D/YugoGPT-7B-Instruct-GGUF"
 # all-MiniLM-L6-v2
 CHUNK_SIZE = 800
 CHUNK_OVERLAP = 50
@@ -76,11 +76,11 @@ Settings.llm = HuggingFaceInferenceAPI(model_name=LLM_NAME,
 # stopping_ids=[50278, 50279, 50277, 1, 0],
 generate_kwargs={"temperature": temperature, "top_p":top_p, "repetition_penalty": repetition_penalty,
 "presence_penalty": presence_penalty, "frequency_penalty": frequency_penalty,
-"top_k": top_k, "do_sample": False},
+"top_k": top_k, "do_sample": False, "task": None},
 # tokenizer_kwargs={"max_length": 4096},
 tokenizer_name=LLM_NAME,
 # provider="auto",
-task="None"
+# task="None"
 )
 # "BAAI/bge-m3"
 Settings.embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
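
For context, a minimal sketch of what the configured block might look like after this change. The import paths and the concrete values for the sampling parameters are assumptions (they depend on the LlamaIndex version and on other parts of app.py); only the LLM_NAME swap and the relocation of "task" into generate_kwargs come from the diff itself.

    # Sketch of the post-commit configuration; import paths and sampling
    # values are assumptions, not taken from the diff.
    from llama_index.core import Settings
    from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
    from llama_index.embeddings.huggingface import HuggingFaceEmbedding

    EMBED_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
    LLM_NAME = "mistralai/Mistral-Nemo-Instruct-2407"

    # Placeholder generation settings; the real values are defined elsewhere in app.py.
    temperature, top_p, top_k = 0.7, 0.95, 50
    repetition_penalty, presence_penalty, frequency_penalty = 1.1, 0.0, 0.0

    # After the commit, "task" is passed inside generate_kwargs instead of as a
    # separate task="None" argument to HuggingFaceInferenceAPI.
    Settings.llm = HuggingFaceInferenceAPI(
        model_name=LLM_NAME,
        tokenizer_name=LLM_NAME,
        generate_kwargs={
            "temperature": temperature,
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "presence_penalty": presence_penalty,
            "frequency_penalty": frequency_penalty,
            "top_k": top_k,
            "do_sample": False,
            "task": None,
        },
    )

    Settings.embed_model = HuggingFaceEmbedding(model_name=EMBED_MODEL)

With Settings configured this way, any index or query engine built afterwards in app.py defaults to the Mistral-Nemo-Instruct-2407 inference endpoint for generation and the all-MiniLM-L6-v2 model for embeddings.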