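"""Gradio demo for a retrieval-augmented chatbot over a PDF collection.

The app wires a ChromaRetriever (embedding PDFs with a BiomedModel) into a
RetrievalChatbot and exposes a chat UI with optional image uploads.
"""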
import os
import time
import asyncio

import gradio as gr
import openai

import core
import models

# OpenAI credentials and endpoint are read from the environment at startup.
api_key = os.environ["OPENAI_API_KEY"]
api_base = os.environ["OPENAI_API_BASE"]
def chatbot_initialize():
    # Build the PDF retriever and wrap it in a retrieval-augmented chatbot.
    retriever = core.retriever.ChromaRetriever(
        pdf_dir="",
        collection_name="pdfs_1000",
        split_args={"size": 2048, "overlap": 10},
        # embedding_model="text-embedding-ada-002"
        embed_model=models.BiomedModel(),
    )
    return core.chatbot.RetrievalChatbot(retriever=retriever)
async def respond(query, chat_history, img_path_list, chat_history_string):
    """Answer a query (optionally with images) and update the chat widgets."""
    time1 = time.time()
    global Chatbot
    result = await Chatbot.response(query, image_paths=img_path_list)
    response = result["answer"]
    logs = result["logs"]
    # Titles of the retrieved articles; currently unused because the titles textbox is commented out below.
    titles = "\n".join(list(result["titles"]))
    chat_history.append((query, response))
    if img_path_list is None:
        chat_history_string += "Query: " + query + "\nImage: None" + "\nResponse: " + response + "\n\n\n"
    else:
        # gr.File with type="filepath" yields plain path strings, so take the basename of each.
        image_names = "\n".join(os.path.basename(path) for path in img_path_list)
        chat_history_string += "Query: " + query + "\nImages: " + image_names + "\nResponse: " + response + "\n\n\n"
    time2 = time.time()
    print(f"Total: {time2 - time1}")
    return "", chat_history, chat_history_string
if __name__ == "__main__":
    # Module-level chatbot instance shared with respond() via `global Chatbot`.
    Chatbot = chatbot_initialize()

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column(scale=2):
                chatbot = gr.Chatbot()
                msg = gr.Textbox(label="Query", show_label=True)
                imgs = gr.File(file_count="multiple", file_types=["image"], type="filepath", label="Upload Images")
                clear = gr.ClearButton([msg, chatbot])
            with gr.Column(scale=1):
                # titles = gr.Textbox(label="Referenced Article Titles", show_label=True, show_copy_button=True, interactive=False)
                history = gr.Textbox(label="Copy Chat History", show_label=True, show_copy_button=True, interactive=False, max_lines=5)
        msg.submit(respond, inputs=[msg, chatbot, imgs, history], outputs=[msg, chatbot, history])

    demo.queue().launch()