Removed Semantic Scholar dependency and all related code references
- app.py +1 -25
- requirements.txt +0 -1
- services/research/papers.py +0 -32
    	
app.py (CHANGED)

```diff
@@ -14,7 +14,6 @@ from core.session import session_manager
 from core.memory import check_redis_health
 from core.coordinator import coordinator
 from core.errors import translate_error
-from services.research.papers import find_papers
 import logging
 
 # Set up logging
@@ -456,27 +455,6 @@ with tab1:
                 if any(word in final_prompt.lower() for word in ["vitamin", "drug", "metformin", "CRISPR"]):
                     tags.append("🧬 Scientific Knowledge")
                 st.write(", ".join(tags) if tags else "General Knowledge")
-
-                # Show research papers if scientific topic
-                research_keywords = ["study", "research", "paper", "effectiveness", "clinical trial", "vitamin", "drug", "metformin", "CRISPR"]
-                if any(kw in final_prompt.lower() for kw in research_keywords):
-                    st.markdown("**Related Research Papers:**")
-                    with st.spinner("Searching academic databases..."):
-                        try:
-                            papers = find_papers(final_prompt, limit=3)
-                            if papers:
-                                for i, paper in enumerate(papers):
-                                    with st.expander(f"📄 {paper['title'][:60]}..."):
-                                        st.markdown(f"**Authors:** {', '.join(paper['authors'][:3])}")
-                                        st.markdown(f"**Year:** {paper['year']}")
-                                        st.markdown(f"**Citations:** {paper['citation_count']}")
-                                        st.markdown(f"**Venue:** {paper['venue']}")
-                                        st.markdown(f"**Abstract:** {paper['abstract'][:200]}...")
-                                        st.markdown(f"[View Full Paper]({paper['url']})")
-                            else:
-                                st.info("No relevant academic papers found for this topic.")
-                        except Exception as e:
-                            st.warning(f"Could not fetch research papers: {translate_error(e)}")
 
             except Exception as e:
                 st.error(f"Evaluation failed: {translate_error(e)}")
@@ -604,18 +582,16 @@ with tab3:
     ### 🧠 Core Features
     - **Multi-model coordination**: Combines local Ollama models with cloud-based Hugging Face endpoints
     - **Live web search**: Integrates with Tavily API for current information
-    - **Academic research**: Accesses peer-reviewed papers via Semantic Scholar
     - **Persistent memory**: Uses Redis for conversation history storage
     - **Hierarchical reasoning**: Fast local responses with deep cloud analysis
 
     ### 🛠️ Technical Architecture
     - **Primary model**: Ollama (local processing for fast responses)
     - **Secondary model**: Hugging Face Inference API (deep analysis)
-    - **External data**: Web search
+    - **External data**: Web search and weather data
     - **Memory system**: Redis-based session management
 
     ### 📊 Evaluation Tools
     - Behavior testing with sample prompts
     - Performance metrics and analytics
-    - Research paper integration for scientific topics
     """)
```
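One editorial flag on the surviving context lines: the tag check compares keywords against `final_prompt.lower()`, yet the keyword list still contains the uppercase `"CRISPR"`, which can never match a lowercased string. A minimal sketch of a case-insensitive variant (keyword list taken from the diff above; the helper name is illustrative, not part of the commit):

```python
# Keep keywords lowercase so they can match a lowercased prompt.
SCIENCE_KEYWORDS = ["vitamin", "drug", "metformin", "crispr"]

def science_tags(final_prompt: str) -> list[str]:
    tags = []
    lowered = final_prompt.lower()
    if any(word in lowered for word in SCIENCE_KEYWORDS):
        tags.append("🧬 Scientific Knowledge")
    return tags

# "How does CRISPR editing work?" now matches, since both sides are lowercase.
print(science_tags("How does CRISPR editing work?"))
```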
    	
requirements.txt (CHANGED)

```diff
@@ -10,4 +10,3 @@ docker==6.1.3
 pygame==2.5.2
 pydantic==1.10.7
 typing-extensions>=4.5.0
-semanticscholar>=0.1.8
```
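With the pin gone, a clean environment built from requirements.txt should no longer include the package. A quick sanity check (illustrative only, not part of the commit):

```python
# Confirm the dependency is absent after rebuilding the environment.
try:
    import semanticscholar  # noqa: F401
    print("semanticscholar is still installed; rebuild the environment")
except ImportError:
    print("semanticscholar is gone, as expected after this commit")
```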
    	
services/research/papers.py (DELETED)

```diff
@@ -1,32 +0,0 @@
-from semanticscholar import SemanticScholar
-import logging
-
-logger = logging.getLogger(__name__)
-
-scholar = SemanticScholar(timeout=10)
-
-def find_papers(query: str, limit: int = 5):
-    """
-    Search academic papers via Semantic Scholar API.
-    Returns simplified paper metadata.
-    """
-    try:
-        results = scholar.search_paper(query, limit=limit)
-        papers = []
-        for paper in results:
-            papers.append({
-                'title': paper.title,
-                'authors': [author.name for author in paper.authors],
-                'abstract': paper.abstract,
-                'year': paper.year,
-                'url': paper.url,
-                'citation_count': getattr(paper, 'citationCount', 0),
-                'venue': getattr(paper, 'venue', '')
-            })
-        return papers
-    except Exception as e:
-        logger.error(f"Paper search failed: {e}")
-        return []
-
-# Example usage:
-# papers = find_papers("vitamin D immune system")
```
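If the research-paper feature ever returns, guarding the import would let app.py degrade gracefully when the extra dependency is missing. A hedged sketch (the module path matches the file deleted above; the empty-list fallback is an assumption, not the author's design):

```python
# Optional import: the app keeps working when the research service is absent.
try:
    from services.research.papers import find_papers
except ImportError:
    def find_papers(query: str, limit: int = 5) -> list[dict]:
        # Fallback stub: report no papers rather than crash at import time.
        return []

# Callers can keep the old pattern unchanged:
# papers = find_papers(final_prompt, limit=3)
# and show a "No relevant academic papers found" notice when the list is empty.
```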
