#!/usr/bin/env python3
"""
Docking@HOME - Complete Launch Script

This script starts the full Docking@HOME platform including:
- AutoDock GPU integration
- Web-based GUI
- Real-time job monitoring
- Distributed computing support

Authors: OpenPeer AI, Riemann Computing Inc., Bleunomics, Andrew Magdy Kamal
"""

import sys
import asyncio
import argparse
from pathlib import Path

# Add this script's directory to sys.path so the docking_at_home package
# resolves when the script is launched directly (python start.py).
sys.path.insert(0, str(Path(__file__).parent))

from docking_at_home.gui import start_gui


def print_banner():
    """Print the startup banner to stdout."""
    banner = """
╔═══════════════════════════════════════════════════════════════════════╗
║                                                                       ║
║                         Docking@HOME v1.0                             ║
║                                                                       ║
║         Distributed Molecular Docking with GPU Acceleration           ║
║                                                                       ║
║   Authors: OpenPeer AI                                                ║
║            Riemann Computing Inc.                                     ║
║            Bleunomics                                                 ║
║            Andrew Magdy Kamal                                         ║
║                                                                       ║
║   🧬 AutoDock Suite 4.2.6 Integration                                 ║
║   🚀 CUDA/CUDPP GPU Acceleration                                      ║
║   🌐 BOINC Distributed Computing                                      ║
║   🔗 The Decentralized Internet SDK                                   ║
║   🤖 Cloud Agents AI Orchestration                                    ║
║                                                                       ║
╚═══════════════════════════════════════════════════════════════════════╝
"""
    print(banner)


def _build_parser():
    """Build and return the command-line argument parser for the launcher."""
    parser = argparse.ArgumentParser(
        description="Docking@HOME - Distributed Molecular Docking Platform",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Start GUI server (default)
  python start.py

  # Start on custom host/port
  python start.py --host 0.0.0.0 --port 8888

  # Show help
  python start.py --help

Support:
  📧 Email: andrew@bleunomics.com
  🤗 Issues: https://huggingface.co/OpenPeerAI/DockingAtHOME/discussions
  📚 Docs: https://huggingface.co/OpenPeerAI/DockingAtHOME
"""
    )
    parser.add_argument(
        '--host',
        default='localhost',
        help='Host to bind the server to (default: localhost)'
    )
    parser.add_argument(
        '--port',
        type=int,
        default=8080,
        help='Port to bind the server to (default: 8080)'
    )
    parser.add_argument(
        '--workers',
        type=int,
        default=2,
        help='Number of concurrent worker tasks (default: 2)'
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        help='Enable debug mode'
    )
    parser.add_argument(
        '--no-browser',
        action='store_true',
        help="Don't automatically open browser"
    )
    return parser


def _check_python_deps():
    """Verify the web-stack dependencies are importable; exit(1) if not.

    The imports are intentionally unused — their only purpose is to probe
    that the packages are installed before the server tries to start.
    """
    try:
        import fastapi      # noqa: F401
        import uvicorn      # noqa: F401
        import websockets   # noqa: F401
    except ImportError as e:
        # Fix: the original bound `e` but never used it; report which
        # dependency is actually missing to speed up troubleshooting.
        print("❌ Missing required dependencies!")
        print(f"   (could not import: {e.name})")
        print("\nPlease install:")
        print("  pip install -r requirements.txt")
        print("\nOr install manually:")
        print("  pip install fastapi uvicorn[standard] websockets python-multipart")
        sys.exit(1)


def _check_autodock():
    """Report which AutoDock executable (if any) is on PATH.

    Checks the GPU build first, then the classic CPU builds. A missing
    executable is non-fatal: the platform falls back to simulation mode.
    """
    import shutil
    for exe in ('autodock_gpu', 'autodock4', 'autodock'):
        if shutil.which(exe):
            print(f"✅ Found AutoDock: {exe}")
            return
    print("⚠️ AutoDock not found in PATH")
    print("   Running in simulation mode")
    print("   To use real AutoDock, install from:")
    print("   https://autodock.scripps.edu/")


def _check_cuda():
    """Report GPU availability, inferred from the presence of nvidia-smi."""
    import shutil
    if shutil.which('nvidia-smi'):
        print("✅ CUDA GPU detected")
    else:
        print("⚠️ CUDA not detected - CPU mode only")


def _open_browser_later(url):
    """Open *url* in the default browser from a background daemon thread.

    The short sleep gives the server a moment to start listening before
    the browser issues its first request.
    """
    import webbrowser
    import threading

    def _open():
        import time
        time.sleep(1.5)  # wait for server to start
        webbrowser.open(url)

    threading.Thread(target=_open, daemon=True).start()


def main():
    """Main entry point: parse args, run environment checks, start the GUI.

    Exits with status 1 on missing dependencies or on an unexpected server
    error (unless --debug is set, in which case the traceback is re-raised).
    """
    args = _build_parser().parse_args()

    print_banner()
    _check_python_deps()
    _check_autodock()
    _check_cuda()

    print("\n" + "=" * 70)
    print(f"🚀 Starting server on http://{args.host}:{args.port}")
    print("=" * 70 + "\n")

    if not args.no_browser:
        _open_browser_later(f"http://{args.host}:{args.port}")

    # NOTE(review): --workers is parsed but never forwarded; start_gui()
    # only accepts host/port here. Confirm whether the GUI layer should
    # receive the worker count, or drop the flag.
    try:
        start_gui(host=args.host, port=args.port)
    except KeyboardInterrupt:
        print("\n\n👋 Server stopped. Thank you for using Docking@HOME!")
    except Exception as e:
        print(f"\n❌ Error: {e}")
        if args.debug:
            raise
        sys.exit(1)


if __name__ == "__main__":
    main()