Spaces:
Sleeping
Sleeping
File size: 1,513 Bytes
526927a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 |
# Docker Compose stack for the Lily LLM deployment:
#   - lily-llm-api-gpu:  main API server, built from Dockerfile.gpu, pinned to GPU 0
#   - latex-ocr-service: CPU-based LaTeX-OCR worker sharing the uploads/FAISS volumes
#   - hearth-chat:       Node 18 chat front-end mounted from a sibling package
version: '3.8'

services:
  lily-llm-api-gpu:
    build:
      context: .
      dockerfile: Dockerfile.gpu
    container_name: lily-llm-api-gpu
    ports:
      - "8001:8001"
    volumes:
      - ./uploads:/app/uploads
      - ./vector_stores:/app/vector_stores
      - ./latex_ocr_faiss_stores:/app/latex_ocr_faiss_stores
      - ./lily_llm_media:/app/lily_llm_media
      - ./hearth_llm_model:/app/hearth_llm_model
    environment:
      - CUDA_VISIBLE_DEVICES=0
      - PYTHONPATH=/app
      - LILY_LLM_ENV=production
    # Reserve one NVIDIA GPU for this service (requires the NVIDIA container runtime).
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    restart: unless-stopped
    networks:
      - lily-network

  # LaTeX-OCR dedicated container (CPU-based)
  latex-ocr-service:
    build:
      context: .
      dockerfile: Dockerfile.latex-ocr
    container_name: latex-ocr-service
    volumes:
      - ./uploads:/app/uploads
      - ./latex_ocr_faiss_stores:/app/latex_ocr_faiss_stores
    environment:
      - PYTHONPATH=/app
    restart: unless-stopped
    networks:
      - lily-network

  # Hearth Chat service (separate container)
  hearth-chat:
    image: node:18-alpine
    container_name: hearth-chat
    working_dir: /app
    volumes:
      # NOTE(review): mounts a sibling directory outside this build context — verify it exists on the host.
      - ../hearth_chat_package:/app
    ports:
      - "8000:8000"
    command: ["npm", "start"]
    restart: unless-stopped
    networks:
      - lily-network

networks:
  lily-network:
    driver: bridge

# Declared but not referenced by any service above — presumably reserved for future use; confirm before removing.
volumes:
  lily-data:
|