GitHub Actions committed
Commit · 7c4d825 · 0 Parent(s)
Deploy to Hugging Face Space: product-image-update-port-1
Files changed:
- .gitattributes +72 -0
- Dockerfile +116 -0
- README.md +0 -0
- app.py +0 -0
- requirements.txt +0 -0
- src/api/__init__.py +0 -0
- src/api/gpu_functions.py +0 -0
- src/api/routes.py +0 -0
- src/config/__init__.py +0 -0
- src/config/constants.py +0 -0
- src/models/__init__.py +0 -0
- src/models/model_loader.py +0 -0
- src/pipeline/__init__.py +0 -0
- src/pipeline/executor.py +0 -0
- src/pipeline/pipeline_steps.py +0 -0
- src/processing/bounding_box/bounding_box.py +0 -0
- src/processing/bounding_box/flow_diagram.mermaid +98 -0
- src/processing/bounding_box/head_model.py +0 -0
- src/processing/bounding_box/rtdetr_model.py +0 -0
- src/processing/bounding_box/yolos_fashionpedia_model.py +0 -0
- src/processing/cropping_padding/cropping_padding.py +0 -0
- src/processing/cropping_padding/flow_diagram.mermaid +224 -0
- src/processing/image_download/flow_diagram.md +0 -0
- src/processing/image_download/flow_diagram.mermaid +0 -0
- src/processing/image_download/image_download.py +0 -0
- src/processing/remove_background/remove_background_BiRefNet.py +0 -0
- src/processing/remove_background/remove_background_RMBG_2_0.py +0 -0
- src/processing/return_images/enhanced_color_detection.py +0 -0
- src/processing/return_images/fashion_clip_model.py +0 -0
- src/processing/return_images/flow_diagram.mermaid +61 -0
- src/processing/return_images/return_images.py +0 -0
- src/processing/return_images/segment_for_color.py +0 -0
- src/processing/return_images/timm_resnet50_a2_denim_extractor.py +0 -0
- src/processing/return_images/timm_resnet50_color.py +0 -0
- src/processing/under_development/under_development.py +0 -0
- src/utils/__init__.py +0 -0
- src/utils/context_utils.py +0 -0
- src/utils/logging_utils.py +0 -0
- tests/__init__.py +0 -0
- tests/config.py +0 -0
- tests/test_full_pipeline.py +0 -0
- tests/test_quota_handling.py +0 -0
.gitattributes
ADDED
@@ -0,0 +1,72 @@
+# Standard LFS patterns
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+
+# Model-specific patterns
+models/**/*.bin filter=lfs diff=lfs merge=lfs -text
+models/**/*.safetensors filter=lfs diff=lfs merge=lfs -text
+models/**/*.onnx filter=lfs diff=lfs merge=lfs -text
+models/**/*.pt filter=lfs diff=lfs merge=lfs -text
+models/**/*.pth filter=lfs diff=lfs merge=lfs -text
+
+# Cache patterns
+.cache/**/*.bin filter=lfs diff=lfs merge=lfs -text
+.cache/**/*.safetensors filter=lfs diff=lfs merge=lfs -text
+.cache/**/*.json filter=lfs diff=lfs merge=lfs -text
+
+# Image patterns (for sample images)
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.bmp filter=lfs diff=lfs merge=lfs -text
+
+# Video patterns (if needed)
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.avi filter=lfs diff=lfs merge=lfs -text
+*.mov filter=lfs diff=lfs merge=lfs -text
+*.webm filter=lfs diff=lfs merge=lfs -text
+
+# Text files (not LFS)
+*.txt -filter=lfs -diff=lfs -merge=lfs text
+*.md -filter=lfs -diff=lfs -merge=lfs text
+*.json -filter=lfs -diff=lfs -merge=lfs text
+*.py -filter=lfs -diff=lfs -merge=lfs text
+*.yml -filter=lfs -diff=lfs -merge=lfs text
+*.yaml -filter=lfs -diff=lfs -merge=lfs text
+requirements.txt -filter=lfs -diff=lfs -merge=lfs text
+README.md -filter=lfs -diff=lfs -merge=lfs text
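The .gitattributes rules above route model weights, archives, and media through Git LFS, while source and text files (*.py, *.md, *.json, *.yml, requirements.txt, README.md) stay as regular Git blobs. As a sanity check after a clone without LFS content, a minimal Python sketch (not part of this commit; the example path is hypothetical) can tell an LFS pointer apart from real data, since pointer files begin with a fixed spec-version line:

from pathlib import Path

LFS_POINTER_PREFIX = b"version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    # A Git LFS pointer is a tiny text stub that starts with the spec version line.
    head = Path(path).read_bytes()[:len(LFS_POINTER_PREFIX)]
    return head == LFS_POINTER_PREFIX

# Example (hypothetical path): large weights remain pointers until `git lfs pull` runs.
# print(is_lfs_pointer("models/example.safetensors"))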
Dockerfile
ADDED
@@ -0,0 +1,116 @@
+version: '3.8'
+
+services:
+  product-image-update:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    ports:
+      - "7860:7860"
+    environment:
+      # Copy from .env file or set directly
+      - HF_TOKEN=${HF_TOKEN}
+      - CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-0}
+      - PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
+      - TRANSFORMERS_CACHE=/app/.cache/huggingface
+      - HF_HOME=/app/.cache/huggingface
+      - GRADIO_SERVER_NAME=0.0.0.0
+      - GRADIO_SERVER_PORT=7860
+      - DEBUG=${DEBUG:-false}
+    volumes:
+      # Mount cache directories for faster restarts
+      - huggingface-cache:/app/.cache/huggingface
+      - torch-cache:/app/.cache/torch
+      - models-cache:/app/models
+      # Mount processed images directory
+      - ./processed_imgs:/app/processed_imgs
+      # For development - mount source code
+      - ./app.py:/app/app.py:ro
+      - ./utils.py:/app/utils.py:ro
+      - ./image-download:/app/image-download:ro
+      - ./remove-background:/app/remove-background:ro
+      - ./bounding-box:/app/bounding-box:ro
+      - ./cropping-padding:/app/cropping-padding:ro
+      - ./return-images:/app/return-images:ro
+    deploy:
+      resources:
+        limits:
+          memory: 16G
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:7860/health"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 60s
+    restart: unless-stopped
+    networks:
+      - app-network
+
+  # Optional: Nginx reverse proxy for production
+  nginx:
+    image: nginx:alpine
+    ports:
+      - "80:80"
+      - "443:443"
+    volumes:
+      - ./nginx.conf:/etc/nginx/nginx.conf:ro
+      - ./ssl:/etc/nginx/ssl:ro
+    depends_on:
+      - product-image-update
+    networks:
+      - app-network
+    profiles:
+      - production
+
+  # Optional: Monitoring with Prometheus
+  prometheus:
+    image: prom/prometheus:latest
+    ports:
+      - "9090:9090"
+    volumes:
+      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
+      - prometheus-data:/prometheus
+    command:
+      - '--config.file=/etc/prometheus/prometheus.yml'
+      - '--storage.tsdb.path=/prometheus'
+    networks:
+      - app-network
+    profiles:
+      - monitoring
+
+  # Optional: Grafana for visualization
+  grafana:
+    image: grafana/grafana:latest
+    ports:
+      - "3000:3000"
+    environment:
+      - GF_SECURITY_ADMIN_PASSWORD=admin
+    volumes:
+      - grafana-data:/var/lib/grafana
+    depends_on:
+      - prometheus
+    networks:
+      - app-network
+    profiles:
+      - monitoring
+
+volumes:
+  huggingface-cache:
+    driver: local
+  torch-cache:
+    driver: local
+  models-cache:
+    driver: local
+  prometheus-data:
+    driver: local
+  grafana-data:
+    driver: local
+
+networks:
+  app-network:
+    driver: bridge
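The compose file shown above publishes the app on port 7860 and declares a health check against http://localhost:7860/health; because nginx, prometheus, and grafana sit behind the production and monitoring profiles, a plain `docker compose up` starts only the application service. A minimal Python sketch (not from this repository) for waiting on that endpoint after startup, assuming the service really serves a /health route as the healthcheck implies:

import time
import urllib.error
import urllib.request

def wait_until_healthy(url: str = "http://localhost:7860/health",
                       timeout_s: float = 300.0, interval_s: float = 5.0) -> bool:
    # Poll the health endpoint until it answers 200 OK or the deadline passes.
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=10) as resp:
                if resp.status == 200:
                    return True
        except (urllib.error.URLError, OSError):
            pass  # service still starting up; retry after a short pause
        time.sleep(interval_s)
    return False

if __name__ == "__main__":
    print("healthy" if wait_until_healthy() else "not healthy within timeout")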
README.md
ADDED
Binary file (248 Bytes)

app.py
ADDED
Binary file (84.4 kB)

requirements.txt
ADDED
Binary file (1.34 kB)

src/api/__init__.py
ADDED
Binary file (159 Bytes)

src/api/gpu_functions.py
ADDED
Binary file (3.03 kB)

src/api/routes.py
ADDED
Binary file (1.62 kB)

src/config/__init__.py
ADDED
Binary file (2.13 kB)

src/config/constants.py
ADDED
Binary file (3.44 kB)

src/models/__init__.py
ADDED
Binary file (3.25 kB)

src/models/model_loader.py
ADDED
Binary file (34.4 kB)

src/pipeline/__init__.py
ADDED
Binary file (325 Bytes)

src/pipeline/executor.py
ADDED
Binary file (1.05 kB)

src/pipeline/pipeline_steps.py
ADDED
Binary file (1.3 kB)

src/processing/bounding_box/bounding_box.py
ADDED
Binary file (49.2 kB)
src/processing/bounding_box/flow_diagram.mermaid
ADDED
@@ -0,0 +1,98 @@
+flowchart TD
+    classDef blueBox fill:#0000ff33,stroke:#0000ff,color:#000
+    classDef greenBox fill:#00ff0033,stroke:#00ff00,color:#000
+    classDef violetBox fill:#ee82ee33,stroke:#ee82ee,color:#000
+    classDef orangeBox fill:#ffa50033,stroke:#ffa500,color:#000
+    classDef redBox fill:#ff000033,stroke:#ff0000,color:#000
+    classDef multiBox fill:lightyellow,stroke:#333,color:#000
+    classDef processNode fill:#dae8fc,stroke:#6c8ebf,color:#000
+    classDef dataNode fill:#d5e8d4,stroke:#82b366,color:#000
+
+    Input[/"Input Image"/] --> Step1
+
+    subgraph Models["Detection Models"]
+        direction TB
+        RTDETR["RT-DETR Model
+        🟦 Main Product Detection:
+        person, coat, dress, jacket,
+        shirt, skirt, pants, shorts
+
+        🟧 Some Features:
+        tie → collar"]:::multiBox
+
+        RTDETR_A["RT-DETR Artifact Detection
+        🟥 Artifacts:
+        backpack, handbag → bag,
+        bottle, cup → cup,
+        book, cell phone, camera,
+        umbrella"]:::redBox
+
+        YOLO["YOLOv11 Model
+        🟦 Main Products:
+        jacket, coat, shirt, dress,
+        vest, pants, jeans, shorts
+
+        🟪 Shoes:
+        footwear, shoes, boots,
+        high heels, sandals
+
+        🟧 Some Features:
+        tie → collar"]:::multiBox
+
+        HEAD["Head Detection Model
+        🟩 Head:
+        face, head"]:::greenBox
+    end
+
+    Step1["Step 1: Define Largest Box
+    Use RT-DETR to identify
+    the main product region"]:::processNode
+    RTDETR --> Step1
+    Step1 --> Step2
+
+    Step2["Step 2: Multi-model Detection
+    Run all models on
+    largest box region"]:::processNode
+    RTDETR --> Step2
+    RTDETR_A --> Step2
+    YOLO --> Step2
+    HEAD --> Step2
+    Step2 --> Step3
+
+    Step3["Step 3: Color Assignment
+    & Keyword Mapping"]:::processNode --> Categories
+
+    subgraph Categories["Map objects to categories"]
+        direction TB
+        Product["🟦 BLUE: Product Type (0.4)
+        jacket, shirt, vest,
+        jeans, shorts, skirt,
+        overall, dress"]:::blueBox
+
+        Head["🟩 GREEN: Head (0.5)
+        head"]:::greenBox
+
+        Shoes["🟪 VIOLET: Shoes (0.5)
+        shoes"]:::violetBox
+
+        Features["🟧 ORANGE: Features (0.4)
+        neckline, collar, sleeve,
+        closure, pocket"]:::orangeBox
+
+        Artifacts["🟥 RED: Artifacts (0.7)
+        bag, cup, hanger, book,
+        phone, camera, umbrella"]:::redBox
+    end
+
+    Categories --> Step4
+
+    Step4["Step 4: Adjust Blue Box
+    Refine product box based on
+    head and shoe positions"]:::processNode --> Step5
+
+    Step5["Step 5: Draw Boxes
+    Add colored bbox
+    to image"]:::processNode --> Output
+
+    Output["Processed Image
+    With Boxes"]:::dataNode
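The diagram above maps raw detector labels onto five colour-coded categories, each with its own confidence threshold (blue product 0.4, green head 0.5, violet shoes 0.5, orange features 0.4, red artifacts 0.7) and keyword remappings such as tie → collar and backpack/handbag → bag. A minimal illustrative sketch of that mapping step, with hypothetical helper names rather than the actual code in bounding_box.py:

from typing import Optional

# Category -> (confidence threshold, keywords), taken from the flow diagram above.
CATEGORIES = {
    "product":   (0.4, {"jacket", "shirt", "vest", "jeans", "shorts", "skirt", "overall", "dress"}),
    "head":      (0.5, {"head", "face"}),
    "shoes":     (0.5, {"shoes", "footwear", "boots", "high heels", "sandals"}),
    "features":  (0.4, {"neckline", "collar", "sleeve", "closure", "pocket"}),
    "artifacts": (0.7, {"bag", "cup", "hanger", "book", "phone", "camera", "umbrella"}),
}

# Raw labels remapped before categorisation, per the "Some Features" / "Artifacts" notes.
LABEL_REMAP = {"tie": "collar", "backpack": "bag", "handbag": "bag",
               "bottle": "cup", "cell phone": "phone"}

def categorize(label: str, score: float) -> Optional[str]:
    # Return the colour category for one detection, or None if unmapped / below threshold.
    label = LABEL_REMAP.get(label, label)
    for category, (threshold, keywords) in CATEGORIES.items():
        if label in keywords and score >= threshold:
            return category
    return None

# categorize("tie", 0.55) -> "features"; categorize("backpack", 0.5) -> None (0.5 < 0.7)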
src/processing/bounding_box/head_model.py
ADDED
Binary file (5.25 kB)

src/processing/bounding_box/rtdetr_model.py
ADDED
Binary file (13.9 kB)

src/processing/bounding_box/yolos_fashionpedia_model.py
ADDED
Binary file (12.1 kB)

src/processing/cropping_padding/cropping_padding.py
ADDED
Binary file (40.1 kB)
src/processing/cropping_padding/flow_diagram.mermaid
ADDED
@@ -0,0 +1,224 @@
+flowchart TD
+    classDef processNode fill:#dae8fc,stroke:#6c8ebf,color:#000
+    classDef imageNode fill:#d5e8d4,stroke:#82b366,color:#000
+    classDef decisionNode fill:#fff2cc,stroke:#d6b656,color:#000
+    classDef squareNode fill:#e3f2fd,stroke:#1976d2,color:#000
+    classDef landscapeNode fill:#e8f5e8,stroke:#388e3c,color:#000
+    classDef portraitNode fill:#fce4ec,stroke:#c2185b,color:#000
+    classDef transformNode fill:#f3e5f5,stroke:#7b1fa2,color:#000
+    classDef borderNode fill:#fff3e0,stroke:#f57c00,color:#000
+
+    Input[/"Input RGBA Image
+    with transparent background"/]:::imageNode
+    Input --> InitialCrop
+
+    InitialCrop["Initial Cropping
+    • Use blue box coordinates from detection
+    • Remove excess vertical space
+    • Preserve product boundaries"]:::processNode
+    InitialCrop --> ShrinkBox
+
+    ShrinkBox["Shrink Primary Box
+    • Analyze alpha channel transparency
+    • Remove white-only regions
+    • Create tight bounding box"]:::processNode
+    ShrinkBox --> BorderDetect
+
+    BorderDetect["Border Detection Analysis
+    • Measure edge coverage ratios
+    • Calculate feathering smoothness
+    • Classify border types as straight lines"]:::processNode
+    BorderDetect --> OrientCheck
+
+    OrientCheck{"Determine Image Orientation
+    Width vs Height Analysis"}:::decisionNode
+    OrientCheck -->|"Width ≈ Height"| SquareFlow
+    OrientCheck -->|"Width > Height"| LandscapeFlow
+    OrientCheck -->|"Height > Width"| PortraitFlow
+
+    subgraph SquareFlow["Square Image Transformation Pipeline □"]
+        SqInput["Square Input □
+        Width ≈ Height"]:::squareNode
+        SqInput --> SqCheck{"Detect Special Cases"}:::decisionNode
+
+        SqCheck -->|"Shoes Detected"| SqShoes["Shoes Transformation:
+        □ → Pad bottom only
+        □ → Ignore lower border flags
+        □ → Result: Slightly taller square"]:::squareNode
+
+        SqCheck -->|"Head + Borders"| SqHeadBorder["Head with Borders:
+        □ → No padding changes
+        □ → Preserve existing borders
+        □ → Result: Original square maintained"]:::squareNode
+
+        SqCheck -->|"Border Lines"| SqBorders["Border Lines Present:
+        □ → No additional padding
+        □ → Maintain current dimensions
+        □ → Result: Original square preserved"]:::squareNode
+
+        SqCheck -->|"Default Case"| SqDefault["Universal Padding:
+        □ → UNIVERSAL_PAD_RATIO on all sides
+        □ → Equal padding: top, bottom, left, right
+        □ → Result: Larger square with even spacing"]:::squareNode
+
+        SqShoes --> SqOutput["□ Final Square Output"]:::squareNode
+        SqHeadBorder --> SqOutput
+        SqBorders --> SqOutput
+        SqDefault --> SqOutput
+    end
+
+    subgraph LandscapeFlow["Landscape Image Transformation Pipeline ▭"]
+        LsInput["Landscape Input ▭
+        Width > Height"]:::landscapeNode
+        LsInput --> LsCheck{"Detect Special Cases"}:::decisionNode
+
+        LsCheck -->|"Shoes Detected"| LsShoes["Shoes Transformation:
+        ▭ → Add bottom padding
+        ▭ → Crop to square format
+        ▭ → Result: Square with shoes at bottom"]:::transformNode
+
+        LsCheck -->|"Head Detected"| LsHead["Head Transformation:
+        ▭ → Remove top padding
+        ▭ → Keep landscape shape square
+        ▭ → Result: Square with head positioned well"]:::transformNode
+
+        LsCheck -->|"Border Lines"| LsBorders["Border Lines Transformation:
+        ▭ → Coverage-based cropping
+        ▭ → Analyze content distribution
+        ▭ → Result: Square from content analysis"]:::transformNode
+
+        LsCheck -->|"Default Case"| LsDefault["Two-Step Transformation:
+        ▭ → Step 1: Pad height to approach square
+        ▭ → Step 2: Equalize to perfect square
+        ▭ → Result: Centered square with even padding"]:::transformNode
+
+        LsShoes --> LsOutput["□ Final Square Output"]:::landscapeNode
+        LsHead --> LsOutput
+        LsBorders --> LsOutput
+        LsDefault --> LsOutput
+    end
+
+    subgraph PortraitFlow["Portrait Image Transformation Pipeline ▯"]
+        PtInput["Portrait Input ▯
+        Height > Width"]:::portraitNode
+        PtInput --> PtCheck{"Detect Special Cases"}:::decisionNode
+
+        PtCheck -->|"Shoes Detected"| PtShoes["Shoes Transformation:
+        ▯ → Never pad top
+        ▯ → Always pad bottom
+        ▯ → Pad sides to square
+        ▯ → Result: Square with shoes at bottom"]:::transformNode
+
+        PtCheck -->|"Head Detected"| PtHead["Head Transformation:
+        ▯ → Pad left and right only
+        ▯ → Maintain head position
+        ▯ → Result: Square with head positioned"]:::transformNode
+
+        PtCheck -->|"L/R Borders"| PtLR["L/R Borders Transformation:
+        ▯ → Coverage-based cropping
+        ▯ → Remove excess based on content density
+        ▯ → Result: Square from intelligent cropping"]:::transformNode
+
+        PtCheck -->|"U+L Borders"| PtUL["U+L Borders Transformation:
+        ▯ → Pad left and right only
+        ▯ → Maintain vertical borders
+        ▯ → Result: Square with preserved content"]:::transformNode
+
+        PtCheck -->|"One Border"| PtOne["Single Border Transformation:
+        ▯ → Two-step padding process
+        ▯ → Step 1: Pad width toward square
+        ▯ → Result: Balanced square output"]:::transformNode
+
+        PtCheck -->|"Default Case"| PtDefault["Default Transformation:
+        ▯ → Two-step padding process
+        ▯ → Step 1: Add horizontal padding
+        ▯ → Result: Centered square with side padding"]:::transformNode
+
+        PtShoes --> PtOutput["□ Final Square Output"]:::portraitNode
+        PtHead --> PtOutput
+        PtLR --> PtOutput
+        PtUL --> PtOutput
+        PtOne --> PtOutput
+        PtDefault --> PtOutput
+    end
+
+    SqOutput --> Centering
+    LsOutput --> Centering
+    PtOutput --> Centering
+
+    Centering["Final Object Centering
+    • Analyze alpha channel boundaries
+    • Calculate product midpoint
+    • Shift horizontally to center
+    • Maintain padding relationships"]:::processNode
+    Centering --> FinalOutput
+
+    FinalOutput[/"□ Final Processed Square Image
+    Consistently sized and centered"/]:::imageNode
+
+    subgraph BorderDetectionDetails["Border Detection Algorithm Details"]
+        BorderParams["Detection Parameters:
+        COVERAGE_THRESHOLD = 0.25
+        FEATHER_THRESHOLD_MIN = 0.3
+        FEATHER_THRESHOLD_MAX = 0.7"]:::borderNode
+
+        CoverageAnalysis["Coverage Analysis:
+        • Left Border: Vertical edge pattern
+        • Right Border: Opposite vertical edge
+        • Upper Border: Top horizontal edge
+        • Lower Border: Bottom horizontal edge"]:::borderNode
+
+        FeatheringAnalysis["Feathering Analysis:
+        • Hard Edges: Sharp transitions
+        • Soft Edges: Gradual transitions
+        • Feathering Ratio: Transition zone proportion"]:::borderNode
+
+        Classification["Classification Criteria:
+        Border = Straight Line when:
+        • Coverage > COVERAGE_THRESHOLD
+        • Feathering outside MIN-MAX range"]:::borderNode
+
+        BorderParams --> CoverageAnalysis --> FeatheringAnalysis --> Classification
+    end
+
+    BorderDetect -.-> BorderDetectionDetails
+
+    subgraph TransformationExamples["Visual Transformation Examples"]
+        ExampleSquare["Square Examples:
+        □ 1000×1000 → □ 1150×1150 (universal pad)
+        □ 1000×1000 → □ 1000×1000 (preserved)"]:::squareNode
+
+        ExampleLandscape["Landscape Examples:
+        ▭ 1200×800 → ▭ 1200×1200 (height pad) → □ 1200×1200
+        ▭ 1400×900 → Analysis → □ 1200×1200 (coverage crop)"]:::landscapeNode
+
+        ExamplePortrait["Portrait Examples:
+        ▯ 800×1200 → ▯ 1200×1200 (width pad) → □ 1200×1200
+        ▯ 600×1000 → ▯ 1000×1000 (side pad) → □ 1000×1000"]:::portraitNode
+    end
+
+    FinalOutput -.-> TransformationExamples
+
+    subgraph PaddingStrategies["Padding Strategy Implementation"]
+        UniversalPadding["Universal Padding:
+        UNIVERSAL_PAD_RATIO = 0.075
+        Applied as 7.5% of image dimension
+        1000px image = 75px padding per side"]:::borderNode
+
+        TwoStepPadding["Two-Step Padding:
+        Step 1: Pad longer dimension toward square
+        Step 2: Equalize shorter dimension
+        Step 3: Complete perfect square formation"]:::borderNode
+
+        CoverageCropping["Coverage-Based Cropping:
+        Step 1: Analyze content distribution
+        Step 2: Remove excess based on density
+        Step 3: Apply minimal padding
+        Step 4: Achieve square format"]:::borderNode
+
+        UniversalPadding --> TwoStepPadding --> CoverageCropping
+    end
+
+    SquareFlow -.-> PaddingStrategies
+    LandscapeFlow -.-> PaddingStrategies
+    PortraitFlow -.-> PaddingStrategies
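Among the branches above, the simplest is the default square case: pad every side by UNIVERSAL_PAD_RATIO = 0.075, which is exactly the 1000×1000 → 1150×1150 example in the diagram (75 px per side). A minimal Pillow sketch of just that branch (illustrative only; the function name is not from cropping_padding.py):

from PIL import Image

UNIVERSAL_PAD_RATIO = 0.075  # 7.5% of the image dimension, per the diagram

def pad_square_uniform(img: Image.Image) -> Image.Image:
    # Pad an already-square RGBA image equally on all four sides with transparency.
    assert img.width == img.height, "this sketch covers only the square default case"
    pad = int(round(img.width * UNIVERSAL_PAD_RATIO))
    canvas = Image.new("RGBA", (img.width + 2 * pad, img.height + 2 * pad), (0, 0, 0, 0))
    canvas.paste(img, (pad, pad), img)  # reuse the alpha channel as the paste mask
    return canvas

# 1000x1000 input -> 1150x1150 output, matching the "Square Examples" subgraph.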
src/processing/image_download/flow_diagram.md
ADDED
Binary file (5.97 kB)

src/processing/image_download/flow_diagram.mermaid
ADDED
File without changes

src/processing/image_download/image_download.py
ADDED
Binary file (7.03 kB)

src/processing/remove_background/remove_background_BiRefNet.py
ADDED
Binary file (47.5 kB)

src/processing/remove_background/remove_background_RMBG_2_0.py
ADDED
Binary file (47.4 kB)

src/processing/return_images/enhanced_color_detection.py
ADDED
Binary file (14.5 kB)

src/processing/return_images/fashion_clip_model.py
ADDED
Binary file (2.04 kB)
src/processing/return_images/flow_diagram.mermaid
ADDED
@@ -0,0 +1,61 @@
+flowchart TD
+    classDef processNode fill:#dae8fc,stroke:#6c8ebf,color:#000
+    classDef dataNode fill:#d5e8d4,stroke:#82b366,color:#000
+    classDef colorNode fill:#ffe6cc,stroke:#d79b00,color:#000
+    classDef typeNode fill:#e1d5e7,stroke:#9673a6,color:#000
+    classDef outputNode fill:#f8cecc,stroke:#b85450,color:#000
+
+    Input[/"Processed Image"/]:::dataNode --> Step1
+
+    Step1["Step 1: Color Extraction & Mapping
+    Sample non-transparent pixels,
+    quantize colors, determine dominant color"]:::processNode --> Step2
+
+    Step2["Step 2: Image Type Detection
+    Analyze padding information
+    and detection keywords"]:::processNode --> Step3
+
+    Step3["Step 3: WebP Conversion & Base64 Encoding
+    Check for artifacts, convert format,
+    encode for API response"]:::processNode --> Output
+
+    Output[/"Final Response Data"/]:::dataNode
+
+    subgraph ColorAnalysis["Color Classification Logic"]
+        RGBToHSL["RGB → HSL conversion"]:::processNode
+
+        MonochromeCheck{"Is monochrome?
+        (low saturation)"}:::colorNode
+        RGBToHSL --> MonochromeCheck
+
+        MonochromeCheck -->|"Yes"| GreyscaleGrouping{"Lightness values"}:::colorNode
+        GreyscaleGrouping -->|"< 0.28"| Black["Black"]:::colorNode
+        GreyscaleGrouping -->|"0.28-0.88"| Grey["Grey"]:::colorNode
+        GreyscaleGrouping -->|"> 0.88"| White["White"]:::colorNode
+
+        MonochromeCheck -->|"No"| BlueRangeCheck{"Is blue range?
+        (160° ≤ H < 260°)"}:::colorNode
+        BlueRangeCheck -->|"Yes"| BlueShades{"Lightness values"}:::colorNode
+        BlueShades -->|"< 0.40"| DarkBlue["Dark Blue"]:::colorNode
+        BlueShades -->|"0.40-0.65"| Blue["Blue"]:::colorNode
+        BlueShades -->|"> 0.65"| LightBlue["Light Blue"]:::colorNode
+
+        BlueRangeCheck -->|"No"| PrimaryColors{"Primary color groups
+        based on hue angle"}:::colorNode
+        PrimaryColors -->|"0°-70°"| YellowGroup["Yellow Group"]:::colorNode
+        PrimaryColors -->|"70°-160°"| GreenGroup["Green Group"]:::colorNode
+        PrimaryColors -->|"260°-360°"| RedGroup["Red Group"]:::colorNode
+    end
+
+    Step1 -.-> ColorAnalysis
+
+    subgraph TypeAssignment["Image Type Assignment"]
+        PaddingCheck{"All sides padded?"}:::typeNode
+        PaddingCheck -->|"Yes"| PaddedProduct["padded_product"]:::typeNode
+        PaddingCheck -->|"No"| HeadShoesCheck{"Head or shoes
+        detected?"}:::typeNode
+        HeadShoesCheck -->|"No"| DetailType["detail"]:::typeNode
+        HeadShoesCheck -->|"Yes"| DefaultType["none"]:::typeNode
+    end
+
+    Step2 -.-> TypeAssignment
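The colour-classification subgraph above converts RGB to HSL, treats low-saturation pixels as greyscale (black below 0.28 lightness, white above 0.88, grey in between), handles the blue hue range 160°-260° separately (dark blue below 0.40 lightness, light blue above 0.65), and otherwise buckets by hue angle. A minimal sketch of those thresholds with colorsys (illustrative; the saturation cutoff for "monochrome" is an assumption, since the diagram only says "low saturation"):

import colorsys

MONO_SATURATION_MAX = 0.12  # assumed cutoff; the diagram only specifies "low saturation"

def classify_rgb(r: int, g: int, b: int) -> str:
    # Map an RGB colour (0-255 per channel) to the coarse groups in the flow diagram.
    h, l, s = colorsys.rgb_to_hls(r / 255, g / 255, b / 255)  # note: colorsys returns H, L, S
    hue_deg = h * 360
    if s <= MONO_SATURATION_MAX:                      # monochrome branch
        return "black" if l < 0.28 else "white" if l > 0.88 else "grey"
    if 160 <= hue_deg < 260:                          # blue-range branch
        return "dark blue" if l < 0.40 else "light blue" if l > 0.65 else "blue"
    if hue_deg < 70:
        return "yellow group"
    if hue_deg < 160:
        return "green group"
    return "red group"                                # 260°-360° per the diagram

# classify_rgb(20, 20, 20) -> "black"; classify_rgb(10, 20, 120) -> "dark blue"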
src/processing/return_images/return_images.py
ADDED
Binary file (14.2 kB)

src/processing/return_images/segment_for_color.py
ADDED
Binary file (5.36 kB)

src/processing/return_images/timm_resnet50_a2_denim_extractor.py
ADDED
Binary file (22.6 kB)

src/processing/return_images/timm_resnet50_color.py
ADDED
Binary file (5.17 kB)

src/processing/under_development/under_development.py
ADDED
Binary file (19.8 kB)

src/utils/__init__.py
ADDED
Binary file (1.29 kB)

src/utils/context_utils.py
ADDED
Binary file (11.2 kB)

src/utils/logging_utils.py
ADDED
Binary file (10.3 kB)

tests/__init__.py
ADDED
Binary file (177 Bytes)

tests/config.py
ADDED
Binary file (401 Bytes)

tests/test_full_pipeline.py
ADDED
Binary file (6.97 kB)

tests/test_quota_handling.py
ADDED
Binary file (6.21 kB)