Files changed (13)
  1. .gitignore +0 -207
  2. README.md +1 -3
  3. app.py +0 -0
  4. constants.py +0 -640
  5. image_processor.py +0 -130
  6. lora_dict.json +0 -0
  7. model_dict.json +0 -0
  8. modutils.py +1290 -0
  9. packages.txt +1 -1
  10. pre-requirements.txt +0 -1
  11. requirements.txt +3 -12
  12. stablepy_model.py +0 -0
  13. utils.py +49 -713
.gitignore DELETED
@@ -1,207 +0,0 @@
1
- # Byte-compiled / optimized / DLL files
2
- __pycache__/
3
- *.py[codz]
4
- *$py.class
5
-
6
- # C extensions
7
- *.so
8
-
9
- # Distribution / packaging
10
- .Python
11
- build/
12
- develop-eggs/
13
- dist/
14
- downloads/
15
- eggs/
16
- .eggs/
17
- lib/
18
- lib64/
19
- parts/
20
- sdist/
21
- var/
22
- wheels/
23
- share/python-wheels/
24
- *.egg-info/
25
- .installed.cfg
26
- *.egg
27
- MANIFEST
28
-
29
- # PyInstaller
30
- # Usually these files are written by a python script from a template
31
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
- *.manifest
33
- *.spec
34
-
35
- # Installer logs
36
- pip-log.txt
37
- pip-delete-this-directory.txt
38
-
39
- # Unit test / coverage reports
40
- htmlcov/
41
- .tox/
42
- .nox/
43
- .coverage
44
- .coverage.*
45
- .cache
46
- nosetests.xml
47
- coverage.xml
48
- *.cover
49
- *.py.cover
50
- .hypothesis/
51
- .pytest_cache/
52
- cover/
53
-
54
- # Translations
55
- *.mo
56
- *.pot
57
-
58
- # Django stuff:
59
- *.log
60
- local_settings.py
61
- db.sqlite3
62
- db.sqlite3-journal
63
-
64
- # Flask stuff:
65
- instance/
66
- .webassets-cache
67
-
68
- # Scrapy stuff:
69
- .scrapy
70
-
71
- # Sphinx documentation
72
- docs/_build/
73
-
74
- # PyBuilder
75
- .pybuilder/
76
- target/
77
-
78
- # Jupyter Notebook
79
- .ipynb_checkpoints
80
-
81
- # IPython
82
- profile_default/
83
- ipython_config.py
84
-
85
- # pyenv
86
- # For a library or package, you might want to ignore these files since the code is
87
- # intended to run in multiple environments; otherwise, check them in:
88
- # .python-version
89
-
90
- # pipenv
91
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
- # install all needed dependencies.
95
- #Pipfile.lock
96
-
97
- # UV
98
- # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
- # This is especially recommended for binary packages to ensure reproducibility, and is more
100
- # commonly ignored for libraries.
101
- #uv.lock
102
-
103
- # poetry
104
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
- # This is especially recommended for binary packages to ensure reproducibility, and is more
106
- # commonly ignored for libraries.
107
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
- #poetry.lock
109
- #poetry.toml
110
-
111
- # pdm
112
- # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
113
- # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
114
- # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
115
- #pdm.lock
116
- #pdm.toml
117
- .pdm-python
118
- .pdm-build/
119
-
120
- # pixi
121
- # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
122
- #pixi.lock
123
- # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
124
- # in the .venv directory. It is recommended not to include this directory in version control.
125
- .pixi
126
-
127
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
128
- __pypackages__/
129
-
130
- # Celery stuff
131
- celerybeat-schedule
132
- celerybeat.pid
133
-
134
- # SageMath parsed files
135
- *.sage.py
136
-
137
- # Environments
138
- .env
139
- .envrc
140
- .venv
141
- env/
142
- venv/
143
- ENV/
144
- env.bak/
145
- venv.bak/
146
-
147
- # Spyder project settings
148
- .spyderproject
149
- .spyproject
150
-
151
- # Rope project settings
152
- .ropeproject
153
-
154
- # mkdocs documentation
155
- /site
156
-
157
- # mypy
158
- .mypy_cache/
159
- .dmypy.json
160
- dmypy.json
161
-
162
- # Pyre type checker
163
- .pyre/
164
-
165
- # pytype static type analyzer
166
- .pytype/
167
-
168
- # Cython debug symbols
169
- cython_debug/
170
-
171
- # PyCharm
172
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
173
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
174
- # and can be added to the global gitignore or merged into this file. For a more nuclear
175
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
176
- #.idea/
177
-
178
- # Abstra
179
- # Abstra is an AI-powered process automation framework.
180
- # Ignore directories containing user credentials, local state, and settings.
181
- # Learn more at https://abstra.io/docs
182
- .abstra/
183
-
184
- # Visual Studio Code
185
- # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
186
- # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
187
- # and can be added to the global gitignore or merged into this file. However, if you prefer,
188
- # you could uncomment the following to ignore the entire vscode folder
189
- # .vscode/
190
-
191
- # Ruff stuff:
192
- .ruff_cache/
193
-
194
- # PyPI configuration file
195
- .pypirc
196
-
197
- # Cursor
198
- # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
199
- # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
200
- # refer to https://docs.cursor.com/context/ignore-files
201
- .cursorignore
202
- .cursorindexingignore
203
-
204
- # Marimo
205
- marimo/_static/
206
- marimo/_lsp/
207
- __marimo__/
README.md CHANGED
@@ -4,13 +4,11 @@ emoji: 🧩🖼️
  colorFrom: red
  colorTo: pink
  sdk: gradio
- sdk_version: 5.44.1
+ sdk_version: 4.31.3
  app_file: app.py
  pinned: true
  license: mit
  short_description: Stunning images using stable diffusion.
- preload_from_hub:
- - madebyollin/sdxl-vae-fp16-fix config.json,diffusion_pytorch_model.safetensors
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
The diff for this file is too large to render.
 
constants.py DELETED
@@ -1,640 +0,0 @@
1
- import os
2
- from stablepy.diffusers_vanilla.constants import FLUX_CN_UNION_MODES
3
- from stablepy import (
4
- scheduler_names,
5
- SD15_TASKS,
6
- SDXL_TASKS,
7
- ALL_BUILTIN_UPSCALERS,
8
- IP_ADAPTERS_SD,
9
- IP_ADAPTERS_SDXL,
10
- PROMPT_WEIGHT_OPTIONS_PRIORITY,
11
- )
12
-
13
- IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
14
-
15
- # - **Download Models**
16
- DOWNLOAD_MODEL = "https://huggingface.co/zuv0/test/resolve/main/milkyWonderland_v40.safetensors"
17
-
18
- # - **Download VAEs**
19
- DOWNLOAD_VAE = "https://huggingface.co/Anzhc/Anzhcs-VAEs/resolve/main/SDXL%20Anime%20VAE%20Dec-only%20B3.safetensors, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
20
-
21
- # - **Download LoRAs**
22
- DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
23
-
24
- LOAD_DIFFUSERS_FORMAT_MODEL = [
25
- 'TestOrganizationPleaseIgnore/potato_quality_anime_plzwork_sdxl',
26
- 'TestOrganizationPleaseIgnore/rinAnim8drawIllustriousXL_v20_sdxl',
27
- 'TestOrganizationPleaseIgnore/perfectrsbmixIllustrious_definitiveiota_sdxl',
28
- 'stabilityai/stable-diffusion-xl-base-1.0',
29
- 'Laxhar/noobai-XL-1.1',
30
- 'Laxhar/noobai-XL-Vpred-1.0',
31
- 'black-forest-labs/FLUX.1-dev',
32
- 'black-forest-labs/FLUX.1-Krea-dev',
33
- 'John6666/blue-pencil-flux1-v021-fp8-flux',
34
- 'John6666/wai-ani-flux-v10forfp8-fp8-flux',
35
- 'John6666/xe-anime-flux-v04-fp8-flux',
36
- 'John6666/lyh-anime-flux-v2a1-fp8-flux',
37
- 'John6666/carnival-unchained-v10-fp8-flux',
38
- 'Freepik/flux.1-lite-8B-alpha',
39
- 'shauray/FluxDev-HyperSD-merged',
40
- 'mikeyandfriends/PixelWave_FLUX.1-dev_03',
41
- 'terminusresearch/FluxBooru-v0.3',
42
- 'black-forest-labs/FLUX.1-schnell',
43
- # 'ostris/OpenFLUX.1',
44
- 'shuttleai/shuttle-3-diffusion',
45
- 'Laxhar/noobai-XL-1.0',
46
- 'Laxhar/noobai-XL-0.77',
47
- 'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
48
- 'Laxhar/noobai-XL-0.6',
49
- 'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
50
- 'John6666/noobai-cyberfix-v10-sdxl',
51
- 'John6666/noobaiiter-xl-vpred-v075-sdxl',
52
- 'John6666/ripplemix-noob-vpred10-illustrious01-v14-sdxl',
53
- 'John6666/sigmaih-15-sdxl',
54
- 'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
55
- 'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
56
- 'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
57
- 'martineux/nova-unreal10',
58
- 'John6666/mistoon-anime-v10illustrious-sdxl',
59
- 'John6666/hassaku-xl-illustrious-v22-sdxl',
60
- 'John6666/hassaku-xl-illustrious-v31-sdxl',
61
- 'John6666/haruki-mix-illustrious-v10-sdxl',
62
- 'John6666/noobreal-v10-sdxl',
63
- 'John6666/complicated-noobai-merge-vprediction-sdxl',
64
- 'Laxhar/noobai-XL-Vpred-0.9r',
65
- 'Laxhar/noobai-XL-Vpred-0.75s',
66
- 'Laxhar/noobai-XL-Vpred-0.75',
67
- 'Laxhar/noobai-XL-Vpred-0.65s',
68
- 'Laxhar/noobai-XL-Vpred-0.65',
69
- 'Laxhar/noobai-XL-Vpred-0.6',
70
- 'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
71
- 'John6666/cat-tower-noobai-xl-checkpoint-v15vpred-sdxl',
72
- 'John6666/cat-tower-noobai-xl-checkpoint-v20-vpred-sdxl',
73
- 'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
74
- 'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
75
- 'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
76
- 'John6666/chadmix-noobai075-illustrious01-v10-sdxl',
77
- 'OnomaAIResearch/Illustrious-xl-early-release-v0',
78
- 'John6666/illustriousxl-mmmix-v50-sdxl',
79
- 'John6666/illustrious-pencil-xl-v200-sdxl',
80
- 'John6666/obsession-illustriousxl-v21-sdxl',
81
- 'John6666/obsession-illustriousxl-v30-sdxl',
82
- 'John6666/obsession-illustriousxl-v31-sdxl',
83
- 'John6666/one-obsession-13-sdxl',
84
- 'John6666/one-obsession-14-24d-sdxl',
85
- 'John6666/one-obsession-15-noobai-sdxl',
86
- 'John6666/one-obsession-v16-noobai-sdxl',
87
- 'John6666/one-obsession-17-red-sdxl',
88
- 'martineux/oneobs18',
89
- 'martineux/oneobsession19',
90
- 'John6666/cat-tower-noobai-xl-checkpoint-v14-epsilon-pred-sdxl',
91
- 'martineux/cattower-chenkin-xl',
92
- 'John6666/prefect-illustrious-xl-v3-sdxl',
93
- 'martineux/perfect4',
94
- 'martineux/prefectIllustriousXL_v5',
95
- 'John6666/wai-nsfw-illustrious-v70-sdxl',
96
- 'John6666/wai-nsfw-illustrious-sdxl-v140-sdxl',
97
- 'martineux/waiIllustriousSDXL_v160',
98
- 'John6666/illustrious-pony-mix-v3-sdxl',
99
- 'John6666/nova-anime-xl-il-v90-sdxl',
100
- 'John6666/nova-anime-xl-il-v110-sdxl',
101
- 'frankjoshua/novaAnimeXL_ilV140',
102
- 'John6666/nova-orange-xl-re-v10-sdxl',
103
- 'John6666/nova-orange-xl-v110-sdxl',
104
- 'John6666/nova-orange-xl-re-v20-sdxl',
105
- 'John6666/nova-unreal-xl-v60-sdxl',
106
- 'John6666/nova-unreal-xl-v70-sdxl',
107
- 'John6666/nova-unreal-xl-v80-sdxl',
108
- 'martineux/nova-unreal10',
109
- 'John6666/nova-cartoon-xl-v40-sdxl',
110
- 'martineux/novacartoon6',
111
- 'martineux/novareal8',
112
- 'John6666/silvermoon-mix03-illustrious-v10-sdxl',
113
- 'eienmojiki/Anything-XL',
114
- 'eienmojiki/Starry-XL-v5.2',
115
- 'votepurchase/plantMilkModelSuite_walnut',
116
- 'John6666/meinaxl-v2-sdxl',
117
- 'Eugeoter/artiwaifu-diffusion-2.0',
118
- 'comin/IterComp',
119
- 'John6666/epicrealism-xl-v8kiss-sdxl',
120
- 'John6666/epicrealism-xl-v10kiss2-sdxl',
121
- 'John6666/epicrealism-xl-vxiabeast-sdxl',
122
- 'John6666/epicrealism-xl-vxvii-crystal-clear-realism-sdxl',
123
- 'misri/zavychromaxl_v80',
124
- 'SG161222/RealVisXL_V4.0',
125
- 'SG161222/RealVisXL_V5.0',
126
- 'misri/newrealityxlAllInOne_Newreality40',
127
- 'gsdf/CounterfeitXL',
128
- 'WhiteAiZ/autismmixSDXL_autismmixConfetti_diffusers',
129
- 'kitty7779/ponyDiffusionV6XL',
130
- 'GraydientPlatformAPI/aniverse-pony',
131
- 'John6666/ras-real-anime-screencap-v1-sdxl',
132
- 'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
133
- 'John6666/mistoon-anime-ponyalpha-sdxl',
134
- 'John6666/mistoon-xl-copper-v20fast-sdxl',
135
- 'John6666/ebara-mfcg-pony-mix-v12-sdxl',
136
- 'John6666/t-ponynai3-v51-sdxl',
137
- 'John6666/t-ponynai3-v65-sdxl',
138
- 'John6666/t-ponynai3-v7-sdxl',
139
- 'John6666/prefect-pony-xl-v3-sdxl',
140
- 'John6666/prefect-pony-xl-v4-sdxl',
141
- 'John6666/prefect-pony-xl-v50-sdxl',
142
- 'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
143
- 'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
144
- 'John6666/wai-real-mix-v11-sdxl',
145
- 'John6666/wai-shuffle-pdxl-v2-sdxl',
146
- 'John6666/wai-c-v6-sdxl',
147
- 'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
148
- 'John6666/sifw-annihilation-xl-v2-sdxl',
149
- 'John6666/sifw-annihilation-xl-v305illustrious-beta-sdxl',
150
- 'John6666/photo-realistic-pony-v5-sdxl',
151
- 'John6666/pony-realism-v21main-sdxl',
152
- 'John6666/pony-realism-v22main-sdxl',
153
- 'John6666/pony-realism-v23-ultra-sdxl',
154
- 'John6666/cyberrealistic-pony-v65-sdxl',
155
- 'John6666/cyberrealistic-pony-v7-sdxl',
156
- 'John6666/cyberrealistic-pony-v127-alternative-sdxl',
157
- 'GraydientPlatformAPI/realcartoon-pony-diffusion',
158
- 'John6666/nova-anime-xl-pony-v5-sdxl',
159
- 'John6666/autismmix-sdxl-autismmix-pony-sdxl',
160
- 'John6666/aimz-dream-real-pony-mix-v3-sdxl',
161
- 'John6666/prefectious-xl-nsfw-v10-sdxl',
162
- 'GraydientPlatformAPI/iniverseponyRealGuofeng49',
163
- 'John6666/duchaiten-pony-real-v11fix-sdxl',
164
- 'John6666/duchaiten-pony-real-v20-sdxl',
165
- 'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
166
- 'KBlueLeaf/Kohaku-XL-Zeta',
167
- 'cagliostrolab/animagine-xl-3.1',
168
- 'cagliostrolab/animagine-xl-4.0',
169
- 'yodayo-ai/kivotos-xl-2.0',
170
- 'yodayo-ai/holodayo-xl-2.1',
171
- 'yodayo-ai/clandestine-xl-1.0',
172
- 'Raelina/Raehoshi-illust-XL-8',
173
- 'johnkillington/chenkinxmilfynoobai_v20-MLX',
174
- 'martineux/unholydesire5-xl',
175
- 'abacaxthebrave/Unholy_Desire_Mix_ILXL',
176
- 'martineux/diving5',
177
- 'martineux/diving7',
178
- 'martineux/mergestein-animuplus-xl',
179
- 'martineux/mergestein-uncannyr2-xl',
180
- 'martineux/steincustom_V12',
181
- 'martineux/miaomiao-realskin1p25-xl',
182
- 'martineux/miaov18',
183
- 'John6666/garage-mix-noob-vpred-eps-v10-vpred-sdxl',
184
- 'TestOrganizationPleaseIgnore/perfectrsbmixIllustrious_definitivelambda_sdxl',
185
- 'TestOrganizationPleaseIgnore/rinFlanimeIllustrious_v27_sdxl',
186
- 'TestOrganizationPleaseIgnore/rinAnimepopcute_v30_sdxl',
187
- 'TestOrganizationPleaseIgnore/potato_quality_anime_zzz_sdxl',
188
- 'https://huggingface.co/chemwolf/Karmix-XL-v0/resolve/main/Karmix-XL-v0.safetensors?download=true',
189
- 'https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16',
190
- 'https://civitai.com/models/30240?modelVersionId=125771',
191
- 'digiplay/majicMIX_sombre_v2',
192
- 'digiplay/majicMIX_realistic_v6',
193
- 'digiplay/majicMIX_realistic_v7',
194
- 'digiplay/DreamShaper_8',
195
- 'digiplay/BeautifulArt_v1',
196
- 'digiplay/DarkSushi2.5D_v1',
197
- 'digiplay/darkphoenix3D_v1.1',
198
- 'digiplay/BeenYouLiteL11_diffusers',
199
- 'GraydientPlatformAPI/rev-animated2',
200
- 'myxlmynx/cyberrealistic_classic40',
201
- 'GraydientPlatformAPI/cyberreal6',
202
- 'GraydientPlatformAPI/cyberreal5',
203
- 'youknownothing/deliberate-v6',
204
- 'GraydientPlatformAPI/deliberate-cyber3',
205
- 'GraydientPlatformAPI/picx-real',
206
- 'GraydientPlatformAPI/perfectworld6',
207
- 'emilianJR/epiCRealism',
208
- 'votepurchase/counterfeitV30_v30',
209
- 'votepurchase/ChilloutMix',
210
- 'Meina/MeinaMix_V11',
211
- 'Meina/MeinaUnreal_V5',
212
- 'Meina/MeinaPastel_V7',
213
- 'GraydientPlatformAPI/realcartoon3d-17',
214
- 'GraydientPlatformAPI/realcartoon-pixar11',
215
- 'GraydientPlatformAPI/realcartoon-real17',
216
- ]
217
-
218
-
219
- DIFFUSERS_FORMAT_LORAS = [
220
- "nerijs/animation2k-flux",
221
- "XLabs-AI/flux-RealismLora",
222
- "Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design",
223
- ]
224
-
225
- DOWNLOAD_EMBEDS = [
226
- 'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
227
- # 'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
228
- # 'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
229
- ]
230
-
231
- CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
232
- HF_TOKEN = os.environ.get("HF_READ_TOKEN")
233
-
234
- DIRECTORY_MODELS = 'models'
235
- DIRECTORY_LORAS = 'loras'
236
- DIRECTORY_VAES = 'vaes'
237
- DIRECTORY_EMBEDS = 'embedings'
238
- DIRECTORY_UPSCALERS = 'upscalers'
239
-
240
- STORAGE_ROOT = "/home/user/"
241
- CACHE_HF_ROOT = os.path.expanduser("~/.cache/huggingface")
242
- CACHE_HF = os.path.join(CACHE_HF_ROOT, "hub")
243
- if IS_ZERO_GPU:
244
- os.environ["HF_HOME"] = CACHE_HF
245
-
246
- TASK_STABLEPY = {
247
- 'txt2img': 'txt2img',
248
- 'img2img': 'img2img',
249
- 'inpaint': 'inpaint',
250
- # 'canny T2I Adapter': 'sdxl_canny_t2i', # NO HAVE STEP CALLBACK PARAMETERS SO NOT WORKS WITH DIFFUSERS 0.29.0
251
- # 'sketch T2I Adapter': 'sdxl_sketch_t2i',
252
- # 'lineart T2I Adapter': 'sdxl_lineart_t2i',
253
- # 'depth-midas T2I Adapter': 'sdxl_depth-midas_t2i',
254
- # 'openpose T2I Adapter': 'sdxl_openpose_t2i',
255
- 'openpose ControlNet': 'openpose',
256
- 'canny ControlNet': 'canny',
257
- 'mlsd ControlNet': 'mlsd',
258
- 'scribble ControlNet': 'scribble',
259
- 'softedge ControlNet': 'softedge',
260
- 'segmentation ControlNet': 'segmentation',
261
- 'depth ControlNet': 'depth',
262
- 'normalbae ControlNet': 'normalbae',
263
- 'lineart ControlNet': 'lineart',
264
- 'lineart_anime ControlNet': 'lineart_anime',
265
- 'shuffle ControlNet': 'shuffle',
266
- 'ip2p ControlNet': 'ip2p',
267
- 'optical pattern ControlNet': 'pattern',
268
- 'recolor ControlNet': 'recolor',
269
- 'tile ControlNet': 'tile',
270
- 'repaint ControlNet': 'repaint',
271
- }
272
-
273
- TASK_MODEL_LIST = list(TASK_STABLEPY.keys())
274
-
275
- UPSCALER_DICT_GUI = {
276
- None: None,
277
- **{bu: bu for bu in ALL_BUILTIN_UPSCALERS if bu not in ["HAT x4", "DAT x4", "DAT x3", "DAT x2", "SwinIR 4x"]},
278
- # "RealESRGAN_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
279
- "RealESRNet_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
280
- # "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
281
- # "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
282
- # "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
283
- # "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
284
- # "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
285
- "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
286
- "Real-ESRGAN-Anime-finetuning": "https://huggingface.co/danhtran2mind/Real-ESRGAN-Anime-finetuning/resolve/main/Real-ESRGAN-Anime-finetuning.pth",
287
- "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
288
- "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
289
- "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
290
- "lollypop": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
291
- "RealisticRescaler4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
292
- "NickelbackFS4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
293
- }
294
-
295
- UPSCALER_KEYS = list(UPSCALER_DICT_GUI.keys())
296
-
297
- DIFFUSERS_CONTROLNET_MODEL = [
298
- "Automatic",
299
-
300
- "brad-twinkl/controlnet-union-sdxl-1.0-promax",
301
- "xinsir/controlnet-union-sdxl-1.0",
302
- "xinsir/anime-painter",
303
- "Eugeoter/noob-sdxl-controlnet-canny",
304
- "Eugeoter/noob-sdxl-controlnet-lineart_anime",
305
- "Eugeoter/noob-sdxl-controlnet-depth",
306
- "Eugeoter/noob-sdxl-controlnet-normal",
307
- "Eugeoter/noob-sdxl-controlnet-softedge_hed",
308
- "Eugeoter/noob-sdxl-controlnet-scribble_pidinet",
309
- "Eugeoter/noob-sdxl-controlnet-scribble_hed",
310
- "Eugeoter/noob-sdxl-controlnet-manga_line",
311
- "Eugeoter/noob-sdxl-controlnet-lineart_realistic",
312
- "Eugeoter/noob-sdxl-controlnet-depth_midas-v1-1",
313
- "dimitribarbot/controlnet-openpose-sdxl-1.0-safetensors",
314
- "r3gm/controlnet-openpose-sdxl-1.0-fp16",
315
- "r3gm/controlnet-canny-scribble-integrated-sdxl-v2-fp16",
316
- "r3gm/controlnet-union-sdxl-1.0-fp16",
317
- "r3gm/controlnet-lineart-anime-sdxl-fp16",
318
- "r3gm/control_v1p_sdxl_qrcode_monster_fp16",
319
- "r3gm/controlnet-tile-sdxl-1.0-fp16",
320
- "r3gm/controlnet-recolor-sdxl-fp16",
321
- "r3gm/controlnet-openpose-twins-sdxl-1.0-fp16",
322
- "r3gm/controlnet-qr-pattern-sdxl-fp16",
323
- "Yakonrus/SDXL_Controlnet_Tile_Realistic_v2",
324
- "TheMistoAI/MistoLine",
325
- "briaai/BRIA-2.3-ControlNet-Recoloring",
326
- "briaai/BRIA-2.3-ControlNet-Canny",
327
-
328
- "lllyasviel/control_v11p_sd15_openpose",
329
- "lllyasviel/control_v11p_sd15_canny",
330
- "lllyasviel/control_v11p_sd15_mlsd",
331
- "lllyasviel/control_v11p_sd15_scribble",
332
- "lllyasviel/control_v11p_sd15_softedge",
333
- "lllyasviel/control_v11p_sd15_seg",
334
- "lllyasviel/control_v11f1p_sd15_depth",
335
- "lllyasviel/control_v11p_sd15_normalbae",
336
- "lllyasviel/control_v11p_sd15_lineart",
337
- "lllyasviel/control_v11p_sd15s2_lineart_anime",
338
- "lllyasviel/control_v11e_sd15_shuffle",
339
- "lllyasviel/control_v11e_sd15_ip2p",
340
- "lllyasviel/control_v11p_sd15_inpaint",
341
- "monster-labs/control_v1p_sd15_qrcode_monster",
342
- "lllyasviel/control_v11f1e_sd15_tile",
343
- "latentcat/control_v1p_sd15_brightness",
344
- "yuanqiuye/qrcode_controlnet_v3",
345
-
346
- "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
347
- # "Shakker-Labs/FLUX.1-dev-ControlNet-Pose",
348
- # "Shakker-Labs/FLUX.1-dev-ControlNet-Depth",
349
- # "jasperai/Flux.1-dev-Controlnet-Upscaler",
350
- # "jasperai/Flux.1-dev-Controlnet-Depth",
351
- # "jasperai/Flux.1-dev-Controlnet-Surface-Normals",
352
- # "XLabs-AI/flux-controlnet-canny-diffusers",
353
- # "XLabs-AI/flux-controlnet-hed-diffusers",
354
- # "XLabs-AI/flux-controlnet-depth-diffusers",
355
- # "InstantX/FLUX.1-dev-Controlnet-Union",
356
- # "InstantX/FLUX.1-dev-Controlnet-Canny",
357
- ]
358
-
359
- PROMPT_W_OPTIONS = [(pwf, pwf) for pwf in PROMPT_WEIGHT_OPTIONS_PRIORITY]
360
- PROMPT_W_OPTIONS[0] = ("Classic format: (word:weight)", "Classic")
361
- PROMPT_W_OPTIONS[1] = ("Compel format: (word)weight", "Compel")
362
-
363
- WARNING_MSG_VAE = (
364
- "Use the right VAE for your model to maintain image quality. The wrong"
365
- " VAE can lead to poor results, like blurriness in the generated images."
366
- )
367
-
368
- SDXL_TASK = [k for k, v in TASK_STABLEPY.items() if v in SDXL_TASKS]
369
- SD_TASK = [k for k, v in TASK_STABLEPY.items() if v in SD15_TASKS]
370
- FLUX_TASK = list(TASK_STABLEPY.keys())[:3] + [k for k, v in TASK_STABLEPY.items() if v in FLUX_CN_UNION_MODES.keys()]
371
-
372
- MODEL_TYPE_TASK = {
373
- "SD 1.5": SD_TASK,
374
- "SDXL": SDXL_TASK,
375
- "FLUX": FLUX_TASK,
376
- }
377
-
378
- MODEL_TYPE_CLASS = {
379
- "diffusers:StableDiffusionPipeline": "SD 1.5",
380
- "diffusers:StableDiffusionXLPipeline": "SDXL",
381
- "diffusers:FluxPipeline": "FLUX",
382
- }
383
-
384
- DIFFUSECRAFT_CHECKPOINT_NAME = {
385
- "sd1.5": "SD 1.5",
386
- "sdxl": "SDXL",
387
- "flux-dev": "FLUX",
388
- "flux-schnell": "FLUX",
389
- }
390
-
391
- POST_PROCESSING_SAMPLER = ["Use same sampler"] + [
392
- name_s for name_s in scheduler_names if "Auto-Loader" not in name_s
393
- ]
394
-
395
- IP_MODELS = []
396
- ALL_IPA = sorted(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL))
397
-
398
- for origin_name in ALL_IPA:
399
- suffixes = []
400
- if origin_name in IP_ADAPTERS_SD:
401
- suffixes.append("sd1.5")
402
- if origin_name in IP_ADAPTERS_SDXL:
403
- suffixes.append("sdxl")
404
- ref_name = f"{origin_name} ({'/'.join(suffixes)})"
405
- IP_MODELS.append((ref_name, origin_name))
406
-
407
- MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
408
-
409
- SUBTITLE_GUI = (
410
- "### This demo uses [diffusers](https://github.com/huggingface/diffusers)"
411
- " to perform different tasks in image generation."
412
- )
413
-
414
- msg_zero = "" if not IS_ZERO_GPU else "- The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, if you submit expensive tasks, the operation may be canceled upon reaching the maximum allowed time with 'GPU TASK ABORTED'."
415
-
416
- HELP_GUI = (
417
- f"""### Help:
418
- {msg_zero}
419
- - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
420
- - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
421
- """
422
- )
423
-
424
- EXAMPLES_GUI_HELP = (
425
- """### The following examples perform specific tasks:
426
- 1. Generation with SDXL and upscale
427
- 2. Generation with FLUX dev
428
- 3. ControlNet Canny SDXL
429
- 4. Optical pattern (Optical illusion) SDXL
430
- 5. Convert an image to a coloring drawing
431
- 6. V prediction model inference
432
- 7. V prediction model sd_embed variant inference
433
- 8. ControlNet OpenPose SD 1.5 and Latent upscale
434
-
435
- - Different tasks can be performed, such as img2img or using the IP adapter, to preserve a person's appearance or a specific style based on an image.
436
- """
437
- )
438
-
439
- EXAMPLES_GUI = [
440
- [
441
- "splatter paint theme, 1girl, frame center, pretty face, face with artistic paint artwork, feminism, long hair, upper body view, futuristic expression illustrative painted background, origami, stripes, explosive paint splashes behind her, hand on cheek pose, strobe lighting, masterpiece photography creative artwork, golden morning light, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
442
- "logo, artist name, (worst quality, normal quality), bad-artist, ((bad anatomy)), ((bad hands)), ((bad proportions)), ((duplicate limbs)), ((fused limbs)), ((interlocking fingers)), ((poorly drawn face)), high contrast., score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
443
- 28,
444
- 5.0,
445
- -1,
446
- "None",
447
- 0.33,
448
- "DPM++ 2M SDE",
449
- 1152,
450
- 896,
451
- "John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl",
452
- "txt2img",
453
- "image.webp", # img conttol
454
- 1024, # img resolution
455
- 0.35, # strength
456
- 1.0, # cn scale
457
- 0.0, # cn start
458
- 1.0, # cn end
459
- "Classic-no_norm",
460
- "Nearest",
461
- 45,
462
- False,
463
- ],
464
- [
465
- "a digital illustration of a movie poster titled 'Finding Emo', finding nemo parody poster, featuring a depressed cartoon clownfish with black emo hair, eyeliner, and piercings, bored expression, swimming in a dark underwater scene, in the background, movie title in a dripping, grungy font, moody blue and purple color palette",
466
- "",
467
- 24,
468
- 3.5,
469
- -1,
470
- "None",
471
- 0.33,
472
- "FlowMatch Euler",
473
- 1152,
474
- 896,
475
- "black-forest-labs/FLUX.1-dev",
476
- "txt2img",
477
- None, # img conttol
478
- 1024, # img resolution
479
- 0.35, # strength
480
- 1.0, # cn scale
481
- 0.0, # cn start
482
- 1.0, # cn end
483
- "Classic",
484
- None,
485
- 70,
486
- True,
487
- ],
488
- [
489
- "((masterpiece)), best quality, blonde disco girl, detailed face, realistic face, realistic hair, dynamic pose, pink pvc, intergalactic disco background, pastel lights, dynamic contrast, airbrush, fine detail, 70s vibe, midriff",
490
- "(worst quality:1.2), (bad quality:1.2), (poor quality:1.2), (missing fingers:1.2), bad-artist-anime, bad-artist, bad-picture-chill-75v",
491
- 48,
492
- 3.5,
493
- -1,
494
- "None",
495
- 0.33,
496
- "DPM++ 2M SDE Ef",
497
- 1024,
498
- 1024,
499
- "John6666/epicrealism-xl-v10kiss2-sdxl",
500
- "canny ControlNet",
501
- "image.webp", # img conttol
502
- 1024, # img resolution
503
- 0.35, # strength
504
- 1.0, # cn scale
505
- 0.0, # cn start
506
- 1.0, # cn end
507
- "Classic",
508
- None,
509
- 44,
510
- False,
511
- ],
512
- [
513
- "cinematic scenery old city ruins",
514
- "(worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), (illustration, 3d, 2d, painting, cartoons, sketch, blurry, film grain, noise), (low quality, worst quality:1.2)",
515
- 50,
516
- 4.0,
517
- -1,
518
- "None",
519
- 0.33,
520
- "Euler a",
521
- 1024,
522
- 1024,
523
- "SG161222/RealVisXL_V5.0",
524
- "optical pattern ControlNet",
525
- "spiral_no_transparent.png", # img conttol
526
- 1024, # img resolution
527
- 0.35, # strength
528
- 1.0, # cn scale
529
- 0.05, # cn start
530
- 0.8, # cn end
531
- "Classic",
532
- None,
533
- 35,
534
- False,
535
- ],
536
- [
537
- "black and white, line art, coloring drawing, clean line art, black strokes, no background, white, black, free lines, black scribbles, on paper, A blend of comic book art and lineart full of black and white color, masterpiece, high-resolution, trending on Pixiv fan box, palette knife, brush strokes, two-dimensional, planar vector, T-shirt design, stickers, and T-shirt design, vector art, fantasy art, Adobe Illustrator, hand-painted, digital painting, low polygon, soft lighting, aerial view, isometric style, retro aesthetics, 8K resolution, black sketch lines, monochrome, invert color",
538
- "color, red, green, yellow, colored, duplicate, blurry, abstract, disfigured, deformed, animated, toy, figure, framed, 3d, bad art, poorly drawn, extra limbs, close up, b&w, weird colors, blurry, watermark, blur haze, 2 heads, long neck, watermark, elongated body, cropped image, out of frame, draft, deformed hands, twisted fingers, double image, malformed hands, multiple heads, extra limb, ugly, poorly drawn hands, missing limb, cut-off, over satured, grain, lowères, bad anatomy, poorly drawn face, mutation, mutated, floating limbs, disconnected limbs, out of focus, long body, disgusting, extra fingers, groos proportions, missing arms, mutated hands, cloned face, missing legs, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft, deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation, bluelish, blue",
539
- 20,
540
- 4.0,
541
- -1,
542
- ("loras/Coloring_book_-_LineArt.safetensors" if os.path.exists("loras/Coloring_book_-_LineArt.safetensors") else "None"),
543
- 1.0,
544
- "DPM++ 2M SDE",
545
- 1024,
546
- 1024,
547
- "eienmojiki/Anything-XL",
548
- "lineart ControlNet",
549
- "color_image.png", # img conttol
550
- 896, # img resolution
551
- 0.35, # strength
552
- 1.0, # cn scale
553
- 0.0, # cn start
554
- 1.0, # cn end
555
- "Compel",
556
- None,
557
- 35,
558
- False,
559
- ],
560
- [
561
- "[mochizuki_shiina], [syuri22], newest, reimu, solo, outdoors, water, flower, lantern",
562
- "worst quality, normal quality, old, sketch,",
563
- 28,
564
- 7.0,
565
- -1,
566
- "None",
567
- 0.33,
568
- "DPM 3M Ef",
569
- 1600,
570
- 1024,
571
- "Laxhar/noobai-XL-Vpred-1.0",
572
- "txt2img",
573
- "color_image.png", # img conttol
574
- 1024, # img resolution
575
- 0.35, # strength
576
- 1.0, # cn scale
577
- 0.0, # cn start
578
- 1.0, # cn end
579
- "Classic",
580
- None,
581
- 30,
582
- False,
583
- ],
584
- [
585
- "[mochizuki_shiina], [syuri22], newest, multiple girls, 2girls, earrings, jewelry, gloves, purple eyes, black hair, looking at viewer, nail polish, hat, smile, open mouth, fingerless gloves, sleeveless, :d, upper body, blue eyes, closed mouth, black gloves, hands up, long hair, shirt, bare shoulders, white headwear, blush, black headwear, blue nails, upper teeth only, short hair, white gloves, white shirt, teeth, rabbit hat, star earrings, purple nails, pink hair, detached sleeves, fingernails, fake animal ears, animal hat, sleeves past wrists, black shirt, medium hair, fur trim, sleeveless shirt, turtleneck, long sleeves, rabbit ears, star \\(symbol\\)",
586
- "worst quality, normal quality, old, sketch,",
587
- 28,
588
- 7.0,
589
- -1,
590
- "None",
591
- 0.33,
592
- "DPM 3M Ef",
593
- 1600,
594
- 1024,
595
- "Laxhar/noobai-XL-Vpred-1.0",
596
- "txt2img",
597
- "color_image.png", # img conttol
598
- 1024, # img resolution
599
- 0.35, # strength
600
- 1.0, # cn scale
601
- 0.0, # cn start
602
- 1.0, # cn end
603
- "Classic-sd_embed",
604
- None,
605
- 30,
606
- False,
607
- ],
608
- [
609
- "1girl,face,curly hair,red hair,white background,",
610
- "(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark,",
611
- 38,
612
- 5.0,
613
- -1,
614
- "None",
615
- 0.33,
616
- "DPM++ 2M SDE",
617
- 512,
618
- 512,
619
- "digiplay/majicMIX_realistic_v7",
620
- "openpose ControlNet",
621
- "image.webp", # img conttol
622
- 1024, # img resolution
623
- 0.35, # strength
624
- 1.0, # cn scale
625
- 0.0, # cn start
626
- 0.9, # cn end
627
- "Classic-original",
628
- "Latent (antialiased)",
629
- 46,
630
- False,
631
- ],
632
- ]
633
-
634
- RESOURCES = (
635
- """### Resources
636
- - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
637
- - Try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
638
- - `DiffuseCraft` in Colab:[link](https://github.com/R3gm/DiffuseCraft?tab=readme-ov-file#diffusecraft).
639
- """
640
- )
image_processor.py DELETED
@@ -1,130 +0,0 @@
1
- import spaces
2
- import gradio as gr
3
- from stablepy import Preprocessor
4
-
5
- PREPROCESSOR_TASKS_LIST = [
6
- "Canny",
7
- "Openpose",
8
- "DPT",
9
- "Midas",
10
- "ZoeDepth",
11
- "DepthAnything",
12
- "HED",
13
- "PidiNet",
14
- "TEED",
15
- "Lineart",
16
- "LineartAnime",
17
- "Anyline",
18
- "Lineart standard",
19
- "SegFormer",
20
- "UPerNet",
21
- "ContentShuffle",
22
- "Recolor",
23
- "Blur",
24
- "MLSD",
25
- "NormalBae",
26
- ]
27
-
28
- preprocessor = Preprocessor()
29
-
30
-
31
- def process_inputs(
32
- image,
33
- name,
34
- resolution,
35
- precessor_resolution,
36
- low_threshold,
37
- high_threshold,
38
- value_threshod,
39
- distance_threshold,
40
- recolor_mode,
41
- recolor_gamma_correction,
42
- blur_k_size,
43
- pre_openpose_extra,
44
- hed_scribble,
45
- pre_pidinet_safe,
46
- pre_lineart_coarse,
47
- use_cuda,
48
- ):
49
- if not image:
50
- raise ValueError("To use this, simply upload an image.")
51
-
52
- preprocessor.load(name, False)
53
-
54
- params = dict(
55
- image_resolution=resolution,
56
- detect_resolution=precessor_resolution,
57
- low_threshold=low_threshold,
58
- high_threshold=high_threshold,
59
- thr_v=value_threshod,
60
- thr_d=distance_threshold,
61
- mode=recolor_mode,
62
- gamma_correction=recolor_gamma_correction,
63
- blur_sigma=blur_k_size,
64
- hand_and_face=pre_openpose_extra,
65
- scribble=hed_scribble,
66
- safe=pre_pidinet_safe,
67
- coarse=pre_lineart_coarse,
68
- )
69
-
70
- if use_cuda:
71
- @spaces.GPU(duration=15)
72
- def wrapped_func():
73
- preprocessor.to("cuda")
74
- return preprocessor(image, **params)
75
- return wrapped_func()
76
-
77
- return preprocessor(image, **params)
78
-
79
-
80
- def preprocessor_tab():
81
- with gr.Row():
82
- with gr.Column():
83
- pre_image = gr.Image(label="Image", type="pil", sources=["upload"])
84
- pre_options = gr.Dropdown(label="Preprocessor", choices=PREPROCESSOR_TASKS_LIST, value=PREPROCESSOR_TASKS_LIST[0])
85
- pre_img_resolution = gr.Slider(
86
- minimum=64, maximum=4096, step=64, value=1024, label="Image Resolution",
87
- info="The maximum proportional size of the generated image based on the uploaded image."
88
- )
89
- pre_start = gr.Button(value="PROCESS IMAGE", variant="primary")
90
- with gr.Accordion("Advanced Settings", open=False):
91
- with gr.Column():
92
- pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
93
- pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
94
- pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
95
- pre_value_threshold = gr.Slider(minimum=0., maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
96
- pre_distance_threshold = gr.Slider(minimum=0., maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
97
- pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
98
- pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
99
- pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
100
- pre_openpose_extra = gr.Checkbox(value=True, label="'OPENPOSE' face and hand")
101
- pre_hed_scribble = gr.Checkbox(value=False, label="'HED' scribble")
102
- pre_pidinet_safe = gr.Checkbox(value=False, label="'PIDINET' safe")
103
- pre_lineart_coarse = gr.Checkbox(value=False, label="'LINEART' coarse")
104
- pre_use_cuda = gr.Checkbox(value=False, label="Use CUDA")
105
-
106
- with gr.Column():
107
- pre_result = gr.Image(label="Result", type="pil", interactive=False, format="png")
108
-
109
- pre_start.click(
110
- fn=process_inputs,
111
- inputs=[
112
- pre_image,
113
- pre_options,
114
- pre_img_resolution,
115
- pre_processor_resolution,
116
- pre_low_threshold,
117
- pre_high_threshold,
118
- pre_value_threshold,
119
- pre_distance_threshold,
120
- pre_recolor_mode,
121
- pre_recolor_gamma_correction,
122
- pre_blur_k_size,
123
- pre_openpose_extra,
124
- pre_hed_scribble,
125
- pre_pidinet_safe,
126
- pre_lineart_coarse,
127
- pre_use_cuda,
128
- ],
129
- outputs=[pre_result],
130
- )
lora_dict.json ADDED
File without changes
model_dict.json ADDED
File without changes
modutils.py ADDED
@@ -0,0 +1,1290 @@
1
+ import spaces
2
+ import json
3
+ import gradio as gr
4
+ from huggingface_hub import HfApi
5
+ import os
6
+ from pathlib import Path
7
+ from PIL import Image
8
+
9
+
10
+ from env import (HF_LORA_PRIVATE_REPOS1, HF_LORA_PRIVATE_REPOS2,
11
+ HF_MODEL_USER_EX, HF_MODEL_USER_LIKES, DIFFUSERS_FORMAT_LORAS,
12
+ directory_loras, hf_read_token, HF_TOKEN, CIVITAI_API_KEY)
13
+
14
+
15
+ MODEL_TYPE_DICT = {
16
+ "diffusers:StableDiffusionPipeline": "SD 1.5",
17
+ "diffusers:StableDiffusionXLPipeline": "SDXL",
18
+ "diffusers:FluxPipeline": "FLUX",
19
+ }
20
+
21
+
22
+ def get_user_agent():
23
+ return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
24
+
25
+
26
+ def to_list(s):
27
+ return [x.strip() for x in s.split(",") if not s == ""]
28
+
29
+
30
+ def list_uniq(l):
31
+ return sorted(set(l), key=l.index)
32
+
33
+
34
+ def list_sub(a, b):
35
+ return [e for e in a if e not in b]
36
+
37
+
38
+ def is_repo_name(s):
39
+ import re
40
+ return re.fullmatch(r'^[^/]+?/[^/]+?$', s)
41
+
42
+
43
+ from translatepy import Translator
44
+ translator = Translator()
45
+ def translate_to_en(input: str):
46
+ try:
47
+ output = str(translator.translate(input, 'English'))
48
+ except Exception as e:
49
+ output = input
50
+ print(e)
51
+ return output
52
+
53
+
54
+ def get_local_model_list(dir_path):
55
+ model_list = []
56
+ valid_extensions = ('.ckpt', '.pt', '.pth', '.safetensors', '.bin')
57
+ for file in Path(dir_path).glob("*"):
58
+ if file.suffix in valid_extensions:
59
+ file_path = str(Path(f"{dir_path}/{file.name}"))
60
+ model_list.append(file_path)
61
+ return model_list
62
+
63
+
64
+ def download_things(directory, url, hf_token="", civitai_api_key=""):
65
+ url = url.strip()
66
+ if "drive.google.com" in url:
67
+ original_dir = os.getcwd()
68
+ os.chdir(directory)
69
+ os.system(f"gdown --fuzzy {url}")
70
+ os.chdir(original_dir)
71
+ elif "huggingface.co" in url:
72
+ url = url.replace("?download=true", "")
73
+ # url = urllib.parse.quote(url, safe=':/') # fix encoding
74
+ if "/blob/" in url:
75
+ url = url.replace("/blob/", "/resolve/")
76
+ user_header = f'"Authorization: Bearer {hf_token}"'
77
+ if hf_token:
78
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
79
+ else:
80
+ os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
81
+ elif "civitai.com" in url:
82
+ if "?" in url:
83
+ url = url.split("?")[0]
84
+ if civitai_api_key:
85
+ url = url + f"?token={civitai_api_key}"
86
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
87
+ else:
88
+ print("\033[91mYou need an API key to download Civitai models.\033[0m")
89
+ else:
90
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
91
+
92
+
93
+ def escape_lora_basename(basename: str):
94
+ return basename.replace(".", "_").replace(" ", "_").replace(",", "")
95
+
96
+
97
+ def to_lora_key(path: str):
98
+ return escape_lora_basename(Path(path).stem)
99
+
100
+
101
+ def to_lora_path(key: str):
102
+ if Path(key).is_file(): return key
103
+ path = Path(f"{directory_loras}/{escape_lora_basename(key)}.safetensors")
104
+ return str(path)
105
+
106
+
107
+ def safe_float(input):
108
+ output = 1.0
109
+ try:
110
+ output = float(input)
111
+ except Exception:
112
+ output = 1.0
113
+ return output
114
+
115
+
116
+ def save_images(images: list[Image.Image], metadatas: list[str]):
117
+ from PIL import PngImagePlugin
118
+ import uuid
119
+ try:
120
+ output_images = []
121
+ for image, metadata in zip(images, metadatas):
122
+ info = PngImagePlugin.PngInfo()
123
+ info.add_text("parameters", metadata)
124
+ savefile = f"{str(uuid.uuid4())}.png"
125
+ image.save(savefile, "PNG", pnginfo=info)
126
+ output_images.append(str(Path(savefile).resolve()))
127
+ return output_images
128
+ except Exception as e:
129
+ print(f"Failed to save image file: {e}")
130
+ raise Exception(f"Failed to save image file:") from e
131
+
132
+
133
+ def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
134
+ from datetime import datetime, timezone, timedelta
135
+ progress(0, desc="Updating gallery...")
136
+ dt_now = datetime.now(timezone(timedelta(hours=9)))
137
+ basename = dt_now.strftime('%Y%m%d_%H%M%S_')
138
+ i = 1
139
+ if not images: return images, gr.update(visible=False)
140
+ output_images = []
141
+ output_paths = []
142
+ for image in images:
143
+ filename = basename + str(i) + ".png"
144
+ i += 1
145
+ oldpath = Path(image[0])
146
+ newpath = oldpath
147
+ try:
148
+ if oldpath.exists():
149
+ newpath = oldpath.resolve().rename(Path(filename).resolve())
150
+ except Exception as e:
151
+ print(e)
152
+ finally:
153
+ output_paths.append(str(newpath))
154
+ output_images.append((str(newpath), str(filename)))
155
+ progress(1, desc="Gallery updated.")
156
+ return gr.update(value=output_images), gr.update(value=output_paths, visible=True)
157
+
158
+
159
+ def download_private_repo(repo_id, dir_path, is_replace):
160
+ from huggingface_hub import snapshot_download
161
+ if not hf_read_token: return
162
+ try:
163
+ snapshot_download(repo_id=repo_id, local_dir=dir_path, allow_patterns=['*.ckpt', '*.pt', '*.pth', '*.safetensors', '*.bin'], use_auth_token=hf_read_token)
164
+ except Exception as e:
165
+ print(f"Error: Failed to download {repo_id}.")
166
+ print(e)
167
+ return
168
+ if is_replace:
169
+ for file in Path(dir_path).glob("*"):
170
+ if file.exists() and "." in file.stem or " " in file.stem and file.suffix in ['.ckpt', '.pt', '.pth', '.safetensors', '.bin']:
171
+ newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}')
172
+ file.resolve().rename(newpath.resolve())
173
+
174
+
175
+ private_model_path_repo_dict = {} # {"local filepath": "huggingface repo_id", ...}
176
+
177
+
178
+ def get_private_model_list(repo_id, dir_path):
179
+ global private_model_path_repo_dict
180
+ api = HfApi()
181
+ if not hf_read_token: return []
182
+ try:
183
+ files = api.list_repo_files(repo_id, token=hf_read_token)
184
+ except Exception as e:
185
+ print(f"Error: Failed to list {repo_id}.")
186
+ print(e)
187
+ return []
188
+ model_list = []
189
+ for file in files:
190
+ path = Path(f"{dir_path}/{file}")
191
+ if path.suffix in ['.ckpt', '.pt', '.pth', '.safetensors', '.bin']:
192
+ model_list.append(str(path))
193
+ for model in model_list:
194
+ private_model_path_repo_dict[model] = repo_id
195
+ return model_list
196
+
197
+
198
+ def download_private_file(repo_id, path, is_replace):
199
+ from huggingface_hub import hf_hub_download
200
+ file = Path(path)
201
+ newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}') if is_replace else file
202
+ if not hf_read_token or newpath.exists(): return
203
+ filename = file.name
204
+ dirname = file.parent.name
205
+ try:
206
+ hf_hub_download(repo_id=repo_id, filename=filename, local_dir=dirname, use_auth_token=hf_read_token)
207
+ except Exception as e:
208
+ print(f"Error: Failed to download {filename}.")
209
+ print(e)
210
+ return
211
+ if is_replace:
212
+ file.resolve().rename(newpath.resolve())
213
+
214
+
215
+ def download_private_file_from_somewhere(path, is_replace):
216
+ if not path in private_model_path_repo_dict.keys(): return
217
+ repo_id = private_model_path_repo_dict.get(path, None)
218
+ download_private_file(repo_id, path, is_replace)
219
+
220
+
221
+ model_id_list = []
222
+ def get_model_id_list():
223
+ global model_id_list
224
+ if len(model_id_list) != 0: return model_id_list
225
+ api = HfApi()
226
+ model_ids = []
227
+ try:
228
+ models_likes = []
229
+ for author in HF_MODEL_USER_LIKES:
230
+ models_likes.extend(api.list_models(author=author, task="text-to-image", cardData=True, sort="likes"))
231
+ models_ex = []
232
+ for author in HF_MODEL_USER_EX:
233
+ models_ex = api.list_models(author=author, task="text-to-image", cardData=True, sort="last_modified")
234
+ except Exception as e:
235
+ print(f"Error: Failed to list {author}'s models.")
236
+ print(e)
237
+ return model_ids
238
+ for model in models_likes:
239
+ model_ids.append(model.id) if not model.private else ""
240
+ anime_models = []
241
+ real_models = []
242
+ anime_models_flux = []
243
+ real_models_flux = []
244
+ for model in models_ex:
245
+ if not model.private and not model.gated:
246
+ if "diffusers:FluxPipeline" in model.tags: anime_models_flux.append(model.id) if "anime" in model.tags else real_models_flux.append(model.id)
247
+ else: anime_models.append(model.id) if "anime" in model.tags else real_models.append(model.id)
248
+ model_ids.extend(anime_models)
249
+ model_ids.extend(real_models)
250
+ model_ids.extend(anime_models_flux)
251
+ model_ids.extend(real_models_flux)
252
+ model_id_list = model_ids.copy()
253
+ return model_ids
254
+
255
+
256
+ model_id_list = get_model_id_list()
257
+
258
+
259
+ def get_t2i_model_info(repo_id: str):
260
+ api = HfApi(token=HF_TOKEN)
261
+ try:
262
+ if not is_repo_name(repo_id): return ""
263
+ model = api.model_info(repo_id=repo_id, timeout=5.0)
264
+ except Exception as e:
265
+ print(f"Error: Failed to get {repo_id}'s info.")
266
+ print(e)
267
+ return ""
268
+ if model.private or model.gated: return ""
269
+ tags = model.tags
270
+ info = []
271
+ url = f"https://huggingface.co/{repo_id}/"
272
+ if not 'diffusers' in tags: return ""
273
+ for k, v in MODEL_TYPE_DICT.items():
274
+ if k in tags: info.append(v)
275
+ if model.card_data and model.card_data.tags:
276
+ info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
277
+ info.append(f"DLs: {model.downloads}")
278
+ info.append(f"likes: {model.likes}")
279
+ info.append(model.last_modified.strftime("lastmod: %Y-%m-%d"))
280
+ md = f"Model Info: {', '.join(info)}, [Model Repo]({url})"
281
+ return gr.update(value=md)
282
+
283
+
284
+ def get_tupled_model_list(model_list):
285
+ if not model_list: return []
286
+ tupled_list = []
287
+ for repo_id in model_list:
288
+ api = HfApi()
289
+ try:
290
+ if not api.repo_exists(repo_id): continue
291
+ model = api.model_info(repo_id=repo_id)
292
+ except Exception as e:
293
+ print(e)
294
+ continue
295
+ if model.private or model.gated: continue
296
+ tags = model.tags
297
+ info = []
298
+ if not 'diffusers' in tags: continue
299
+ for k, v in MODEL_TYPE_DICT.items():
300
+ if k in tags: info.append(v)
301
+ if model.card_data and model.card_data.tags:
302
+ info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
303
+ if "pony" in info:
304
+ info.remove("pony")
305
+ name = f"{repo_id} (Pony🐴, {', '.join(info)})"
306
+ else:
307
+ name = f"{repo_id} ({', '.join(info)})"
308
+ tupled_list.append((name, repo_id))
309
+ return tupled_list
310
+
311
+
312
+ private_lora_dict = {}
313
+ try:
314
+ with open('lora_dict.json', encoding='utf-8') as f:
315
+ d = json.load(f)
316
+ for k, v in d.items():
317
+ private_lora_dict[escape_lora_basename(k)] = v
318
+ except Exception as e:
319
+ print(e)
320
+ loras_dict = {"None": ["", "", "", "", ""], "": ["", "", "", "", ""]} | private_lora_dict.copy()
321
+ civitai_not_exists_list = []
322
+ loras_url_to_path_dict = {} # {"URL to download": "local filepath", ...}
323
+ civitai_lora_last_results = {} # {"URL to download": {search results}, ...}
324
+ all_lora_list = []
325
+
326
+
327
+ private_lora_model_list = []
328
+ def get_private_lora_model_lists():
329
+ global private_lora_model_list
330
+ if len(private_lora_model_list) != 0: return private_lora_model_list
331
+ models1 = []
332
+ models2 = []
333
+ for repo in HF_LORA_PRIVATE_REPOS1:
334
+ models1.extend(get_private_model_list(repo, directory_loras))
335
+ for repo in HF_LORA_PRIVATE_REPOS2:
336
+ models2.extend(get_private_model_list(repo, directory_loras))
337
+ models = list_uniq(models1 + sorted(models2))
338
+ private_lora_model_list = models.copy()
339
+ return models
340
+
341
+
342
+ private_lora_model_list = get_private_lora_model_lists()
343
+
344
+
345
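+ # Look up a local LoRA file on Civitai by its SHA-256 hash and return [trigger words, base model, version name, model page URL, sample image URL].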
+ def get_civitai_info(path):
346
+ global civitai_not_exists_list
347
+ import requests
348
+ from urllib3.util import Retry
349
+ from requests.adapters import HTTPAdapter
350
+ if path in set(civitai_not_exists_list): return ["", "", "", "", ""]
351
+ if not Path(path).exists(): return None
352
+ user_agent = get_user_agent()
353
+ headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
354
+ base_url = 'https://civitai.com/api/v1/model-versions/by-hash/'
355
+ params = {}
356
+ session = requests.Session()
357
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
358
+ session.mount("https://", HTTPAdapter(max_retries=retries))
359
+ import hashlib
360
+ with open(path, 'rb') as file:
361
+ file_data = file.read()
362
+ hash_sha256 = hashlib.sha256(file_data).hexdigest()
363
+ url = base_url + hash_sha256
364
+ try:
365
+ r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
366
+ except Exception as e:
367
+ print(e)
368
+ return ["", "", "", "", ""]
369
+ if not r.ok: return None
370
+ json = r.json()
371
+ if not 'baseModel' in json:
372
+ civitai_not_exists_list.append(path)
373
+ return ["", "", "", "", ""]
374
+ items = []
375
+ items.append(" / ".join(json['trainedWords']))
376
+ items.append(json['baseModel'])
377
+ items.append(json['model']['name'])
378
+ items.append(f"https://civitai.com/models/{json['modelId']}")
379
+ items.append(json['images'][0]['url'])
380
+ return items
381
+
382
+
383
+ def get_lora_model_list():
384
+ loras = list_uniq(get_private_lora_model_lists() + get_local_model_list(directory_loras) + DIFFUSERS_FORMAT_LORAS)
385
+ loras.insert(0, "None")
386
+ loras.insert(0, "")
387
+ return loras
388
+
389
+
390
+ def get_all_lora_list():
391
+ global all_lora_list
392
+ loras = get_lora_model_list()
393
+ all_lora_list = loras.copy()
394
+ return loras
395
+
396
+
397
+ def get_all_lora_tupled_list():
398
+ global loras_dict
399
+ models = get_all_lora_list()
400
+ if not models: return []
401
+ tupled_list = []
402
+ for model in models:
403
+ #if not model: continue # to avoid GUI-related bug
404
+ basename = Path(model).stem
405
+ key = to_lora_key(model)
406
+ items = None
407
+ if key in loras_dict.keys():
408
+ items = loras_dict.get(key, None)
409
+ else:
410
+ items = get_civitai_info(model)
411
+ if items is not None:
412
+ loras_dict[key] = items
413
+ name = basename
414
+ value = model
415
+ if items and items[2] != "":
416
+ if items[1] == "Pony":
417
+ name = f"{basename} (for {items[1]}🐴, {items[2]})"
418
+ else:
419
+ name = f"{basename} (for {items[1]}, {items[2]})"
420
+ tupled_list.append((name, value))
421
+ return tupled_list
422
+
423
+
424
+ def update_lora_dict(path):
425
+ global loras_dict
426
+ key = escape_lora_basename(Path(path).stem)
427
+ if key in loras_dict.keys(): return
428
+ items = get_civitai_info(path)
429
+ if items is None: return
430
+ loras_dict[key] = items
431
+
432
+
433
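+ # Download comma-separated LoRA URLs into directory_loras, rename the files to escaped basenames, and return the path of the last download.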
+ def download_lora(dl_urls: str):
434
+ global loras_url_to_path_dict
435
+ dl_path = ""
436
+ before = get_local_model_list(directory_loras)
437
+ urls = []
438
+ for url in [url.strip() for url in dl_urls.split(',')]:
439
+ local_path = f"{directory_loras}/{url.split('/')[-1]}"
440
+ if not Path(local_path).exists():
441
+ download_things(directory_loras, url, HF_TOKEN, CIVITAI_API_KEY)
442
+ urls.append(url)
443
+ after = get_local_model_list(directory_loras)
444
+ new_files = list_sub(after, before)
445
+ i = 0
446
+ for file in new_files:
447
+ path = Path(file)
448
+ if path.exists():
449
+ new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
450
+ path.resolve().rename(new_path.resolve())
451
+ loras_url_to_path_dict[urls[i]] = str(new_path)
452
+ update_lora_dict(str(new_path))
453
+ dl_path = str(new_path)
454
+ i += 1
455
+ return dl_path
456
+
457
+
458
+ def copy_lora(path: str, new_path: str):
459
+ import shutil
460
+ if path == new_path: return new_path
461
+ cpath = Path(path)
462
+ npath = Path(new_path)
463
+ if cpath.exists():
464
+ try:
465
+ shutil.copy(str(cpath.resolve()), str(npath.resolve()))
466
+ except Exception as e:
467
+ print(e)
468
+ return None
469
+ update_lora_dict(str(npath))
470
+ return new_path
471
+ else:
472
+ return None
473
+
474
+
475
+ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
476
+ path = download_lora(dl_urls)
477
+ if path:
478
+ if not lora1 or lora1 == "None":
479
+ lora1 = path
480
+ elif not lora2 or lora2 == "None":
481
+ lora2 = path
482
+ elif not lora3 or lora3 == "None":
483
+ lora3 = path
484
+ elif not lora4 or lora4 == "None":
485
+ lora4 = path
486
+ elif not lora5 or lora5 == "None":
487
+ lora5 = path
488
+ choices = get_all_lora_tupled_list()
489
+ return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
490
+ gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
491
+
492
+
493
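+ # Resolve a LoRA query (dictionary key, local path, or URL) to a usable path, downloading or fuzzy-matching when necessary.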
+ def get_valid_lora_name(query: str, model_name: str):
494
+ path = "None"
495
+ if not query or query == "None": return "None"
496
+ if to_lora_key(query) in loras_dict.keys(): return query
497
+ if query in loras_url_to_path_dict.keys():
498
+ path = loras_url_to_path_dict[query]
499
+ else:
500
+ path = to_lora_path(query.strip().split('/')[-1])
501
+ if Path(path).exists():
502
+ return path
503
+ elif "http" in query:
504
+ dl_file = download_lora(query)
505
+ if dl_file and Path(dl_file).exists(): return dl_file
506
+ else:
507
+ dl_file = find_similar_lora(query, model_name)
508
+ if dl_file and Path(dl_file).exists(): return dl_file
509
+ return "None"
510
+
511
+
512
+ def get_valid_lora_path(query: str):
513
+ path = None
514
+ if not query or query == "None": return None
515
+ if to_lora_key(query) in loras_dict.keys(): return query
516
+ path = to_lora_path(query.strip().split('/')[-1]) # resolve the query to a local filename before checking (mirrors get_valid_lora_name)
+ if Path(path).exists():
517
+ return path
518
+ else:
519
+ return None
520
+
521
+
522
+ def get_valid_lora_wt(prompt: str, lora_path: str, lora_wt: float):
523
+ import re
524
+ wt = lora_wt
525
+ result = re.findall(f'<lora:{to_lora_key(lora_path)}:(.+?)>', prompt)
526
+ if not result: return wt
527
+ wt = safe_float(result[0]) # findall with a single group returns strings, not tuples
528
+ return wt
529
+
530
+
531
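+ # Parse <lora:key:weight> tags out of a Classic-syntax prompt and assign them to the five LoRA slots, resolving names and weights.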
+ def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
532
+ import re
533
+ if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
534
+ lora1 = get_valid_lora_name(lora1, model_name)
535
+ lora2 = get_valid_lora_name(lora2, model_name)
536
+ lora3 = get_valid_lora_name(lora3, model_name)
537
+ lora4 = get_valid_lora_name(lora4, model_name)
538
+ lora5 = get_valid_lora_name(lora5, model_name)
539
+ if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
540
+ lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
541
+ lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
542
+ lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
543
+ lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
544
+ lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
545
+ on1, label1, tag1, md1 = get_lora_info(lora1)
546
+ on2, label2, tag2, md2 = get_lora_info(lora2)
547
+ on3, label3, tag3, md3 = get_lora_info(lora3)
548
+ on4, label4, tag4, md4 = get_lora_info(lora4)
549
+ on5, label5, tag5, md5 = get_lora_info(lora5)
550
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
551
+ prompts = prompt.split(",") if prompt else []
552
+ for p in prompts:
553
+ p = str(p).strip()
554
+ if "<lora" in p:
555
+ result = re.findall(r'<lora:(.+?):(.+?)>', p)
556
+ if not result: continue
557
+ key = result[0][0]
558
+ wt = result[0][1]
559
+ path = to_lora_path(key)
560
+ if not key in loras_dict.keys() or not path:
561
+ path = get_valid_lora_name(path, model_name)
562
+ if not path or path == "None": continue
563
+ if path in lora_paths:
564
+ continue
565
+ elif not on1:
566
+ lora1 = path
567
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
568
+ lora1_wt = safe_float(wt)
569
+ on1 = True
570
+ elif not on2:
571
+ lora2 = path
572
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
573
+ lora2_wt = safe_float(wt)
574
+ on2 = True
575
+ elif not on3:
576
+ lora3 = path
577
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
578
+ lora3_wt = safe_float(wt)
579
+ on3 = True
580
+ elif not on4:
581
+ lora4 = path
582
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
583
+ lora4_wt = safe_float(wt)
584
+ on4 = True
585
+ elif not on5:
586
+ lora5 = path
587
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
588
+ lora5_wt = safe_float(wt)
589
+ on5 = True
590
+ return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
591
+
592
+
593
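+ # Return (is_valid, label, trigger tags, markdown preview) for a LoRA path, downloading the file and querying Civitai on demand.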
+ def get_lora_info(lora_path: str):
594
+ is_valid = False
595
+ tag = ""
596
+ label = ""
597
+ md = "None"
598
+ if not lora_path or lora_path == "None":
599
+ print("LoRA file not found.")
600
+ return is_valid, label, tag, md
601
+ path = Path(lora_path)
602
+ new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
603
+ if not to_lora_key(str(new_path)) in loras_dict.keys() and str(path) not in set(get_all_lora_list()):
604
+ print("LoRA file is not registered.")
605
+ return is_valid, label, tag, md
606
+ if not new_path.exists():
607
+ download_private_file_from_somewhere(str(path), True)
608
+ basename = new_path.stem
609
+ label = f'Name: {basename}'
610
+ items = loras_dict.get(basename, None)
611
+ if items is None:
612
+ items = get_civitai_info(str(new_path))
613
+ if items is not None:
614
+ loras_dict[basename] = items
615
+ if items and items[2] != "":
616
+ tag = items[0]
617
+ label = f'Name: {basename}'
618
+ if items[1] == "Pony":
619
+ label = f'Name: {basename} (for Pony🐴)'
620
+ if items[4]:
621
+ md = f'<img src="{items[4]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL]({items[3]})'
622
+ elif items[3]:
623
+ md = f'[LoRA Model URL]({items[3]})'
624
+ is_valid = True
625
+ return is_valid, label, tag, md
626
+
627
+
628
+ def normalize_prompt_list(tags: list[str]):
629
+ prompts = []
630
+ for tag in tags:
631
+ tag = str(tag).strip()
632
+ if tag:
633
+ prompts.append(tag)
634
+ return prompts
635
+
636
+
637
+ def apply_lora_prompt(prompt: str = "", lora_info: str = ""):
638
+ if lora_info == "None": return gr.update(value=prompt)
639
+ tags = prompt.split(",") if prompt else []
640
+ prompts = normalize_prompt_list(tags)
641
+
642
+ lora_tag = lora_info.replace("/",",")
643
+ lora_tags = lora_tag.split(",") if str(lora_info) != "None" else []
644
+ lora_prompts = normalize_prompt_list(lora_tags)
645
+
646
+ empty = [""]
647
+ prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
648
+ return gr.update(value=prompt)
649
+
650
+
651
+ def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
652
+ import re
653
+ on1, label1, tag1, md1 = get_lora_info(lora1)
654
+ on2, label2, tag2, md2 = get_lora_info(lora2)
655
+ on3, label3, tag3, md3 = get_lora_info(lora3)
656
+ on4, label4, tag4, md4 = get_lora_info(lora4)
657
+ on5, label5, tag5, md5 = get_lora_info(lora5)
658
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
659
+
660
+ output_prompt = prompt
661
+ if "Classic" in str(prompt_syntax):
662
+ prompts = prompt.split(",") if prompt else []
663
+ output_prompts = []
664
+ for p in prompts:
665
+ p = str(p).strip()
666
+ if "<lora" in p:
667
+ result = re.findall(r'<lora:(.+?):(.+?)>', p)
668
+ if not result: continue
669
+ key = result[0][0]
670
+ wt = result[0][1]
671
+ path = to_lora_path(key)
672
+ if not key in loras_dict.keys() or not path: continue
673
+ if path in lora_paths:
674
+ output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
675
+ elif p:
676
+ output_prompts.append(p)
677
+ lora_prompts = []
678
+ if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
679
+ if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
680
+ if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
681
+ if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
682
+ if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
683
+ output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
684
+ choices = get_all_lora_tupled_list()
685
+
686
+ return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
687
+ gr.update(value=tag1, label=label1, visible=on1), gr.update(visible=on1), gr.update(value=md1, visible=on1),\
688
+ gr.update(value=lora2, choices=choices), gr.update(value=lora2_wt),\
689
+ gr.update(value=tag2, label=label2, visible=on2), gr.update(visible=on2), gr.update(value=md2, visible=on2),\
690
+ gr.update(value=lora3, choices=choices), gr.update(value=lora3_wt),\
691
+ gr.update(value=tag3, label=label3, visible=on3), gr.update(visible=on3), gr.update(value=md3, visible=on3),\
692
+ gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
693
+ gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
694
+ gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
695
+ gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
696
+
697
+
698
+ def get_my_lora(link_url):
699
+ from pathlib import Path
700
+ before = get_local_model_list(directory_loras)
701
+ for url in [url.strip() for url in link_url.split(',')]:
702
+ if not Path(f"{directory_loras}/{url.split('/')[-1]}").exists():
703
+ download_things(directory_loras, url, HF_TOKEN, CIVITAI_API_KEY)
704
+ after = get_local_model_list(directory_loras)
705
+ new_files = list_sub(after, before)
706
+ for file in new_files:
707
+ path = Path(file)
708
+ if path.exists():
709
+ new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
710
+ path.resolve().rename(new_path.resolve())
711
+ update_lora_dict(str(new_path))
712
+ new_lora_model_list = get_lora_model_list()
713
+ new_lora_tupled_list = get_all_lora_tupled_list()
714
+
715
+ return gr.update(
716
+ choices=new_lora_tupled_list, value=new_lora_model_list[-1]
717
+ ), gr.update(
718
+ choices=new_lora_tupled_list
719
+ ), gr.update(
720
+ choices=new_lora_tupled_list
721
+ ), gr.update(
722
+ choices=new_lora_tupled_list
723
+ ), gr.update(
724
+ choices=new_lora_tupled_list
725
+ )
726
+
727
+
728
+ def upload_file_lora(files, progress=gr.Progress(track_tqdm=True)):
729
+ progress(0, desc="Uploading...")
730
+ file_paths = [file.name for file in files]
731
+ progress(1, desc="Uploaded.")
732
+ return gr.update(value=file_paths, visible=True), gr.update(visible=True)
733
+
734
+
735
+ def move_file_lora(filepaths):
736
+ import shutil
737
+ for file in filepaths:
738
+ path = Path(shutil.move(Path(file).resolve(), Path(f"./{directory_loras}").resolve()))
739
+ newpath = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
740
+ path.resolve().rename(newpath.resolve())
741
+ update_lora_dict(str(newpath))
742
+
743
+ new_lora_model_list = get_lora_model_list()
744
+ new_lora_tupled_list = get_all_lora_tupled_list()
745
+
746
+ return gr.update(
747
+ choices=new_lora_tupled_list, value=new_lora_model_list[-1]
748
+ ), gr.update(
749
+ choices=new_lora_tupled_list
750
+ ), gr.update(
751
+ choices=new_lora_tupled_list
752
+ ), gr.update(
753
+ choices=new_lora_tupled_list
754
+ ), gr.update(
755
+ choices=new_lora_tupled_list
756
+ )
757
+
758
+
759
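+ # Note: this redefinition supersedes the earlier get_civitai_info above; it additionally records the Civitai download URL for the file.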
+ def get_civitai_info(path):
760
+ global civitai_not_exists_list, loras_url_to_path_dict
761
+ import requests
762
+ from requests.adapters import HTTPAdapter
763
+ from urllib3.util import Retry
764
+ default = ["", "", "", "", ""]
765
+ if path in set(civitai_not_exists_list): return default
766
+ if not Path(path).exists(): return None
767
+ user_agent = get_user_agent()
768
+ headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
769
+ base_url = 'https://civitai.com/api/v1/model-versions/by-hash/'
770
+ params = {}
771
+ session = requests.Session()
772
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
773
+ session.mount("https://", HTTPAdapter(max_retries=retries))
774
+ import hashlib
775
+ with open(path, 'rb') as file:
776
+ file_data = file.read()
777
+ hash_sha256 = hashlib.sha256(file_data).hexdigest()
778
+ url = base_url + hash_sha256
779
+ try:
780
+ r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
781
+ except Exception as e:
782
+ print(e)
783
+ return default
784
+ else:
785
+ if not r.ok: return None
786
+ json = r.json()
787
+ if 'baseModel' not in json:
788
+ civitai_not_exists_list.append(path)
789
+ return default
790
+ items = []
791
+ items.append(" / ".join(json['trainedWords'])) # The words (prompts) used to trigger the model
792
+ items.append(json['baseModel']) # Base model (SDXL1.0, Pony, ...)
793
+ items.append(json['model']['name']) # The name of the model version
794
+ items.append(f"https://civitai.com/models/{json['modelId']}") # The repo url for the model
795
+ items.append(json['images'][0]['url']) # The url for a sample image
796
+ loras_url_to_path_dict[json['downloadUrl']] = path # Map the download URL for this specific version to the local file path
797
+ return items
798
+
799
+
800
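+ # Query the Civitai /api/v1/models endpoint for LoRAs and keep only versions whose base model is in allow_model.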
+ def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1.0"], limit: int = 100,
801
+ sort: str = "Highest Rated", period: str = "AllTime", tag: str = ""):
802
+ import requests
803
+ from requests.adapters import HTTPAdapter
804
+ from urllib3.util import Retry
805
+ user_agent = get_user_agent()
806
+ headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
807
+ base_url = 'https://civitai.com/api/v1/models'
808
+ params = {'types': ['LORA'], 'sort': sort, 'period': period, 'limit': limit, 'nsfw': 'true'}
809
+ if query: params["query"] = query
810
+ if tag: params["tag"] = tag
811
+ session = requests.Session()
812
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
813
+ session.mount("https://", HTTPAdapter(max_retries=retries))
814
+ try:
815
+ r = session.get(base_url, params=params, headers=headers, stream=True, timeout=(3.0, 30))
816
+ except Exception as e:
817
+ print(e)
818
+ return None
819
+ else:
820
+ if not r.ok: return None
821
+ json = r.json()
822
+ if 'items' not in json: return None
823
+ items = []
824
+ for j in json['items']:
825
+ for model in j['modelVersions']:
826
+ item = {}
827
+ if model['baseModel'] not in set(allow_model): continue
828
+ item['name'] = j['name']
829
+ item['creator'] = j['creator']['username']
830
+ item['tags'] = j['tags']
831
+ item['model_name'] = model['name']
832
+ item['base_model'] = model['baseModel']
833
+ item['dl_url'] = model['downloadUrl']
834
+ item['md'] = f'<img src="{model["images"][0]["url"]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL](https://civitai.com/models/{j["id"]})'
835
+ items.append(item)
836
+ return items
837
+
838
+
839
+ def search_civitai_lora(query, base_model, sort="Highest Rated", period="AllTime", tag=""):
840
+ global civitai_lora_last_results
841
+ items = search_lora_on_civitai(query, base_model, 100, sort, period, tag)
842
+ if not items: return gr.update(choices=[("", "")], value="", visible=False),\
843
+ gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
844
+ civitai_lora_last_results = {}
845
+ choices = []
846
+ for item in items:
847
+ base_model_name = "Pony🐴" if item['base_model'] == "Pony" else item['base_model']
848
+ name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
849
+ value = item['dl_url']
850
+ choices.append((name, value))
851
+ civitai_lora_last_results[value] = item
852
+ if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
853
+ gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
854
+ result = civitai_lora_last_results.get(choices[0][1], "None")
855
+ md = result['md'] if result else ""
856
+ return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
857
+ gr.update(visible=True), gr.update(visible=True)
858
+
859
+
860
+ def select_civitai_lora(search_result):
861
+ if not "http" in search_result: return gr.update(value=""), gr.update(value="None", visible=True)
862
+ result = civitai_lora_last_results.get(search_result, "None")
863
+ md = result['md'] if result else ""
864
+ return gr.update(value=search_result), gr.update(value=md, visible=True)
865
+
866
+
867
+ LORA_BASE_MODEL_DICT = {
868
+ "diffusers:StableDiffusionPipeline": ["SD 1.5"],
869
+ "diffusers:StableDiffusionXLPipeline": ["Pony", "SDXL 1.0"],
870
+ "diffusers:FluxPipeline": ["Flux.1 D", "Flux.1 S"],
871
+ }
872
+
873
+
874
+ def get_lora_base_model(model_name: str):
875
+ api = HfApi(token=HF_TOKEN)
876
+ default = ["Pony", "SDXL 1.0"]
877
+ try:
878
+ model = api.model_info(repo_id=model_name, timeout=5.0)
879
+ tags = model.tags
880
+ for tag in tags:
881
+ if tag in LORA_BASE_MODEL_DICT.keys(): return LORA_BASE_MODEL_DICT.get(tag, default)
882
+ except Exception:
883
+ return default
884
+ return default
885
+
886
+
887
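+ # Fuzzy-match the requested LoRA key against the private dictionary; if nothing matches, fall back to a Civitai search restricted to the model's base architecture.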
+ def find_similar_lora(q: str, model_name: str):
888
+ from rapidfuzz.process import extractOne
889
+ from rapidfuzz.utils import default_process
890
+ query = to_lora_key(q)
891
+ print(f"Finding <lora:{query}:...>...")
892
+ keys = list(private_lora_dict.keys())
893
+ values = [x[2] for x in list(private_lora_dict.values())]
894
+ s = default_process(query)
895
+ e1 = extractOne(s, keys + values, processor=default_process, score_cutoff=80.0)
896
+ key = ""
897
+ if e1:
898
+ e = e1[0]
899
+ if e in set(keys): key = e
900
+ elif e in set(values): key = keys[values.index(e)]
901
+ if key:
902
+ path = to_lora_path(key)
903
+ new_path = to_lora_path(query)
904
+ if not Path(path).exists():
905
+ if not Path(new_path).exists(): download_private_file_from_somewhere(path, True)
906
+ if Path(path).exists() and copy_lora(path, new_path): return new_path
907
+ print(f"Finding <lora:{query}:...> on Civitai...")
908
+ civitai_query = Path(query).stem if Path(query).is_file() else query
909
+ civitai_query = civitai_query.replace("_", " ").replace("-", " ")
910
+ base_model = get_lora_base_model(model_name)
911
+ items = search_lora_on_civitai(civitai_query, base_model, 1)
912
+ if items:
913
+ item = items[0]
914
+ path = download_lora(item['dl_url'])
915
+ new_path = query if Path(query).is_file() else to_lora_path(query)
916
+ if path and copy_lora(path, new_path): return new_path
917
+ return None
918
+
919
+
920
+ def change_interface_mode(mode: str):
921
+ if mode == "Fast":
922
+ return gr.update(open=False), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
923
+ gr.update(visible=True), gr.update(open=False), gr.update(visible=True), gr.update(open=False),\
924
+ gr.update(visible=True), gr.update(value="Fast")
925
+ elif mode == "Simple": # t2i mode
926
+ return gr.update(open=True), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
927
+ gr.update(visible=True), gr.update(open=False), gr.update(visible=False), gr.update(open=True),\
928
+ gr.update(visible=False), gr.update(value="Standard")
929
+ elif mode == "LoRA": # t2i LoRA mode
930
+ return gr.update(open=True), gr.update(visible=True), gr.update(open=True), gr.update(open=False),\
931
+ gr.update(visible=True), gr.update(open=True), gr.update(visible=True), gr.update(open=False),\
932
+ gr.update(visible=False), gr.update(value="Standard")
933
+ else: # Standard
934
+ return gr.update(open=False), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
935
+ gr.update(visible=True), gr.update(open=False), gr.update(visible=True), gr.update(open=False),\
936
+ gr.update(visible=True), gr.update(value="Standard")
937
+
938
+
939
+ quality_prompt_list = [
940
+ {
941
+ "name": "None",
942
+ "prompt": "",
943
+ "negative_prompt": "lowres",
944
+ },
945
+ {
946
+ "name": "Animagine Common",
947
+ "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
948
+ "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
949
+ },
950
+ {
951
+ "name": "Pony Anime Common",
952
+ "prompt": "source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres",
953
+ "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
954
+ },
955
+ {
956
+ "name": "Pony Common",
957
+ "prompt": "source_anime, score_9, score_8_up, score_7_up",
958
+ "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
959
+ },
960
+ {
961
+ "name": "Animagine Standard v3.0",
962
+ "prompt": "masterpiece, best quality",
963
+ "negative_prompt": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name",
964
+ },
965
+ {
966
+ "name": "Animagine Standard v3.1",
967
+ "prompt": "masterpiece, best quality, very aesthetic, absurdres",
968
+ "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
969
+ },
970
+ {
971
+ "name": "Animagine Light v3.1",
972
+ "prompt": "(masterpiece), best quality, very aesthetic, perfect face",
973
+ "negative_prompt": "(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn",
974
+ },
975
+ {
976
+ "name": "Animagine Heavy v3.1",
977
+ "prompt": "(masterpiece), (best quality), (ultra-detailed), very aesthetic, illustration, disheveled hair, perfect composition, moist skin, intricate details",
978
+ "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair, extra digit, fewer digits, cropped, worst quality, low quality, very displeasing",
979
+ },
980
+ ]
981
+
982
+
983
+ style_list = [
984
+ {
985
+ "name": "None",
986
+ "prompt": "",
987
+ "negative_prompt": "",
988
+ },
989
+ {
990
+ "name": "Cinematic",
991
+ "prompt": "cinematic still, emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
992
+ "negative_prompt": "cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
993
+ },
994
+ {
995
+ "name": "Photographic",
996
+ "prompt": "cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed",
997
+ "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
998
+ },
999
+ {
1000
+ "name": "Anime",
1001
+ "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed",
1002
+ "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
1003
+ },
1004
+ {
1005
+ "name": "Manga",
1006
+ "prompt": "manga style, vibrant, high-energy, detailed, iconic, Japanese comic style",
1007
+ "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
1008
+ },
1009
+ {
1010
+ "name": "Digital Art",
1011
+ "prompt": "concept art, digital artwork, illustrative, painterly, matte painting, highly detailed",
1012
+ "negative_prompt": "photo, photorealistic, realism, ugly",
1013
+ },
1014
+ {
1015
+ "name": "Pixel art",
1016
+ "prompt": "pixel-art, low-res, blocky, pixel art style, 8-bit graphics",
1017
+ "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
1018
+ },
1019
+ {
1020
+ "name": "Fantasy art",
1021
+ "prompt": "ethereal fantasy concept art, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
1022
+ "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
1023
+ },
1024
+ {
1025
+ "name": "Neonpunk",
1026
+ "prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
1027
+ "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
1028
+ },
1029
+ {
1030
+ "name": "3D Model",
1031
+ "prompt": "professional 3d model, octane render, highly detailed, volumetric, dramatic lighting",
1032
+ "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
1033
+ },
1034
+ ]
1035
+
1036
+
1037
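+ # Each entry: [steps, CFG scale, sampler, clip_skip, LoRA path, LoRA scale].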
+ optimization_list = {
1038
+ "None": [28, 7., 'Euler a', False, 'None', 1.],
1039
+ "Default": [28, 7., 'Euler a', False, 'None', 1.],
1040
+ "SPO": [28, 7., 'Euler a', True, 'loras/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors', 1.],
1041
+ "DPO": [28, 7., 'Euler a', True, 'loras/sdxl-DPO-LoRA.safetensors', 1.],
1042
+ "DPO Turbo": [8, 2.5, 'LCM', True, 'loras/sd_xl_dpo_turbo_lora_v1-128dim.safetensors', 1.],
1043
+ "SDXL Turbo": [8, 2.5, 'LCM', True, 'loras/sd_xl_turbo_lora_v1.safetensors', 1.],
1044
+ "Hyper-SDXL 12step": [12, 5., 'TCD', True, 'loras/Hyper-SDXL-12steps-CFG-lora.safetensors', 1.],
1045
+ "Hyper-SDXL 8step": [8, 5., 'TCD', True, 'loras/Hyper-SDXL-8steps-CFG-lora.safetensors', 1.],
1046
+ "Hyper-SDXL 4step": [4, 0, 'TCD', True, 'loras/Hyper-SDXL-4steps-lora.safetensors', 1.],
1047
+ "Hyper-SDXL 2step": [2, 0, 'TCD', True, 'loras/Hyper-SDXL-2steps-lora.safetensors', 1.],
1048
+ "Hyper-SDXL 1step": [1, 0, 'TCD', True, 'loras/Hyper-SDXL-1steps-lora.safetensors', 1.],
1049
+ "PCM 16step": [16, 4., 'Euler a trailing', True, 'loras/pcm_sdxl_normalcfg_16step_converted.safetensors', 1.],
1050
+ "PCM 8step": [8, 4., 'Euler a trailing', True, 'loras/pcm_sdxl_normalcfg_8step_converted.safetensors', 1.],
1051
+ "PCM 4step": [4, 2., 'Euler a trailing', True, 'loras/pcm_sdxl_smallcfg_4step_converted.safetensors', 1.],
1052
+ "PCM 2step": [2, 1., 'Euler a trailing', True, 'loras/pcm_sdxl_smallcfg_2step_converted.safetensors', 1.],
1053
+ }
1054
+
1055
+
1056
+ def set_optimization(opt, steps_gui, cfg_gui, sampler_gui, clip_skip_gui, lora_gui, lora_scale_gui):
1057
+ if not opt in list(optimization_list.keys()): opt = "None"
1058
+ def_steps_gui = 28
1059
+ def_cfg_gui = 7.
1060
+ steps = optimization_list.get(opt, "None")[0]
1061
+ cfg = optimization_list.get(opt, "None")[1]
1062
+ sampler = optimization_list.get(opt, "None")[2]
1063
+ clip_skip = optimization_list.get(opt, "None")[3]
1064
+ lora = optimization_list.get(opt, "None")[4]
1065
+ lora_scale = optimization_list.get(opt, "None")[5]
1066
+ if opt == "None":
1067
+ steps = max(steps_gui, def_steps_gui)
1068
+ cfg = max(cfg_gui, def_cfg_gui)
1069
+ clip_skip = clip_skip_gui
1070
+ elif opt == "SPO" or opt == "DPO":
1071
+ steps = max(steps_gui, def_steps_gui)
1072
+ cfg = max(cfg_gui, def_cfg_gui)
1073
+
1074
+ return gr.update(value=steps), gr.update(value=cfg), gr.update(value=sampler),\
1075
+ gr.update(value=clip_skip), gr.update(value=lora), gr.update(value=lora_scale),
1076
+
1077
+
1078
+ # [sampler_gui, steps_gui, cfg_gui, clip_skip_gui, img_width_gui, img_height_gui, optimization_gui]
1079
+ preset_sampler_setting = {
1080
+ "None": ["Euler a", 28, 7., True, 1024, 1024, "None"],
1081
+ "Anime 3:4 Fast": ["LCM", 8, 2.5, True, 896, 1152, "DPO Turbo"],
1082
+ "Anime 3:4 Standard": ["Euler a", 28, 7., True, 896, 1152, "None"],
1083
+ "Anime 3:4 Heavy": ["Euler a", 40, 7., True, 896, 1152, "None"],
1084
+ "Anime 1:1 Fast": ["LCM", 8, 2.5, True, 1024, 1024, "DPO Turbo"],
1085
+ "Anime 1:1 Standard": ["Euler a", 28, 7., True, 1024, 1024, "None"],
1086
+ "Anime 1:1 Heavy": ["Euler a", 40, 7., True, 1024, 1024, "None"],
1087
+ "Photo 3:4 Fast": ["LCM", 8, 2.5, False, 896, 1152, "DPO Turbo"],
1088
+ "Photo 3:4 Standard": ["DPM++ 2M Karras", 28, 7., False, 896, 1152, "None"],
1089
+ "Photo 3:4 Heavy": ["DPM++ 2M Karras", 40, 7., False, 896, 1152, "None"],
1090
+ "Photo 1:1 Fast": ["LCM", 8, 2.5, False, 1024, 1024, "DPO Turbo"],
1091
+ "Photo 1:1 Standard": ["DPM++ 2M Karras", 28, 7., False, 1024, 1024, "None"],
1092
+ "Photo 1:1 Heavy": ["DPM++ 2M Karras", 40, 7., False, 1024, 1024, "None"],
1093
+ }
1094
+
1095
+
1096
+ def set_sampler_settings(sampler_setting):
1097
+ if not sampler_setting in list(preset_sampler_setting.keys()) or sampler_setting == "None":
1098
+ return gr.update(value="Euler a"), gr.update(value=28), gr.update(value=7.), gr.update(value=True),\
1099
+ gr.update(value=1024), gr.update(value=1024), gr.update(value="None")
1100
+ v = preset_sampler_setting.get(sampler_setting, ["Euler a", 28, 7., True, 1024, 1024, "None"])
1101
+ # sampler, steps, cfg, clip_skip, width, height, optimization
1102
+ return gr.update(value=v[0]), gr.update(value=v[1]), gr.update(value=v[2]), gr.update(value=v[3]),\
1103
+ gr.update(value=v[4]), gr.update(value=v[5]), gr.update(value=v[6])
1104
+
1105
+
1106
+ preset_styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
1107
+ preset_quality = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in quality_prompt_list}
1108
+
1109
+
1110
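+ # Strip previously applied style/quality preset tags from the prompts, then append the selected style, quality and model-type presets.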
+ def process_style_prompt(prompt: str, neg_prompt: str, styles_key: str = "None", quality_key: str = "None", type: str = "Auto"):
1111
+ def to_list(s):
1112
+ return [x.strip() for x in s.split(",") if not s == ""]
1113
+
1114
+ def list_sub(a, b):
1115
+ return [e for e in a if e not in b]
1116
+
1117
+ def list_uniq(l):
1118
+ return sorted(set(l), key=l.index)
1119
+
1120
+ animagine_ps = to_list("anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
1121
+ animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
1122
+ pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
1123
+ pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
1124
+ prompts = to_list(prompt)
1125
+ neg_prompts = to_list(neg_prompt)
1126
+
1127
+ all_styles_ps = []
1128
+ all_styles_nps = []
1129
+ for d in style_list:
1130
+ all_styles_ps.extend(to_list(str(d.get("prompt", ""))))
1131
+ all_styles_nps.extend(to_list(str(d.get("negative_prompt", ""))))
1132
+
1133
+ all_quality_ps = []
1134
+ all_quality_nps = []
1135
+ for d in quality_prompt_list:
1136
+ all_quality_ps.extend(to_list(str(d.get("prompt", ""))))
1137
+ all_quality_nps.extend(to_list(str(d.get("negative_prompt", ""))))
1138
+
1139
+ quality_ps = to_list(preset_quality[quality_key][0])
1140
+ quality_nps = to_list(preset_quality[quality_key][1])
1141
+ styles_ps = to_list(preset_styles[styles_key][0])
1142
+ styles_nps = to_list(preset_styles[styles_key][1])
1143
+
1144
+ prompts = list_sub(prompts, animagine_ps + pony_ps + all_styles_ps + all_quality_ps)
1145
+ neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + all_styles_nps + all_quality_nps)
1146
+
1147
+ last_empty_p = [""] if not prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
1148
+ last_empty_np = [""] if not neg_prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
1149
+
1150
+ if type == "Animagine":
1151
+ prompts = prompts + animagine_ps
1152
+ neg_prompts = neg_prompts + animagine_nps
1153
+ elif type == "Pony":
1154
+ prompts = prompts + pony_ps
1155
+ neg_prompts = neg_prompts + pony_nps
1156
+
1157
+ prompts = prompts + styles_ps + quality_ps
1158
+ neg_prompts = neg_prompts + styles_nps + quality_nps
1159
+
1160
+ prompt = ", ".join(list_uniq(prompts) + last_empty_p)
1161
+ neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
1162
+
1163
+ return gr.update(value=prompt), gr.update(value=neg_prompt), gr.update(value=type)
1164
+
1165
+
1166
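+ # Map the quick-preset selections (genre / model type / speed / aspect) to quality, style, sampler and optimization presets.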
+ def set_quick_presets(genre:str = "None", type:str = "Auto", speed:str = "None", aspect:str = "None"):
1167
+ quality = "None"
1168
+ style = "None"
1169
+ sampler = "None"
1170
+ opt = "None"
1171
+
1172
+ if genre == "Anime":
1173
+ if type != "None" and type != "Auto": style = "Anime"
1174
+ if aspect == "1:1":
1175
+ if speed == "Heavy":
1176
+ sampler = "Anime 1:1 Heavy"
1177
+ elif speed == "Fast":
1178
+ sampler = "Anime 1:1 Fast"
1179
+ else:
1180
+ sampler = "Anime 1:1 Standard"
1181
+ elif aspect == "3:4":
1182
+ if speed == "Heavy":
1183
+ sampler = "Anime 3:4 Heavy"
1184
+ elif speed == "Fast":
1185
+ sampler = "Anime 3:4 Fast"
1186
+ else:
1187
+ sampler = "Anime 3:4 Standard"
1188
+ if type == "Pony":
1189
+ quality = "Pony Anime Common"
1190
+ elif type == "Animagine":
1191
+ quality = "Animagine Common"
1192
+ else:
1193
+ quality = "None"
1194
+ elif genre == "Photo":
1195
+ if type != "None" and type != "Auto": style = "Photographic"
1196
+ if aspect == "1:1":
1197
+ if speed == "Heavy":
1198
+ sampler = "Photo 1:1 Heavy"
1199
+ elif speed == "Fast":
1200
+ sampler = "Photo 1:1 Fast"
1201
+ else:
1202
+ sampler = "Photo 1:1 Standard"
1203
+ elif aspect == "3:4":
1204
+ if speed == "Heavy":
1205
+ sampler = "Photo 3:4 Heavy"
1206
+ elif speed == "Fast":
1207
+ sampler = "Photo 3:4 Fast"
1208
+ else:
1209
+ sampler = "Photo 3:4 Standard"
1210
+ if type == "Pony":
1211
+ quality = "Pony Common"
1212
+ else:
1213
+ quality = "None"
1214
+
1215
+ if speed == "Fast":
1216
+ opt = "DPO Turbo"
1217
+ if genre == "Anime" and type != "Pony" and type != "Auto": quality = "Animagine Light v3.1"
1218
+
1219
+ return gr.update(value=quality), gr.update(value=style), gr.update(value=sampler), gr.update(value=opt), gr.update(value=type)
1220
+
1221
+
1222
+ textual_inversion_dict = {}
1223
+ try:
1224
+ with open('textual_inversion_dict.json', encoding='utf-8') as f:
1225
+ textual_inversion_dict = json.load(f)
1226
+ except Exception:
1227
+ pass
1228
+ textual_inversion_file_token_list = []
1229
+
1230
+
1231
+ def get_tupled_embed_list(embed_list):
1232
+ global textual_inversion_file_token_list
1233
+ tupled_list = []
1234
+ for file in embed_list:
1235
+ token = textual_inversion_dict.get(Path(file).name, [Path(file).stem.replace(",",""), False])[0]
1236
+ tupled_list.append((token, file))
1237
+ textual_inversion_file_token_list.append(token)
1238
+ return tupled_list
1239
+
1240
+
1241
+ def set_textual_inversion_prompt(textual_inversion_gui, prompt_gui, neg_prompt_gui, prompt_syntax_gui):
1242
+ ti_tags = [v[0] for v in textual_inversion_dict.values()] + textual_inversion_file_token_list
1243
+ tags = prompt_gui.split(",") if prompt_gui else []
1244
+ prompts = []
1245
+ for tag in tags:
1246
+ tag = str(tag).strip()
1247
+ if tag and not tag in ti_tags:
1248
+ prompts.append(tag)
1249
+ ntags = neg_prompt_gui.split(",") if neg_prompt_gui else []
1250
+ neg_prompts = []
1251
+ for tag in ntags:
1252
+ tag = str(tag).strip()
1253
+ if tag and not tag in ti_tags:
1254
+ neg_prompts.append(tag)
1255
+ ti_prompts = []
1256
+ ti_neg_prompts = []
1257
+ for ti in textual_inversion_gui:
1258
+ tokens = textual_inversion_dict.get(Path(ti).name, [Path(ti).stem.replace(",",""), False])
1259
+ is_positive = tokens[1] == True or "positive" in Path(ti).parent.name
1260
+ if is_positive: # positive prompt
1261
+ ti_prompts.append(tokens[0])
1262
+ else: # negative prompt (default)
1263
+ ti_neg_prompts.append(tokens[0])
1264
+ empty = [""]
1265
+ prompt = ", ".join(prompts + ti_prompts + empty)
1266
+ neg_prompt = ", ".join(neg_prompts + ti_neg_prompts + empty)
1267
+ return gr.update(value=prompt), gr.update(value=neg_prompt),
1268
+
1269
+
1270
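+ # Infer the diffusers pipeline class name from the repo's tags; falls back to StableDiffusionPipeline.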
+ def get_model_pipeline(repo_id: str):
1271
+ from huggingface_hub import HfApi
1272
+ api = HfApi(token=HF_TOKEN)
1273
+ default = "StableDiffusionPipeline"
1274
+ try:
1275
+ if not is_repo_name(repo_id): return default
1276
+ model = api.model_info(repo_id=repo_id, timeout=5.0)
1277
+ except Exception:
1278
+ return default
1279
+ if model.private or model.gated: return default
1280
+ tags = model.tags
1281
+ if not 'diffusers' in tags: return default
1282
+ if 'diffusers:FluxPipeline' in tags:
1283
+ return "FluxPipeline"
1284
+ if 'diffusers:StableDiffusionXLPipeline' in tags:
1285
+ return "StableDiffusionXLPipeline"
1286
+ elif 'diffusers:StableDiffusionPipeline' in tags:
1287
+ return "StableDiffusionPipeline"
1288
+ else:
1289
+ return default
1290
+
packages.txt CHANGED
@@ -1,3 +1,3 @@
1
  git-lfs
2
- aria2
3
  ffmpeg
 
1
  git-lfs
2
+ aria2 -y
3
  ffmpeg
pre-requirements.txt DELETED
@@ -1 +0,0 @@
1
- pip>=23.0.0
 
 
requirements.txt CHANGED
@@ -1,13 +1,4 @@
1
- stablepy==0.6.5
2
- torch==2.5.1
3
- diffusers
4
  gdown
5
- opencv-python
6
- unidecode
7
- pydantic==2.10.6
8
- huggingface_hub
9
- hf_transfer
10
- hf_xet
11
- spaces
12
- gradio==5.44.1
13
- matplotlib-inline
 
1
+ git+https://github.com/R3gm/stablepy.git@flux_beta
2
+ torch==2.2.0
 
3
  gdown
4
+ opencv-python
 
 
 
 
 
 
 
 
stablepy_model.py ADDED
File without changes
utils.py CHANGED
@@ -1,714 +1,50 @@
1
- import os
2
- import re
3
  import gradio as gr
4
- from constants import (
5
- DIFFUSERS_FORMAT_LORAS,
6
- CIVITAI_API_KEY,
7
- HF_TOKEN,
8
- MODEL_TYPE_CLASS,
9
- DIRECTORY_LORAS,
10
- DIRECTORY_MODELS,
11
- DIFFUSECRAFT_CHECKPOINT_NAME,
12
- CACHE_HF_ROOT,
13
- CACHE_HF,
14
- STORAGE_ROOT,
15
- )
16
- from huggingface_hub import HfApi, get_hf_file_metadata, snapshot_download
17
- from diffusers import DiffusionPipeline
18
- from huggingface_hub import model_info as model_info_data
19
- from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings
20
- from stablepy.diffusers_vanilla.utils import checkpoint_model_type
21
- from pathlib import PosixPath
22
- from unidecode import unidecode
23
- import urllib.parse
24
- import copy
25
- import requests
26
- from requests.adapters import HTTPAdapter
27
- from urllib3.util import Retry
28
- import shutil
29
- import subprocess
30
- import json
31
- import html as _html
32
-
33
- IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
34
- USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
35
- MODEL_ARCH = {
36
- 'stable-diffusion-xl-v1-base/lora': "Stable Diffusion XL (Illustrious, Pony, NoobAI)",
37
- 'stable-diffusion-v1/lora': "Stable Diffusion 1.5",
38
- 'flux-1-dev/lora': "Flux",
39
- }
40
-
41
-
42
- def read_safetensors_header_from_url(url: str):
43
- """Read safetensors header from a remote Hugging Face file."""
44
- meta = get_hf_file_metadata(url)
45
-
46
- # Step 1: first 8 bytes → header length
47
- resp = requests.get(meta.location, headers={"Range": "bytes=0-7"})
48
- resp.raise_for_status()
49
- header_len = int.from_bytes(resp.content, "little")
50
-
51
- # Step 2: fetch full header JSON
52
- end = 8 + header_len - 1
53
- resp = requests.get(meta.location, headers={"Range": f"bytes=8-{end}"})
54
- resp.raise_for_status()
55
- header_json = resp.content.decode("utf-8")
56
-
57
- return json.loads(header_json)
58
-
59
-
60
- def read_safetensors_header_from_file(path: str):
61
- """Read safetensors header from a local file."""
62
- with open(path, "rb") as f:
63
- # Step 1: first 8 bytes → header length
64
- header_len = int.from_bytes(f.read(8), "little")
65
-
66
- # Step 2: read header JSON
67
- header_json = f.read(header_len).decode("utf-8")
68
-
69
- return json.loads(header_json)
70
-
71
-
72
- class LoraHeaderInformation:
73
- """
74
- Encapsulates parsed info from a LoRA JSON header and provides
75
- a compact HTML summary via .to_html().
76
- """
77
-
78
- def __init__(self, json_data):
79
- self.original_json = copy.deepcopy(json_data or {})
80
-
81
- # Check if text encoder was trained
82
- # guard for json_data being a mapping
83
- try:
84
- self.text_encoder_trained = any("text_model" in ln for ln in json_data)
85
- except Exception:
86
- self.text_encoder_trained = False
87
-
88
- # Metadata (may be None)
89
- metadata = (json_data or {}).get("__metadata__", None)
90
- self.metadata = metadata
91
-
92
- # Default values
93
- self.architecture = "undefined"
94
- self.prediction_type = "undefined"
95
- self.base_model = "undefined"
96
- self.author = "undefined"
97
- self.title = "undefined"
98
- self.common_tags_list = []
99
-
100
- if metadata:
101
- self.architecture = MODEL_ARCH.get(
102
- metadata.get('modelspec.architecture', None),
103
- "undefined"
104
- )
105
-
106
- self.prediction_type = metadata.get('modelspec.prediction_type', "undefined")
107
- self.base_model = metadata.get('ss_sd_model_name', "undefined")
108
- self.author = metadata.get('modelspec.author', "undefined")
109
- self.title = metadata.get('modelspec.title', "undefined")
110
-
111
- base_model_hash = metadata.get('ss_new_sd_model_hash', None) # SHA256
112
- # AUTOV1 ss_sd_model_hash
113
- # https://civitai.com/api/v1/model-versions/by-hash/{base_model_hash} # Info
114
- if base_model_hash:
115
- self.base_model += f" hash={base_model_hash}"
116
-
117
- # Extract tags
118
- try:
119
- tags = metadata.get('ss_tag_frequency') if "ss_tag_frequency" in metadata else metadata.get('ss_datasets', "")
120
- tags = json.loads(tags) if tags else ""
121
-
122
- if isinstance(tags, list):
123
- tags = tags[0].get("tag_frequency", {})
124
-
125
- if tags:
126
- self.common_tags_list = list(tags[list(tags.keys())[0]].keys())
127
- except Exception:
128
- self.common_tags_list = []
129
-
130
- def to_dict(self):
131
- """Return a plain dict summary of parsed fields."""
132
- return {
133
- "architecture": self.architecture,
134
- "prediction_type": self.prediction_type,
135
- "base_model": self.base_model,
136
- "author": self.author,
137
- "title": self.title,
138
- "text_encoder_trained": bool(self.text_encoder_trained),
139
- "common_tags": self.common_tags_list,
140
- }
141
-
142
- def to_html(self, limit_tags=20):
143
- """
144
- Return a compact HTML snippet (string) showing the parsed info
145
- in a small font. Values are HTML-escaped.
146
- """
147
- # helper to escape
148
- esc = _html.escape
149
-
150
- rows = [
151
- ("Title", esc(str(self.title))),
152
- ("Author", esc(str(self.author))),
153
- ("Architecture", esc(str(self.architecture))),
154
- ("Base model", esc(str(self.base_model))),
155
- ("Prediction type", esc(str(self.prediction_type))),
156
- ("Text encoder trained", esc(str(self.text_encoder_trained))),
157
- ("Reference tags", esc(str(", ".join(self.common_tags_list[:limit_tags])))),
158
- ]
159
-
160
- # small, compact table with inline styling (small font)
161
- html_rows = "".join(
162
- f"<tr><th style='text-align:left;padding:2px 6px;white-space:nowrap'>{k}</th>"
163
- f"<td style='padding:2px 6px'>{v}</td></tr>"
164
- for k, v in rows
165
- )
166
-
167
- html_snippet = (
168
- "<div style='font-family:system-ui, -apple-system, \"Segoe UI\", Roboto, "
169
- "Helvetica, Arial, \"Noto Sans\", sans-serif; font-size:12px; line-height:1.2; "
170
- "'>"
171
- f"<table style='border-collapse:collapse; font-size:12px;'>"
172
- f"{html_rows}"
173
- "</table>"
174
- "</div>"
175
- )
176
-
177
- return html_snippet
178
-
179
-
180
- def request_json_data(url):
181
- model_version_id = url.split('/')[-1]
182
- if "?modelVersionId=" in model_version_id:
183
- match = re.search(r'modelVersionId=(\d+)', url)
184
- model_version_id = match.group(1)
185
-
186
- endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_version_id}"
187
-
188
- params = {}
189
- headers = {'User-Agent': USER_AGENT, 'content-type': 'application/json'}
190
- session = requests.Session()
191
- retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
192
- session.mount("https://", HTTPAdapter(max_retries=retries))
193
-
194
- try:
195
- result = session.get(endpoint_url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
196
- result.raise_for_status()
197
- json_data = result.json()
198
- return json_data if json_data else None
199
- except Exception as e:
200
- print(f"Error: {e}")
201
- return None
202
-
203
-
204
- class ModelInformation:
205
- def __init__(self, json_data):
206
- self.model_version_id = json_data.get("id", "")
207
- self.model_id = json_data.get("modelId", "")
208
- self.download_url = json_data.get("downloadUrl", "")
209
- self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
210
- self.filename_url = next(
211
- (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "") and v.get("type", "Model") == "Model"), ""
212
- )
213
- self.filename_url = self.filename_url if self.filename_url else ""
214
- self.description = json_data.get("description", "")
215
- if self.description is None:
216
- self.description = ""
217
- self.model_name = json_data.get("model", {}).get("name", "")
218
- self.model_type = json_data.get("model", {}).get("type", "")
219
- self.nsfw = json_data.get("model", {}).get("nsfw", False)
220
- self.poi = json_data.get("model", {}).get("poi", False)
221
- self.images = [img.get("url", "") for img in json_data.get("images", [])]
222
- self.example_prompt = json_data.get("trainedWords", [""])[0] if json_data.get("trainedWords") else ""
223
- self.original_json = copy.deepcopy(json_data)
224
-
225
-
226
- def get_civit_params(url):
227
- try:
228
- json_data = request_json_data(url)
229
- mdc = ModelInformation(json_data)
230
- if mdc.download_url and mdc.filename_url:
231
- return mdc.download_url, mdc.filename_url, mdc.model_url
232
- else:
233
- ValueError("Invalid Civitai model URL")
234
- except Exception as e:
235
- print(f"Error retrieving Civitai metadata: {e} — fallback to direct download")
236
- return url, None, None
237
-
238
-
239
- def civ_redirect_down(url, dir_, civitai_api_key, romanize, alternative_name):
240
- filename_base = filename = None
241
-
242
- if alternative_name:
243
- output_path = os.path.join(dir_, alternative_name)
244
- if os.path.exists(output_path):
245
- return output_path, alternative_name
246
-
247
- # Follow the redirect to get the actual download URL
248
- curl_command = (
249
- f'curl -L -sI --connect-timeout 5 --max-time 5 '
250
- f'-H "Content-Type: application/json" '
251
- f'-H "Authorization: Bearer {civitai_api_key}" "{url}"'
252
- )
253
-
254
- headers = os.popen(curl_command).read()
255
-
256
- # Look for the redirected "Location" URL
257
- location_match = re.search(r'location: (.+)', headers, re.IGNORECASE)
258
-
259
- if location_match:
260
- redirect_url = location_match.group(1).strip()
261
-
262
- # Extract the filename from the redirect URL's "Content-Disposition"
263
- filename_match = re.search(r'filename%3D%22(.+?)%22', redirect_url)
264
- if filename_match:
265
- encoded_filename = filename_match.group(1)
266
- # Decode the URL-encoded filename
267
- decoded_filename = urllib.parse.unquote(encoded_filename)
268
-
269
- filename = unidecode(decoded_filename) if romanize else decoded_filename
270
- # print(f"Filename redirect: {filename}")
271
-
272
- filename_base = alternative_name if alternative_name else filename
273
- if not filename_base:
274
- return None, None
275
- elif os.path.exists(os.path.join(dir_, filename_base)):
276
- return os.path.join(dir_, filename_base), filename_base
277
-
278
- aria2_command = (
279
- f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
280
- f'-k 1M -s 16 -d "{dir_}" -o "{filename_base}" "{redirect_url}"'
281
- )
282
- r_code = os.system(aria2_command) # noqa
283
-
284
- # if r_code != 0:
285
- # raise RuntimeError(f"Failed to download file: {filename_base}. Error code: {r_code}")
286
-
287
- output_path = os.path.join(dir_, filename_base)
288
- if not os.path.exists(output_path):
289
- return None, filename_base
290
-
291
- return output_path, filename_base
292
-
293
-
294
- def civ_api_down(url, dir_, civitai_api_key, civ_filename):
295
- """
296
- This method is susceptible to being blocked because it generates a lot of temp redirect links with aria2c.
297
- If an API key limit is reached, generating a new API key and using it can fix the issue.
298
- """
299
- output_path = None
300
-
301
- url_dl = url + f"?token={civitai_api_key}"
302
- if not civ_filename:
303
- aria2_command = f'aria2c -c -x 1 -s 1 -d "{dir_}" "{url_dl}"'
304
- os.system(aria2_command)
305
- else:
306
- output_path = os.path.join(dir_, civ_filename)
307
- if not os.path.exists(output_path):
308
- aria2_command = (
309
- f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
310
- f'-k 1M -s 16 -d "{dir_}" -o "{civ_filename}" "{url_dl}"'
311
- )
312
- os.system(aria2_command)
313
-
314
- return output_path
315
-
316
-
317
- def drive_down(url, dir_):
318
- import gdown
319
-
320
- output_path = None
321
-
322
- drive_id, _ = gdown.parse_url.parse_url(url, warning=False)
323
- dir_files = os.listdir(dir_)
324
-
325
- for dfile in dir_files:
326
- if drive_id in dfile:
327
- output_path = os.path.join(dir_, dfile)
328
- break
329
-
330
- if not output_path:
331
- original_path = gdown.download(url, f"{dir_}/", fuzzy=True)
332
-
333
- dir_name, base_name = os.path.split(original_path)
334
- name, ext = base_name.rsplit(".", 1)
335
- new_name = f"{name}_{drive_id}.{ext}"
336
- output_path = os.path.join(dir_name, new_name)
337
-
338
- os.rename(original_path, output_path)
339
-
340
- return output_path
341
-
342
-
343
- def hf_down(url, dir_, hf_token, romanize):
344
- url = url.replace("?download=true", "")
345
- # url = urllib.parse.quote(url, safe=':/') # fix encoding
346
-
347
- filename = unidecode(url.split('/')[-1]) if romanize else url.split('/')[-1]
348
- output_path = os.path.join(dir_, filename)
349
-
350
- if os.path.exists(output_path):
351
- return output_path
352
-
353
- if "/blob/" in url:
354
- url = url.replace("/blob/", "/resolve/")
355
-
356
- if hf_token:
357
- user_header = f'"Authorization: Bearer {hf_token}"'
358
- os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {dir_} -o {filename}")
359
- else:
360
- os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {dir_} -o {filename}")
361
-
362
- return output_path
363
-
364
-
365
- def download_things(directory, url, hf_token="", civitai_api_key="", romanize=False):
366
- url = url.strip()
367
- downloaded_file_path = None
368
-
369
- if "drive.google.com" in url:
370
- downloaded_file_path = drive_down(url, directory)
371
- elif "huggingface.co" in url:
372
- downloaded_file_path = hf_down(url, directory, hf_token, romanize)
373
- elif "civitai.com" in url:
374
- if not civitai_api_key:
375
- msg = "You need an API key to download Civitai models."
376
- print(f"\033[91m{msg}\033[0m")
377
- gr.Warning(msg)
378
- return None
379
-
380
- url, civ_filename, civ_page = get_civit_params(url)
381
- if civ_page and not IS_ZERO_GPU:
382
- print(f"\033[92mCivitai model: {civ_filename} [page: {civ_page}]\033[0m")
383
-
384
- downloaded_file_path, civ_filename = civ_redirect_down(url, directory, civitai_api_key, romanize, civ_filename)
385
-
386
- if not downloaded_file_path:
387
- msg = (
388
- "Download failed.\n"
389
- "If this is due to an API limit, generating a new API key may resolve the issue.\n"
390
- "Attempting to download using the old method..."
391
- )
392
- print(msg)
393
- gr.Warning(msg)
394
- downloaded_file_path = civ_api_down(url, directory, civitai_api_key, civ_filename)
395
- else:
396
- os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
397
-
398
- return downloaded_file_path
399
-
400
-
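For reference, download_things above simply dispatches on the URL's host: Google Drive links go through drive_down, Hugging Face links through hf_down, Civitai links through the redirect/API helpers, and anything else falls back to a plain aria2c call. A minimal usage sketch (the directory, URL, and token are placeholders, not values from this repo):

# Hypothetical usage sketch; directory, URL, and token are placeholders.
lora_path = download_things(
    "./loras",
    "https://huggingface.co/user/repo/blob/main/example_lora.safetensors",
    hf_token="hf_xxx",   # optional; only needed for gated or private repos
    romanize=True,       # transliterate non-ASCII filenames via unidecode
)
print(lora_path)  # expected local path; may be None if a Civitai download could not start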
401
- def get_model_list(directory_path):
402
- model_list = []
403
- valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
404
-
405
- for filename in os.listdir(directory_path):
406
- if os.path.splitext(filename)[1] in valid_extensions:
407
- # name_without_extension = os.path.splitext(filename)[0]
408
- file_path = os.path.join(directory_path, filename)
409
- # model_list.append((name_without_extension, file_path))
410
- model_list.append(file_path)
411
- print('\033[34mFILE: ' + file_path + '\033[0m')
412
- return model_list
413
-
414
-
415
- def extract_parameters(input_string):
416
- parameters = {}
417
- input_string = input_string.replace("\n", "")
418
-
419
- if "Negative prompt:" not in input_string:
420
- if "Steps:" in input_string:
421
- input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
422
- else:
423
- msg = "Generation data is invalid."
424
- gr.Warning(msg)
425
- print(msg)
426
- parameters["prompt"] = input_string
427
- return parameters
428
-
429
- parm = input_string.split("Negative prompt:")
430
- parameters["prompt"] = parm[0].strip()
431
- if "Steps:" not in parm[1]:
432
- parameters["neg_prompt"] = parm[1].strip()
433
- return parameters
434
- parm = parm[1].split("Steps:")
435
- parameters["neg_prompt"] = parm[0].strip()
436
- input_string = "Steps:" + parm[1]
437
-
438
- # Extracting Steps
439
- steps_match = re.search(r'Steps: (\d+)', input_string)
440
- if steps_match:
441
- parameters['Steps'] = int(steps_match.group(1))
442
-
443
- # Extracting Size
444
- size_match = re.search(r'Size: (\d+x\d+)', input_string)
445
- if size_match:
446
- parameters['Size'] = size_match.group(1)
447
- width, height = map(int, parameters['Size'].split('x'))
448
- parameters['width'] = width
449
- parameters['height'] = height
450
-
451
- # Extracting other parameters
452
- other_parameters = re.findall(r'([^,:]+): (.*?)(?=, [^,:]+:|$)', input_string)
453
- for param in other_parameters:
454
- parameters[param[0].strip()] = param[1].strip('"')
455
-
456
- return parameters
457
-
458
-
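As a worked example of the parsing above, a typical A1111-style generation string (values purely illustrative) is split on "Negative prompt:" and "Steps:", and the remaining "key: value" pairs are captured by the regex:

raw = (
    "1girl, masterpiece "
    "Negative prompt: lowres, bad anatomy "
    "Steps: 28, Sampler: Euler a, CFG scale: 7, Seed: 12345, Size: 832x1216"
)
params = extract_parameters(raw)
# params["prompt"]     -> "1girl, masterpiece"
# params["neg_prompt"] -> "lowres, bad anatomy"
# params["width"], params["height"] -> 832, 1216 (ints parsed from Size)
# Steps, Sampler, CFG scale, Seed, Size, ... end up as strings from the generic key/value pass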
459
- def get_my_lora(link_url, romanize):
460
- l_name = ""
461
- for url in [url.strip() for url in link_url.split(',')]:
462
- if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
463
- l_name = download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY, romanize)
464
- new_lora_model_list = get_model_list(DIRECTORY_LORAS)
465
- new_lora_model_list.insert(0, "None")
466
- new_lora_model_list = new_lora_model_list + DIFFUSERS_FORMAT_LORAS
467
- msg_lora = "Downloaded"
468
- if l_name:
469
- msg_lora += f": <b>{l_name}</b>"
470
- print(msg_lora)
471
-
472
- try:
473
- # Works with non-Civitai loras.
474
- json_data = read_safetensors_header_from_file(l_name)
475
- metadata_lora = LoraHeaderInformation(json_data)
476
- msg_lora += "<br>" + metadata_lora.to_html()
477
- except Exception:
478
- pass
479
-
480
- return gr.update(
481
- choices=new_lora_model_list
482
- ), gr.update(
483
- choices=new_lora_model_list
484
- ), gr.update(
485
- choices=new_lora_model_list
486
- ), gr.update(
487
- choices=new_lora_model_list
488
- ), gr.update(
489
- choices=new_lora_model_list
490
- ), gr.update(
491
- choices=new_lora_model_list
492
- ), gr.update(
493
- choices=new_lora_model_list
494
- ), gr.update(
495
- value=msg_lora
496
- )
497
-
498
-
499
- def info_html(json_data, title, subtitle):
500
- return f"""
501
- <div style='padding: 0; border-radius: 10px;'>
502
- <p style='margin: 0; font-weight: bold;'>{title}</p>
503
- <details>
504
- <summary>Details</summary>
505
- <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
506
- </details>
507
- </div>
508
- """
509
-
510
-
511
- def get_model_type(repo_id: str):
512
- api = HfApi(token=os.environ.get("HF_TOKEN"))  # token is needed for private or gated models
513
- default = "SD 1.5"
514
- try:
515
- if os.path.exists(repo_id):
516
- tag, _, _, _ = checkpoint_model_type(repo_id)
517
- return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
518
- else:
519
- model = api.model_info(repo_id=repo_id, timeout=5.0)
520
- tags = model.tags
521
- for tag in tags:
522
- if tag in MODEL_TYPE_CLASS.keys():
523
- return MODEL_TYPE_CLASS.get(tag, default)
524
-
525
- except Exception:
526
- return default
527
- return default
528
-
529
-
530
- def restart_space(repo_id: str, factory_reboot: bool):
531
- api = HfApi(token=os.environ.get("HF_TOKEN"))
532
- try:
533
- runtime = api.get_space_runtime(repo_id=repo_id)
534
- if runtime.stage == "RUNNING":
535
- api.restart_space(repo_id=repo_id, factory_reboot=factory_reboot)
536
- print(f"Restarting space: {repo_id}")
537
- else:
538
- print(f"Space {repo_id} is in stage: {runtime.stage}")
539
- except Exception as e:
540
- print(e)
541
-
542
-
543
- def extract_exif_data(image):
544
- if image is None:
545
- return ""
546
-
547
- try:
548
- metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
549
-
550
- for key in metadata_keys:
551
- if key in image.info:
552
- return image.info[key]
553
-
554
- return str(image.info)
555
-
556
- except Exception as e:
557
- return f"Error extracting metadata: {str(e)}"
558
-
559
-
560
- def create_mask_now(img, invert):
561
- import numpy as np
562
- import time
563
-
564
- time.sleep(0.5)
565
-
566
- transparent_image = img["layers"][0]
567
-
568
- # Extract the alpha channel
569
- alpha_channel = np.array(transparent_image)[:, :, 3]
570
-
571
- # Create a binary mask by thresholding the alpha channel
572
- binary_mask = alpha_channel > 1
573
-
574
- if invert:
575
- print("Invert")
576
- # Invert the binary mask so that the drawn area becomes black and everything else white
577
- binary_mask = np.invert(binary_mask)
578
-
579
- # Convert the binary mask to a 3-channel RGB mask
580
- rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
581
-
582
- # Convert the mask to uint8
583
- rgb_mask = rgb_mask.astype(np.uint8) * 255
584
-
585
- return img["background"], rgb_mask
586
-
587
-
588
- def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "main", token=True):
589
-
590
- variant = None
591
- if token is True and not os.environ.get("HF_TOKEN"):
592
- token = None
593
-
594
- if model_type == "SDXL":
595
- info = model_info_data(
596
- repo_name,
597
- token=token,
598
- revision=revision,
599
- timeout=5.0,
600
- )
601
-
602
- filenames = {sibling.rfilename for sibling in info.siblings}
603
- model_filenames, variant_filenames = variant_compatible_siblings(
604
- filenames, variant="fp16"
605
- )
606
-
607
- if len(variant_filenames):
608
- variant = "fp16"
609
-
610
- if model_type == "FLUX":
611
- cached_folder = snapshot_download(
612
- repo_id=repo_name,
613
- allow_patterns="transformer/*"
614
- )
615
- else:
616
- cached_folder = DiffusionPipeline.download(
617
- pretrained_model_name=repo_name,
618
- force_download=False,
619
- token=token,
620
- revision=revision,
621
- # mirror="https://hf-mirror.com",
622
- variant=variant,
623
- use_safetensors=True,
624
- trust_remote_code=False,
625
- timeout=5.0,
626
- )
627
-
628
- if isinstance(cached_folder, PosixPath):
629
- cached_folder = cached_folder.as_posix()
630
-
631
- # Task model
632
- # from huggingface_hub import hf_hub_download
633
- # hf_hub_download(
634
- # task_model,
635
- # filename="diffusion_pytorch_model.safetensors", # fix fp16 variant
636
- # )
637
-
638
- return cached_folder
639
-
640
-
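For SDXL repos the helper above probes the repo's file list and downloads the fp16 variant when one exists; for FLUX it only snapshots the transformer/ subfolder. An illustrative call (the repo id is just a common public example, not a value taken from this repo):

# Illustrative; stabilityai/stable-diffusion-xl-base-1.0 is a public SDXL repo.
cached_folder = download_diffuser_repo(
    "stabilityai/stable-diffusion-xl-base-1.0",
    model_type="SDXL",
)
print(cached_folder)  # local snapshot directory used when loading the pipeline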
641
- def get_folder_size_gb(folder_path):
642
- result = subprocess.run(["du", "-s", folder_path], capture_output=True, text=True)
643
-
644
- total_size_kb = int(result.stdout.split()[0])
645
- total_size_gb = total_size_kb / (1024 ** 2)
646
-
647
- return total_size_gb
648
-
649
-
650
- def get_used_storage_gb(path_storage=STORAGE_ROOT):
651
- try:
652
- used_gb = get_folder_size_gb(path_storage)
653
- print(f"Used Storage: {used_gb:.2f} GB")
654
- except Exception as e:
655
- used_gb = 999
656
- print(f"Error while retrieving the used storage: {e}.")
657
-
658
- return used_gb
659
-
660
-
661
- def delete_model(removal_candidate):
662
- print(f"Removing: {removal_candidate}")
663
-
664
- if os.path.exists(removal_candidate):
665
- os.remove(removal_candidate)
666
- else:
667
- diffusers_model = f"{CACHE_HF}{DIRECTORY_MODELS}--{removal_candidate.replace('/', '--')}"
668
- if os.path.isdir(diffusers_model):
669
- shutil.rmtree(diffusers_model)
670
-
671
-
672
- def clear_hf_cache():
673
- """
674
- Clears the entire Hugging Face cache at ~/.cache/huggingface.
675
- Hugging Face will re-download models as needed later.
676
- """
677
- try:
678
- if os.path.exists(CACHE_HF):
679
- shutil.rmtree(CACHE_HF, ignore_errors=True)
680
- print(f"Hugging Face cache cleared: {CACHE_HF}")
681
- else:
682
- print(f"No Hugging Face cache found at: {CACHE_HF}")
683
- except Exception as e:
684
- print(f"Error clearing Hugging Face cache: {e}")
685
-
686
-
687
- def progress_step_bar(step, total):
688
- # Calculate the percentage for the progress bar width
689
- percentage = min(100, ((step / total) * 100))
690
-
691
- return f"""
692
- <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
693
- <div style="width: {percentage}%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
694
- <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 13px;">
695
- {int(percentage)}%
696
- </div>
697
- </div>
698
- """
699
-
700
-
701
- def html_template_message(msg):
702
- return f"""
703
- <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
704
- <div style="width: 0%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
705
- <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 14px; font-weight: bold; text-shadow: 1px 1px 2px black;">
706
- {msg}
707
- </div>
708
- </div>
709
- """
710
-
711
-
712
- def escape_html(text):
713
- """Escapes HTML special characters in the input text."""
714
- return text.replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")
 
 
 
1
  import gradio as gr
2
+ from dartrs.v2 import AspectRatioTag, LengthTag, RatingTag, IdentityTag
3
+
4
+
5
+ V2_ASPECT_RATIO_OPTIONS: list[AspectRatioTag] = [
6
+ "ultra_wide",
7
+ "wide",
8
+ "square",
9
+ "tall",
10
+ "ultra_tall",
11
+ ]
12
+ V2_RATING_OPTIONS: list[RatingTag] = [
13
+ "sfw",
14
+ "general",
15
+ "sensitive",
16
+ "nsfw",
17
+ "questionable",
18
+ "explicit",
19
+ ]
20
+ V2_LENGTH_OPTIONS: list[LengthTag] = [
21
+ "very_short",
22
+ "short",
23
+ "medium",
24
+ "long",
25
+ "very_long",
26
+ ]
27
+ V2_IDENTITY_OPTIONS: list[IdentityTag] = [
28
+ "none",
29
+ "lax",
30
+ "strict",
31
+ ]
32
+
33
+
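These option lists are plain strings typed against the dartrs v2 tag aliases; the usual way to surface them in the UI is as choices for Gradio selection components. A sketch (labels and default values are illustrative, not taken from this repo):

# Hypothetical UI wiring; labels and defaults are illustrative.
aspect_ratio = gr.Radio(label="Aspect ratio", choices=V2_ASPECT_RATIO_OPTIONS, value="tall")
rating = gr.Radio(label="Rating", choices=V2_RATING_OPTIONS, value="sfw")
length = gr.Radio(label="Length", choices=V2_LENGTH_OPTIONS, value="long")
identity = gr.Radio(label="Identity", choices=V2_IDENTITY_OPTIONS, value="none")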
34
+ # ref: https://qiita.com/tregu148/items/fccccbbc47d966dd2fc2
35
+ def gradio_copy_text(_text: None):
36
+ gr.Info("Copied!")
37
+
38
+
39
+ COPY_ACTION_JS = """\
40
+ (inputs, _outputs) => {
41
+ // inputs is the string value of the input_text
42
+ if (inputs.trim() !== "") {
43
+ navigator.clipboard.writeText(inputs);
44
+ }
45
+ }"""
46
+
47
+
48
+ def gradio_copy_prompt(prompt: str):
49
+ gr.Info("Copied!")
50
+ return prompt
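For context, the two copy helpers are meant to be paired on a single button: COPY_ACTION_JS writes the text to the clipboard client-side, while the Python callback only raises the "Copied!" toast. A minimal wiring sketch (component names are placeholders):

# Hypothetical wiring; output_text and copy_btn are placeholder components.
with gr.Blocks() as demo:
    output_text = gr.Textbox(label="Generated prompt")
    copy_btn = gr.Button("Copy")
    # js runs in the browser and performs the clipboard write; fn just shows the toast
    copy_btn.click(gradio_copy_text, inputs=[output_text], js=COPY_ACTION_JS)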