added files

- backend/openvino/custom_ov_model_vae_decoder.py +21 -0
- backend/openvino/pipelines.py +75 -0
- backend/pipelines/lcm.py +90 -0
- backend/pipelines/lcm_lora.py +25 -0
- backend/tiny_decoder.py +30 -0
- configs/lcm-models.txt +5 -0
- frontend/webui/generation_settings_ui.py +140 -0
- frontend/webui/image_to_image_ui.py +123 -0
- frontend/webui/image_variations_ui.py +105 -0
- frontend/webui/models_ui.py +85 -0
- image_ops.py +15 -0
- state.py +32 -0
backend/openvino/custom_ov_model_vae_decoder.py
ADDED
@@ -0,0 +1,21 @@

from backend.device import is_openvino_device

if is_openvino_device():
    from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder


class CustomOVModelVaeDecoder(OVModelVaeDecoder):
    def __init__(
        self,
        model,
        parent_model,
        ov_config=None,
        model_dir=None,
    ):
        super(OVModelVaeDecoder, self).__init__(
            model,
            parent_model,
            ov_config,
            "vae_decoder",
            model_dir,
        )
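Note the two-argument super() call: it starts the method-resolution-order lookup after OVModelVaeDecoder itself, so the subclass runs the grandparent's initializer and bypasses the parent's own __init__, presumably to skip setup logic that the TAESD decoder does not need. A minimal, self-contained sketch of that trick with hypothetical classes:

class A:
    def __init__(self):
        print("A.__init__")


class B(A):
    def __init__(self):
        print("B.__init__")


class C(B):
    def __init__(self):
        # super(B, self) begins the MRO lookup *after* B,
        # so this runs A.__init__ and skips B.__init__ entirely
        super(B, self).__init__()


C()  # prints "A.__init__"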

backend/openvino/pipelines.py
ADDED
@@ -0,0 +1,75 @@

from constants import DEVICE, LCM_DEFAULT_MODEL_OPENVINO
from backend.tiny_decoder import get_tiny_decoder_vae_model
from typing import Any
from backend.device import is_openvino_device
from paths import get_base_folder_name

if is_openvino_device():
    from huggingface_hub import snapshot_download
    from optimum.intel.openvino.modeling_diffusion import OVBaseModel

    from optimum.intel.openvino.modeling_diffusion import (
        OVStableDiffusionPipeline,
        OVStableDiffusionImg2ImgPipeline,
        OVStableDiffusionXLPipeline,
        OVStableDiffusionXLImg2ImgPipeline,
    )
    from backend.openvino.custom_ov_model_vae_decoder import CustomOVModelVaeDecoder


def ov_load_taesd(
    pipeline: Any,
    use_local_model: bool = False,
):
    taesd_dir = snapshot_download(
        repo_id=get_tiny_decoder_vae_model(pipeline.__class__.__name__),
        local_files_only=use_local_model,
    )
    pipeline.vae_decoder = CustomOVModelVaeDecoder(
        model=OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"),
        parent_model=pipeline,
        model_dir=taesd_dir,
    )


def get_ov_text_to_image_pipeline(
    model_id: str = LCM_DEFAULT_MODEL_OPENVINO,
    use_local_model: bool = False,
) -> Any:
    if "xl" in get_base_folder_name(model_id).lower():
        pipeline = OVStableDiffusionXLPipeline.from_pretrained(
            model_id,
            local_files_only=use_local_model,
            ov_config={"CACHE_DIR": ""},
            device=DEVICE.upper(),
        )
    else:
        pipeline = OVStableDiffusionPipeline.from_pretrained(
            model_id,
            local_files_only=use_local_model,
            ov_config={"CACHE_DIR": ""},
            device=DEVICE.upper(),
        )

    return pipeline


def get_ov_image_to_image_pipeline(
    model_id: str = LCM_DEFAULT_MODEL_OPENVINO,
    use_local_model: bool = False,
) -> Any:
    if "xl" in get_base_folder_name(model_id).lower():
        pipeline = OVStableDiffusionXLImg2ImgPipeline.from_pretrained(
            model_id,
            local_files_only=use_local_model,
            ov_config={"CACHE_DIR": ""},
            device=DEVICE.upper(),
        )
    else:
        pipeline = OVStableDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            local_files_only=use_local_model,
            ov_config={"CACHE_DIR": ""},
            device=DEVICE.upper(),
        )
    return pipeline
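A hedged usage sketch of the two helpers above (the prompt is illustrative; the model ID falls back to the LCM_DEFAULT_MODEL_OPENVINO constant defined elsewhere in the repo):

from backend.openvino.pipelines import get_ov_text_to_image_pipeline, ov_load_taesd

pipeline = get_ov_text_to_image_pipeline()  # defaults to LCM_DEFAULT_MODEL_OPENVINO
ov_load_taesd(pipeline)  # swap in the tiny TAESD decoder for faster VAE decoding
images = pipeline(prompt="a cup of coffee", num_inference_steps=4).images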

backend/pipelines/lcm.py
ADDED
@@ -0,0 +1,90 @@

from constants import LCM_DEFAULT_MODEL
from diffusers import (
    DiffusionPipeline,
    AutoencoderTiny,
    UNet2DConditionModel,
    LCMScheduler,
)
import torch
from backend.tiny_decoder import get_tiny_decoder_vae_model
from typing import Any
from diffusers import (
    LCMScheduler,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionXLImg2ImgPipeline,
)


def _get_lcm_pipeline_from_base_model(
    lcm_model_id: str,
    base_model_id: str,
    use_local_model: bool,
):
    pipeline = None
    unet = UNet2DConditionModel.from_pretrained(
        lcm_model_id,
        torch_dtype=torch.float32,
        local_files_only=use_local_model,
    )
    pipeline = DiffusionPipeline.from_pretrained(
        base_model_id,
        unet=unet,
        torch_dtype=torch.float32,
        local_files_only=use_local_model,
    )
    pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
    return pipeline


def load_taesd(
    pipeline: Any,
    use_local_model: bool = False,
    torch_data_type: torch.dtype = torch.float32,
):
    vae_model = get_tiny_decoder_vae_model(pipeline.__class__.__name__)
    pipeline.vae = AutoencoderTiny.from_pretrained(
        vae_model,
        torch_dtype=torch_data_type,
        local_files_only=use_local_model,
    )


def get_lcm_model_pipeline(
    model_id: str = LCM_DEFAULT_MODEL,
    use_local_model: bool = False,
):
    pipeline = None
    if model_id == "latent-consistency/lcm-sdxl":
        pipeline = _get_lcm_pipeline_from_base_model(
            model_id,
            "stabilityai/stable-diffusion-xl-base-1.0",
            use_local_model,
        )

    elif model_id == "latent-consistency/lcm-ssd-1b":
        pipeline = _get_lcm_pipeline_from_base_model(
            model_id,
            "segmind/SSD-1B",
            use_local_model,
        )
    else:
        pipeline = DiffusionPipeline.from_pretrained(
            model_id,
            local_files_only=use_local_model,
        )

    return pipeline


def get_image_to_image_pipeline(pipeline: Any) -> Any:
    components = pipeline.components
    pipeline_class = pipeline.__class__.__name__
    if (
        pipeline_class == "LatentConsistencyModelPipeline"
        or pipeline_class == "StableDiffusionPipeline"
    ):
        return StableDiffusionImg2ImgPipeline(**components)
    elif pipeline_class == "StableDiffusionXLPipeline":
        return StableDiffusionXLImg2ImgPipeline(**components)
    else:
        raise Exception(f"Unknown pipeline {pipeline_class}")
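How these pieces fit together, as a hedged sketch (the model ID comes from configs/lcm-models.txt below):

from backend.pipelines.lcm import (
    get_lcm_model_pipeline,
    load_taesd,
    get_image_to_image_pipeline,
)

pipeline = get_lcm_model_pipeline("SimianLuo/LCM_Dreamshaper_v7")
load_taesd(pipeline)  # replace the full VAE with AutoencoderTiny
img2img = get_image_to_image_pipeline(pipeline)  # rebuilt from the shared components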

backend/pipelines/lcm_lora.py
ADDED
@@ -0,0 +1,25 @@

from diffusers import DiffusionPipeline, LCMScheduler
import torch


def get_lcm_lora_pipeline(
    base_model_id: str,
    lcm_lora_id: str,
    use_local_model: bool,
    torch_data_type: torch.dtype,
):
    pipeline = DiffusionPipeline.from_pretrained(
        base_model_id,
        torch_dtype=torch_data_type,
        local_files_only=use_local_model,
    )
    pipeline.load_lora_weights(
        lcm_lora_id,
        local_files_only=use_local_model,
    )
    if "lcm" in lcm_lora_id.lower():
        print("LCM LoRA model detected so using recommended LCMScheduler")
        pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
    pipeline.fuse_lora()
    pipeline.unet.to(memory_format=torch.channels_last)
    return pipeline
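A hypothetical call; both model IDs are illustrative examples of a Stable Diffusion base model paired with a matching LCM-LoRA adapter, not values fixed by this commit:

import torch
from backend.pipelines.lcm_lora import get_lcm_lora_pipeline

pipeline = get_lcm_lora_pipeline(
    base_model_id="Lykon/dreamshaper-8",
    lcm_lora_id="latent-consistency/lcm-lora-sdv1-5",
    use_local_model=False,
    torch_data_type=torch.float32,
)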

backend/tiny_decoder.py
ADDED
@@ -0,0 +1,30 @@

from constants import (
    TAESD_MODEL,
    TAESDXL_MODEL,
    TAESD_MODEL_OPENVINO,
    TAESDXL_MODEL_OPENVINO,
)


def get_tiny_decoder_vae_model(pipeline_class) -> str:
    print(f"Pipeline class : {pipeline_class}")
    if (
        pipeline_class == "LatentConsistencyModelPipeline"
        or pipeline_class == "StableDiffusionPipeline"
        or pipeline_class == "StableDiffusionImg2ImgPipeline"
    ):
        return TAESD_MODEL
    elif (
        pipeline_class == "StableDiffusionXLPipeline"
        or pipeline_class == "StableDiffusionXLImg2ImgPipeline"
    ):
        return TAESDXL_MODEL
    elif (
        pipeline_class == "OVStableDiffusionPipeline"
        or pipeline_class == "OVStableDiffusionImg2ImgPipeline"
    ):
        return TAESD_MODEL_OPENVINO
    elif pipeline_class == "OVStableDiffusionXLPipeline":
        return TAESDXL_MODEL_OPENVINO
    else:
        raise Exception("No valid pipeline class found!")
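The selector is keyed on class names rather than types, so callers pass pipeline.__class__.__name__. For example:

from constants import TAESDXL_MODEL
from backend.tiny_decoder import get_tiny_decoder_vae_model

# an SDXL text-to-image pipeline maps to the SDXL tiny decoder repo
assert get_tiny_decoder_vae_model("StableDiffusionXLPipeline") == TAESDXL_MODEL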

configs/lcm-models.txt
ADDED
@@ -0,0 +1,5 @@

stabilityai/sd-turbo
stabilityai/sdxl-turbo
SimianLuo/LCM_Dreamshaper_v7
latent-consistency/lcm-sdxl
latent-consistency/lcm-ssd-1b

frontend/webui/generation_settings_ui.py
ADDED
@@ -0,0 +1,140 @@

import gradio as gr
from state import get_settings

app_settings = get_settings()


def on_change_inference_steps(steps):
    app_settings.settings.lcm_diffusion_setting.inference_steps = steps


def on_change_image_width(img_width):
    app_settings.settings.lcm_diffusion_setting.image_width = img_width


def on_change_image_height(img_height):
    app_settings.settings.lcm_diffusion_setting.image_height = img_height


def on_change_num_images(num_images):
    app_settings.settings.lcm_diffusion_setting.number_of_images = num_images


def on_change_guidance_scale(guidance_scale):
    app_settings.settings.lcm_diffusion_setting.guidance_scale = guidance_scale


def on_change_seed_value(seed):
    app_settings.settings.lcm_diffusion_setting.seed = seed


def on_change_seed_checkbox(seed_checkbox):
    app_settings.settings.lcm_diffusion_setting.use_seed = seed_checkbox


def on_change_safety_checker_checkbox(safety_checker_checkbox):
    app_settings.settings.lcm_diffusion_setting.use_safety_checker = (
        safety_checker_checkbox
    )


def on_change_tiny_auto_encoder_checkbox(tiny_auto_encoder_checkbox):
    app_settings.settings.lcm_diffusion_setting.use_tiny_auto_encoder = (
        tiny_auto_encoder_checkbox
    )


def on_offline_checkbox(offline_checkbox):
    app_settings.settings.lcm_diffusion_setting.use_offline_model = offline_checkbox


def get_generation_settings_ui() -> None:
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                num_inference_steps = gr.Slider(
                    1,
                    25,
                    value=app_settings.settings.lcm_diffusion_setting.inference_steps,
                    step=1,
                    label="Inference Steps",
                    interactive=True,
                )

                image_height = gr.Slider(
                    256,
                    1024,
                    value=app_settings.settings.lcm_diffusion_setting.image_height,
                    step=256,
                    label="Image Height",
                    interactive=True,
                )
                image_width = gr.Slider(
                    256,
                    1024,
                    value=app_settings.settings.lcm_diffusion_setting.image_width,
                    step=256,
                    label="Image Width",
                    interactive=True,
                )
                num_images = gr.Slider(
                    1,
                    50,
                    value=app_settings.settings.lcm_diffusion_setting.number_of_images,
                    step=1,
                    label="Number of images to generate",
                    interactive=True,
                )
                guidance_scale = gr.Slider(
                    1.0,
                    2.0,
                    value=app_settings.settings.lcm_diffusion_setting.guidance_scale,
                    step=0.1,
                    label="Guidance Scale",
                    interactive=True,
                )

                seed = gr.Slider(
                    value=app_settings.settings.lcm_diffusion_setting.seed,
                    minimum=0,
                    maximum=999999999,
                    label="Seed",
                    step=1,
                    interactive=True,
                )
                seed_checkbox = gr.Checkbox(
                    label="Use seed",
                    value=app_settings.settings.lcm_diffusion_setting.use_seed,
                    interactive=True,
                )

                safety_checker_checkbox = gr.Checkbox(
                    label="Use Safety Checker",
                    value=app_settings.settings.lcm_diffusion_setting.use_safety_checker,
                    interactive=True,
                )
                tiny_auto_encoder_checkbox = gr.Checkbox(
                    label="Use tiny auto encoder for SD",
                    value=app_settings.settings.lcm_diffusion_setting.use_tiny_auto_encoder,
                    interactive=True,
                )
                offline_checkbox = gr.Checkbox(
                    label="Use locally cached model or downloaded model folder(offline)",
                    value=app_settings.settings.lcm_diffusion_setting.use_offline_model,
                    interactive=True,
                )

        num_inference_steps.change(on_change_inference_steps, num_inference_steps)
        image_height.change(on_change_image_height, image_height)
        image_width.change(on_change_image_width, image_width)
        num_images.change(on_change_num_images, num_images)
        guidance_scale.change(on_change_guidance_scale, guidance_scale)
        seed.change(on_change_seed_value, seed)
        seed_checkbox.change(on_change_seed_checkbox, seed_checkbox)
        safety_checker_checkbox.change(
            on_change_safety_checker_checkbox, safety_checker_checkbox
        )
        tiny_auto_encoder_checkbox.change(
            on_change_tiny_auto_encoder_checkbox, tiny_auto_encoder_checkbox
        )
        offline_checkbox.change(on_offline_checkbox, offline_checkbox)
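Every widget here follows the same shape: a one-line setter plus component.change(fn, inputs) wiring, so settings persist as soon as a control moves. A minimal standalone sketch of that pattern (assuming the Gradio Blocks API used throughout this commit):

import gradio as gr


def on_change(value):
    print(f"slider moved to {value}")


with gr.Blocks() as demo:
    slider = gr.Slider(1, 25, step=1, label="Inference Steps")
    slider.change(on_change, slider)  # positional args: fn, then inputs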

frontend/webui/image_to_image_ui.py
ADDED
@@ -0,0 +1,123 @@

from typing import Any
import gradio as gr
from backend.models.lcmdiffusion_setting import DiffusionTask
from models.interface_types import InterfaceType
from frontend.utils import is_reshape_required
from constants import DEVICE
from state import get_settings, get_context
from concurrent.futures import ThreadPoolExecutor

app_settings = get_settings()

context = get_context(InterfaceType.WEBUI)
previous_width = 0
previous_height = 0
previous_model_id = ""
previous_num_of_images = 0


def generate_image_to_image(
    prompt,
    negative_prompt,
    init_image,
    strength,
) -> Any:
    global previous_height, previous_width, previous_model_id, previous_num_of_images, app_settings

    app_settings.settings.lcm_diffusion_setting.prompt = prompt
    app_settings.settings.lcm_diffusion_setting.negative_prompt = negative_prompt
    app_settings.settings.lcm_diffusion_setting.init_image = init_image
    app_settings.settings.lcm_diffusion_setting.strength = strength

    app_settings.settings.lcm_diffusion_setting.diffusion_task = (
        DiffusionTask.image_to_image.value
    )
    model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
    reshape = False
    image_width = app_settings.settings.lcm_diffusion_setting.image_width
    image_height = app_settings.settings.lcm_diffusion_setting.image_height
    num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
    if app_settings.settings.lcm_diffusion_setting.use_openvino:
        reshape = is_reshape_required(
            previous_width,
            image_width,
            previous_height,
            image_height,
            previous_model_id,
            model_id,
            previous_num_of_images,
            num_images,
        )

    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(
            context.generate_text_to_image,
            app_settings.settings,
            reshape,
            DEVICE,
        )
        images = future.result()
    # images = context.generate_text_to_image(
    #     app_settings.settings,
    #     reshape,
    #     DEVICE,
    # )
    previous_width = image_width
    previous_height = image_height
    previous_model_id = model_id
    previous_num_of_images = num_images
    return images


def get_image_to_image_ui() -> None:
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Init image", type="pil")
                with gr.Row():
                    prompt = gr.Textbox(
                        show_label=False,
                        lines=3,
                        placeholder="A fantasy landscape",
                        container=False,
                    )

                    generate_btn = gr.Button(
                        "Generate",
                        elem_id="generate_button",
                        scale=0,
                    )
                negative_prompt = gr.Textbox(
                    label="Negative prompt (Works in LCM-LoRA mode, set guidance > 1.0):",
                    lines=1,
                    placeholder="",
                )
                strength = gr.Slider(
                    0.1,
                    1,
                    value=app_settings.settings.lcm_diffusion_setting.strength,
                    step=0.01,
                    label="Strength",
                )

                input_params = [
                    prompt,
                    negative_prompt,
                    input_image,
                    strength,
                ]

            with gr.Column():
                output = gr.Gallery(
                    label="Generated images",
                    show_label=True,
                    elem_id="gallery",
                    columns=2,
                    height=512,
                )

        generate_btn.click(
            fn=generate_image_to_image,
            inputs=input_params,
            outputs=output,
        )
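Note the single-worker executor: each invocation creates a fresh one-worker pool, submits one generation job, and blocks on its result, so the work runs off the calling thread while the caller still waits for completion. The pattern in isolation:

from concurrent.futures import ThreadPoolExecutor


def work(x):
    return x * 2


with ThreadPoolExecutor(max_workers=1) as executor:
    result = executor.submit(work, 21).result()  # blocks until done; result == 42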

frontend/webui/image_variations_ui.py
ADDED
@@ -0,0 +1,105 @@

from typing import Any
import gradio as gr
from backend.models.lcmdiffusion_setting import DiffusionTask
from context import Context
from models.interface_types import InterfaceType
from frontend.utils import is_reshape_required
from constants import DEVICE
from state import get_settings, get_context
from concurrent.futures import ThreadPoolExecutor

app_settings = get_settings()

context = get_context(InterfaceType.WEBUI)
previous_width = 0
previous_height = 0
previous_model_id = ""
previous_num_of_images = 0


def generate_image_variations(
    init_image,
    variation_strength,
) -> Any:
    global previous_height, previous_width, previous_model_id, previous_num_of_images, app_settings

    app_settings.settings.lcm_diffusion_setting.init_image = init_image
    app_settings.settings.lcm_diffusion_setting.strength = variation_strength
    app_settings.settings.lcm_diffusion_setting.prompt = ""
    app_settings.settings.lcm_diffusion_setting.negative_prompt = ""

    app_settings.settings.lcm_diffusion_setting.diffusion_task = (
        DiffusionTask.image_to_image.value
    )
    model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
    reshape = False
    image_width = app_settings.settings.lcm_diffusion_setting.image_width
    image_height = app_settings.settings.lcm_diffusion_setting.image_height
    num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
    if app_settings.settings.lcm_diffusion_setting.use_openvino:
        reshape = is_reshape_required(
            previous_width,
            image_width,
            previous_height,
            image_height,
            previous_model_id,
            model_id,
            previous_num_of_images,
            num_images,
        )

    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(
            context.generate_text_to_image,
            app_settings.settings,
            reshape,
            DEVICE,
        )
        images = future.result()

    previous_width = image_width
    previous_height = image_height
    previous_model_id = model_id
    previous_num_of_images = num_images
    return images


def get_image_variations_ui() -> None:
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Init image", type="pil")
                with gr.Row():
                    generate_btn = gr.Button(
                        "Generate",
                        elem_id="generate_button",
                        scale=0,
                    )

                variation_strength = gr.Slider(
                    0.1,
                    1,
                    value=0.4,
                    step=0.01,
                    label="Variations Strength",
                )

                input_params = [
                    input_image,
                    variation_strength,
                ]

            with gr.Column():
                output = gr.Gallery(
                    label="Generated images",
                    show_label=True,
                    elem_id="gallery",
                    columns=2,
                    height=512,
                )

        generate_btn.click(
            fn=generate_image_variations,
            inputs=input_params,
            outputs=output,
        )

frontend/webui/models_ui.py
ADDED
@@ -0,0 +1,85 @@

from app_settings import AppSettings
from typing import Any
import gradio as gr
from constants import LCM_DEFAULT_MODEL, LCM_DEFAULT_MODEL_OPENVINO
from state import get_settings
from frontend.utils import get_valid_model_id

app_settings = get_settings()
app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id = get_valid_model_id(
    app_settings.openvino_lcm_models,
    app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id,
)


def change_lcm_model_id(model_id):
    app_settings.settings.lcm_diffusion_setting.lcm_model_id = model_id


def change_lcm_lora_model_id(model_id):
    app_settings.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id = model_id


def change_lcm_lora_base_model_id(model_id):
    app_settings.settings.lcm_diffusion_setting.lcm_lora.base_model_id = model_id


def change_openvino_lcm_model_id(model_id):
    app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id = model_id


def get_models_ui() -> None:
    with gr.Blocks():
        with gr.Row():
            lcm_model_id = gr.Dropdown(
                app_settings.lcm_models,
                label="LCM model",
                info="Diffusers LCM model ID",
                value=get_valid_model_id(
                    app_settings.lcm_models,
                    app_settings.settings.lcm_diffusion_setting.lcm_model_id,
                    LCM_DEFAULT_MODEL,
                ),
                interactive=True,
            )
        with gr.Row():
            lcm_lora_model_id = gr.Dropdown(
                app_settings.lcm_lora_models,
                label="LCM LoRA model",
                info="Diffusers LCM LoRA model ID",
                value=get_valid_model_id(
                    app_settings.lcm_lora_models,
                    app_settings.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id,
                ),
                interactive=True,
            )
            lcm_lora_base_model_id = gr.Dropdown(
                app_settings.stable_diffsuion_models,
                label="LCM LoRA base model",
                info="Diffusers LCM LoRA base model ID",
                value=get_valid_model_id(
                    app_settings.stable_diffsuion_models,
                    app_settings.settings.lcm_diffusion_setting.lcm_lora.base_model_id,
                ),
                interactive=True,
            )
        with gr.Row():
            lcm_openvino_model_id = gr.Dropdown(
                app_settings.openvino_lcm_models,
                label="LCM OpenVINO model",
                info="OpenVINO LCM-LoRA fused model ID",
                value=get_valid_model_id(
                    app_settings.openvino_lcm_models,
                    app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id,
                ),
                interactive=True,
            )

        lcm_model_id.change(change_lcm_model_id, lcm_model_id)
        lcm_lora_model_id.change(change_lcm_lora_model_id, lcm_lora_model_id)
        lcm_lora_base_model_id.change(
            change_lcm_lora_base_model_id, lcm_lora_base_model_id
        )
        lcm_openvino_model_id.change(
            change_openvino_lcm_model_id, lcm_openvino_model_id
        )

image_ops.py
ADDED
@@ -0,0 +1,15 @@

from PIL import Image


def resize_pil_image(
    pil_image: Image,
    image_width,
    image_height,
):
    return pil_image.convert("RGB").resize(
        (
            image_width,
            image_height,
        ),
        Image.Resampling.LANCZOS,
    )
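Hypothetical usage (the file name is illustrative):

from PIL import Image

from image_ops import resize_pil_image

img = Image.open("photo.jpg")  # hypothetical input file
resized = resize_pil_image(img, 512, 512)  # RGB-converted, LANCZOS-resampled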

state.py
ADDED
@@ -0,0 +1,32 @@

from app_settings import AppSettings
from typing import Optional

from context import Context
from models.interface_types import InterfaceType


class _AppState:
    _instance: Optional["_AppState"] = None
    settings: Optional[AppSettings] = None
    context: Optional[Context] = None


def get_state() -> _AppState:
    if _AppState._instance is None:
        _AppState._instance = _AppState()
    return _AppState._instance


def get_settings(skip_file: bool = False) -> AppSettings:
    state = get_state()
    if state.settings is None:
        state.settings = AppSettings()
        state.settings.load(skip_file)
    return state.settings


def get_context(interface_type: InterfaceType) -> Context:
    state = get_state()
    if state.context is None:
        state.context = Context(interface_type)
    return state.context
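These accessors implement a lazy module-level singleton, so every UI module that calls get_settings() mutates the same object. A quick sketch of the guarantee:

from state import get_settings

settings_a = get_settings()
settings_b = get_settings()
assert settings_a is settings_b  # one shared AppSettings instance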