Spaces:
Running
on
Zero
Running
on
Zero
Omer Karisman
committed on
Commit
·
abe6901
1
Parent(s):
127f52e
updates
Browse files- predict.py +22 -19
- requirements.txt +3 -1
predict.py
CHANGED
|
@@ -22,25 +22,28 @@ class Predictor(BasePredictor):
|
|
| 22 |
guidance_scale: float = Input(description="Guidance scale for the model", default=3.0, ge=0.0, le=14.0),
|
| 23 |
number_of_images: int = Input(description="Number of images to generate", default=1, ge=1, le=4),
|
| 24 |
number_of_steps: int = Input(description="Number of steps for the model", default=10, ge=1, le=50),
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
depth_image_strength: float = Input(description="Depth image strength for the model, if not supplied the composition image will be used for depth", default=0.5, ge=0.0, le=1.0),
|
| 35 |
) -> List[Path]:
|
| 36 |
"""Run a single prediction on the model"""
|
| 37 |
|
| 38 |
-
base_image = Image.open(
|
| 39 |
-
composition_image = Image.open(
|
| 40 |
-
style_image = Image.open(
|
| 41 |
-
identity_image = Image.open(
|
| 42 |
-
if
|
| 43 |
-
depth_image = Image.open(
|
|
|
|
|
|
|
|
|
|
| 44 |
images = self.omni_zero.generate(
|
| 45 |
seed=seed,
|
| 46 |
prompt=prompt,
|
|
@@ -49,13 +52,13 @@ class Predictor(BasePredictor):
|
|
| 49 |
number_of_images=number_of_images,
|
| 50 |
number_of_steps=number_of_steps,
|
| 51 |
base_image=base_image,
|
| 52 |
-
base_image_strength=
|
| 53 |
composition_image=composition_image,
|
| 54 |
-
composition_image_strength=
|
| 55 |
style_image=style_image,
|
| 56 |
-
style_image_strength=
|
| 57 |
identity_image=identity_image,
|
| 58 |
-
identity_image_strength=
|
| 59 |
depth_image=depth_image,
|
| 60 |
depth_image_strength=depth_image_strength,
|
| 61 |
)
|
|
|
|
| 22 |
guidance_scale: float = Input(description="Guidance scale for the model", default=3.0, ge=0.0, le=14.0),
|
| 23 |
number_of_images: int = Input(description="Number of images to generate", default=1, ge=1, le=4),
|
| 24 |
number_of_steps: int = Input(description="Number of steps for the model", default=10, ge=1, le=50),
|
| 25 |
+
image_url: Path = Input(description="Base image for the model"),
|
| 26 |
+
image_strength: float = Input(description="Base image strength for the model", default=0.15, ge=0.0, le=1.0),
|
| 27 |
+
composition_image_url: Path = Input(description="Composition image for the model"),
|
| 28 |
+
composition_strength: float = Input(description="Composition image strength for the model", default=1.0, ge=0.0, le=1.0),
|
| 29 |
+
style_image_url: Path = Input(description="Style image for the model"),
|
| 30 |
+
style_strength: float = Input(description="Style image strength for the model", default=1.0, ge=0.0, le=1.0),
|
| 31 |
+
identity_image_url: Path = Input(description="Identity image for the model"),
|
| 32 |
+
identity_strength: float = Input(description="Identity image strength for the model", default=1.0, ge=0.0, le=1.0),
|
| 33 |
+
depth_image_url: Path = Input(description="Depth image for the model", default=None),
|
| 34 |
depth_image_strength: float = Input(description="Depth image strength for the model, if not supplied the composition image will be used for depth", default=0.5, ge=0.0, le=1.0),
|
| 35 |
) -> List[Path]:
|
| 36 |
"""Run a single prediction on the model"""
|
| 37 |
|
| 38 |
+
base_image = Image.open(image_url)
|
| 39 |
+
composition_image = Image.open(composition_image_url)
|
| 40 |
+
style_image = Image.open(style_image_url)
|
| 41 |
+
identity_image = Image.open(identity_image_url)
|
| 42 |
+
if depth_image_url is not None:
|
| 43 |
+
depth_image = Image.open(depth_image_url)
|
| 44 |
+
else:
|
| 45 |
+
depth_image = None
|
| 46 |
+
|
| 47 |
images = self.omni_zero.generate(
|
| 48 |
seed=seed,
|
| 49 |
prompt=prompt,
|
|
|
|
| 52 |
number_of_images=number_of_images,
|
| 53 |
number_of_steps=number_of_steps,
|
| 54 |
base_image=base_image,
|
| 55 |
+
base_image_strength=image_strength,
|
| 56 |
composition_image=composition_image,
|
| 57 |
+
composition_image_strength=composition_strength,
|
| 58 |
style_image=style_image,
|
| 59 |
+
style_image_strength=style_strength,
|
| 60 |
identity_image=identity_image,
|
| 61 |
+
identity_image_strength=identity_strength,
|
| 62 |
depth_image=depth_image,
|
| 63 |
depth_image_strength=depth_image_strength,
|
| 64 |
)
|
requirements.txt
CHANGED
|
@@ -3,10 +3,11 @@ diffusers==0.29.0
|
|
| 3 |
controlnet_aux==0.0.8
|
| 4 |
huggingface_hub==0.23.2
|
| 5 |
insightface==0.7.3
|
|
|
|
| 6 |
numpy==1.26.2
|
| 7 |
opencv_contrib_python==4.9.0.80
|
| 8 |
opencv_python==4.9.0.80
|
| 9 |
-
opencv_python_headless==4.
|
| 10 |
Pillow==10.1.0
|
| 11 |
torch==2.0.0
|
| 12 |
torchsde==0.2.6
|
|
@@ -15,3 +16,4 @@ onnxruntime-gpu
|
|
| 15 |
hf_transfer
|
| 16 |
gradio
|
| 17 |
spaces
|
|
|
|
|
|
| 3 |
controlnet_aux==0.0.8
|
| 4 |
huggingface_hub==0.23.2
|
| 5 |
insightface==0.7.3
|
| 6 |
+
albumentations==1.4.3
|
| 7 |
numpy==1.26.2
|
| 8 |
opencv_contrib_python==4.9.0.80
|
| 9 |
opencv_python==4.9.0.80
|
| 10 |
+
opencv_python_headless==4.9.0.80
|
| 11 |
Pillow==10.1.0
|
| 12 |
torch==2.0.0
|
| 13 |
torchsde==0.2.6
|
|
|
|
| 16 |
hf_transfer
|
| 17 |
gradio
|
| 18 |
spaces
|
| 19 |
+
pydantic<2.0.0
|