zsxkib committed on
Commit
3fe049f
·
verified ·
1 Parent(s): 06ad160

Upload folder using huggingface_hub

Browse files
README.md CHANGED
@@ -10,8 +10,8 @@ tags:
10
  - text-to-video
11
  - video
12
  - video-generation
13
- base_model: "Wan-AI/Wan2.1-I2V-14B-Diffusers"
14
- pipeline_tag: image-to-video
15
  # widget:
16
  # - text: >-
17
  # prompt
@@ -26,7 +26,7 @@ instance_prompt: SQUISH-IT
26
 
27
  ## About this LoRA
28
 
29
- This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the Wan 14B Image-to-Video model.
30
 
31
  It can be used with diffusers or ComfyUI, and can be loaded against the Wan 14B models.
32
 
@@ -52,7 +52,7 @@ import replicate
52
 
53
  input = {
54
  "prompt": "SQUISH-IT",
55
- "lora_url": "https://huggingface.co/zsxkib/squish-pika-lora/resolve/main/wan-14b-i2v-squish-it-lora.safetensors"
56
  }
57
 
58
  output = replicate.run(
@@ -73,7 +73,7 @@ from diffusers.utils import export_to_video
73
  from diffusers import WanVidAdapter, WanVid
74
 
75
  # Load base model
76
- base_model = WanVid.from_pretrained("Wan-AI/Wan2.1-I2V-14B-Diffusers", torch_dtype=torch.float16)
77
 
78
  # Load and apply LoRA adapter
79
  adapter = WanVidAdapter.from_pretrained("zsxkib/squish-pika-lora")
@@ -84,18 +84,13 @@ prompt = "SQUISH-IT"
84
  negative_prompt = "blurry, low quality, low resolution"
85
 
86
  # Generate video frames
87
- from PIL import Image
88
-
89
- # Load input image
90
- image = Image.open("path/to/your/image.jpg").convert("RGB")
91
-
92
- # Generate video from image
93
- frames = base_model.image_to_video(
94
- image=image,
95
  prompt=prompt,
96
  negative_prompt=negative_prompt,
97
  num_inference_steps=30,
98
  guidance_scale=5.0,
 
 
99
  fps=16,
100
  num_frames=32,
101
  ).frames[0]
 
10
  - text-to-video
11
  - video
12
  - video-generation
13
+ base_model: "Wan-AI/Wan2.1-T2V-14B-Diffusers"
14
+ pipeline_tag: text-to-video
15
  # widget:
16
  # - text: >-
17
  # prompt
 
26
 
27
  ## About this LoRA
28
 
29
+ This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the Wan 14B Text-to-Video model.
30
 
31
  It can be used with diffusers or ComfyUI, and can be loaded against the Wan 14B models.
32
 
 
52
 
53
  input = {
54
  "prompt": "SQUISH-IT",
55
+ "lora_url": "https://huggingface.co/zsxkib/squish-pika-lora/resolve/main/wan-14b-t2v-squish-it-lora.safetensors"
56
  }
57
 
58
  output = replicate.run(
 
73
  from diffusers import WanVidAdapter, WanVid
74
 
75
  # Load base model
76
+ base_model = WanVid.from_pretrained("Wan-AI/Wan2.1-T2V-14B-Diffusers", torch_dtype=torch.float16)
77
 
78
  # Load and apply LoRA adapter
79
  adapter = WanVidAdapter.from_pretrained("zsxkib/squish-pika-lora")
 
84
  negative_prompt = "blurry, low quality, low resolution"
85
 
86
  # Generate video frames
87
+ frames = base_model(
 
 
 
 
 
 
 
88
  prompt=prompt,
89
  negative_prompt=negative_prompt,
90
  num_inference_steps=30,
91
  guidance_scale=5.0,
92
+ width=832,
93
+ height=480,
94
  fps=16,
95
  num_frames=32,
96
  ).frames[0]
wan-14b-t2v-squish-it-lora.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:261ae811211fb5bc2ff86c6f97412aa08b7b10e1ae23dad61637e597e888c114
3
+ size 359257680