tshiamor committed on
Commit 4489e76 · 1 Parent(s): bd6ded1

Upload diffusion checkpoint at 25K (safetensors format)

Files changed (4)
  1. README.md +34 -0
  2. config.json +113 -0
  3. model.safetensors +3 -0
  4. train_config.json +223 -0
README.md ADDED
@@ -0,0 +1,34 @@
+ ---
+ license: mit
+ tags:
+ - diffusion-policy
+ - lerobot
+ - safetensors
+ - so101
+ ---
+
+ # Diffusion Policy: so101-block-horizontal-layComb12
+
+ A Diffusion Policy checkpoint trained with the [LeRobot](https://github.com/huggingface/lerobot) framework.
+
+ ## 📦 Checkpoint Info
+
+ - Format: `safetensors`
+ - Model: Diffusion Policy
+ - Trained on: `so101-block-horizontal-layComb12` dataset
+ - Steps: 25,000
+ - Final loss: ~0.013
+ - Config: `train_config.json`, `config.json`
+
+ ## 🔧 How to Load
+
+ ```python
+ from huggingface_hub import hf_hub_download
+ import safetensors.torch as storch
+ import torch
+
+ # Download and load the model weights from the Hub
+ model_path = hf_hub_download("tshiamor/diffussion_so101-block-horizontal-layComb12", "model.safetensors")
+ state_dict = storch.load_file(model_path)
+
+ # You can now load state_dict into your model architecture
+ ```
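+
+ Recent LeRobot releases can also pull the full policy (weights plus `config.json`) in one call. The snippet below is a sketch rather than a guaranteed API: it assumes a LeRobot version whose `DiffusionPolicy` exposes the Hub `from_pretrained` helper, and the exact import path differs between releases.
+
+ ```python
+ # Sketch only: assumes DiffusionPolicy provides from_pretrained();
+ # the import path below may vary with the installed LeRobot version.
+ from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
+
+ policy = DiffusionPolicy.from_pretrained("tshiamor/diffussion_so101-block-horizontal-layComb12")
+ policy.eval()  # expects the camera and state inputs listed in config.json
+ ```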
config.json ADDED
@@ -0,0 +1,113 @@
+ {
+ "type": "diffusion",
+ "n_obs_steps": 6,
+ "normalization_mapping": {
+ "VISUAL": "MEAN_STD",
+ "STATE": "MIN_MAX",
+ "ACTION": "MIN_MAX"
+ },
+ "input_features": {
+ "observation.state": {
+ "type": "STATE",
+ "shape": [
+ 6
+ ]
+ },
+ "observation.images.endeff": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ },
+ "observation.images.top": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ },
+ "observation.images.front": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ },
+ "observation.images.side": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ },
+ "observation.images.endeff_ir": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ }
+ },
+ "output_features": {
+ "action": {
+ "type": "ACTION",
+ "shape": [
+ 6
+ ]
+ }
+ },
+ "device": "cuda",
+ "use_amp": false,
+ "push_to_hub": true,
+ "repo_id": "tshiamor/diffussion_so101-block-horizontal-layComb12",
+ "private": null,
+ "tags": null,
+ "license": null,
+ "horizon": 16,
+ "n_action_steps": 8,
+ "drop_n_last_frames": 7,
+ "vision_backbone": "resnet18",
+ "crop_shape": [
+ 84,
+ 84
+ ],
+ "crop_is_random": true,
+ "pretrained_backbone_weights": null,
+ "use_group_norm": true,
+ "spatial_softmax_num_keypoints": 32,
+ "use_separate_rgb_encoder_per_camera": false,
+ "down_dims": [
+ 64,
+ 128,
+ 256
+ ],
+ "kernel_size": 5,
+ "n_groups": 8,
+ "diffusion_step_embed_dim": 128,
+ "use_film_scale_modulation": true,
+ "noise_scheduler_type": "DDPM",
+ "num_train_timesteps": 100,
+ "beta_schedule": "squaredcos_cap_v2",
+ "beta_start": 0.0001,
+ "beta_end": 0.02,
+ "prediction_type": "epsilon",
+ "clip_sample": true,
+ "clip_sample_range": 1.0,
+ "num_inference_steps": null,
+ "do_mask_loss_for_padding": false,
+ "optimizer_lr": 0.0001,
+ "optimizer_betas": [
+ 0.95,
+ 0.999
+ ],
+ "optimizer_eps": 1e-08,
+ "optimizer_weight_decay": 1e-06,
+ "scheduler_name": "cosine",
+ "scheduler_warmup_steps": 500
+ }
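The `input_features` block above defines what the policy expects at inference time: a 6-dimensional `observation.state` vector and five 3×480×640 camera streams. A quick way to inspect this without cloning the repo is to download `config.json` from the Hub; this is a minimal sketch using only `huggingface_hub` and the standard library:

```python
import json

from huggingface_hub import hf_hub_download

# Fetch the policy config uploaded in this commit and list the expected inputs.
config_path = hf_hub_download(
    "tshiamor/diffussion_so101-block-horizontal-layComb12", "config.json"
)
with open(config_path) as f:
    cfg = json.load(f)

for name, feat in cfg["input_features"].items():
    print(f"{name}: {feat['type']} {feat['shape']}")
# e.g. observation.images.top: VISUAL [3, 480, 640]
```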
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e795be06e1b53f75ac977c58cc9b30a4d28353fc2c0cc1503358e65a058e3009
+ size 90528560
train_config.json ADDED
@@ -0,0 +1,223 @@
+ {
+ "dataset": {
+ "repo_id": "local",
+ "root": "./data/so101-block-horizontal-layComb12",
+ "episodes": null,
+ "image_transforms": {
+ "enable": false,
+ "max_num_transforms": 3,
+ "random_order": false,
+ "tfs": {
+ "brightness": {
+ "weight": 1.0,
+ "type": "ColorJitter",
+ "kwargs": {
+ "brightness": [
+ 0.8,
+ 1.2
+ ]
+ }
+ },
+ "contrast": {
+ "weight": 1.0,
+ "type": "ColorJitter",
+ "kwargs": {
+ "contrast": [
+ 0.8,
+ 1.2
+ ]
+ }
+ },
+ "saturation": {
+ "weight": 1.0,
+ "type": "ColorJitter",
+ "kwargs": {
+ "saturation": [
+ 0.5,
+ 1.5
+ ]
+ }
+ },
+ "hue": {
+ "weight": 1.0,
+ "type": "ColorJitter",
+ "kwargs": {
+ "hue": [
+ -0.05,
+ 0.05
+ ]
+ }
+ },
+ "sharpness": {
+ "weight": 1.0,
+ "type": "SharpnessJitter",
+ "kwargs": {
+ "sharpness": [
+ 0.5,
+ 1.5
+ ]
+ }
+ }
+ }
+ },
+ "revision": null,
+ "use_imagenet_stats": true,
+ "video_backend": "torchcodec"
+ },
+ "env": null,
+ "policy": {
+ "type": "diffusion",
+ "n_obs_steps": 6,
+ "normalization_mapping": {
+ "VISUAL": "MEAN_STD",
+ "STATE": "MIN_MAX",
+ "ACTION": "MIN_MAX"
+ },
+ "input_features": {
+ "observation.state": {
+ "type": "STATE",
+ "shape": [
+ 6
+ ]
+ },
+ "observation.images.endeff": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ },
+ "observation.images.top": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ },
+ "observation.images.front": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ },
+ "observation.images.side": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ },
+ "observation.images.endeff_ir": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ }
+ },
+ "output_features": {
+ "action": {
+ "type": "ACTION",
+ "shape": [
+ 6
+ ]
+ }
+ },
+ "device": "cuda",
+ "use_amp": false,
+ "push_to_hub": true,
+ "repo_id": "tshiamor/diffussion_so101-block-horizontal-layComb12",
+ "private": null,
+ "tags": null,
+ "license": null,
+ "horizon": 16,
+ "n_action_steps": 8,
+ "drop_n_last_frames": 7,
+ "vision_backbone": "resnet18",
+ "crop_shape": [
+ 84,
+ 84
+ ],
+ "crop_is_random": true,
+ "pretrained_backbone_weights": null,
+ "use_group_norm": true,
+ "spatial_softmax_num_keypoints": 32,
+ "use_separate_rgb_encoder_per_camera": false,
+ "down_dims": [
+ 64,
+ 128,
+ 256
+ ],
+ "kernel_size": 5,
+ "n_groups": 8,
+ "diffusion_step_embed_dim": 128,
+ "use_film_scale_modulation": true,
+ "noise_scheduler_type": "DDPM",
+ "num_train_timesteps": 100,
+ "beta_schedule": "squaredcos_cap_v2",
+ "beta_start": 0.0001,
+ "beta_end": 0.02,
+ "prediction_type": "epsilon",
+ "clip_sample": true,
+ "clip_sample_range": 1.0,
+ "num_inference_steps": null,
+ "do_mask_loss_for_padding": false,
+ "optimizer_lr": 0.0001,
+ "optimizer_betas": [
+ 0.95,
+ 0.999
+ ],
+ "optimizer_eps": 1e-08,
+ "optimizer_weight_decay": 1e-06,
+ "scheduler_name": "cosine",
+ "scheduler_warmup_steps": 500
+ },
+ "output_dir": "outputs/train/so101-block-horizontal-layComb12",
+ "job_name": "diffusion",
+ "resume": false,
+ "seed": 42,
+ "num_workers": 4,
+ "batch_size": 64,
+ "steps": 100000,
+ "eval_freq": 500,
+ "log_freq": 100,
+ "save_checkpoint": true,
+ "save_freq": 1000,
+ "use_policy_training_preset": true,
+ "optimizer": {
+ "type": "adam",
+ "lr": 0.0001,
+ "weight_decay": 1e-06,
+ "grad_clip_norm": 10.0,
+ "betas": [
+ 0.95,
+ 0.999
+ ],
+ "eps": 1e-08
+ },
+ "scheduler": {
+ "type": "diffuser",
+ "num_warmup_steps": 500,
+ "name": "cosine"
+ },
+ "eval": {
+ "n_episodes": 50,
+ "batch_size": 50,
+ "use_async_envs": false
+ },
+ "wandb": {
+ "enable": false,
+ "disable_artifact": false,
+ "project": "lerobot",
+ "entity": "tshiamo-none-org",
+ "notes": null,
+ "run_id": "diffusion_so101-block-horizontal",
+ "mode": null
+ }
+ }
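Read together with the commit message, `train_config.json` also tells you where this checkpoint sits in the run: 25,000 of the configured 100,000 steps, saved on the `save_freq: 1000` grid, at batch size 64. A small back-of-the-envelope sketch (the 25,000-step figure comes from the README and commit message, not from the config file itself):

```python
# Values copied from train_config.json and the commit message above.
checkpoint_step = 25_000   # "Upload diffusion checkpoint at 25K"
total_steps = 100_000      # "steps" in train_config.json
batch_size = 64            # "batch_size" in train_config.json

print(f"schedule progress: {checkpoint_step / total_steps:.0%}")   # 25%
print(f"training samples seen: {checkpoint_step * batch_size:,}")  # 1,600,000
```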