mrdbourke committed
Commit cd76e65 · verified · 1 Parent(s): 8e98076

upload fine-tuned RT-DETRv2 trashify object detection model

Files changed (4)
  1. README.md +88 -0
  2. config.json +136 -0
  3. model.safetensors +3 -0
  4. training_args.bin +3 -0
README.md ADDED
---
library_name: transformers
license: apache-2.0
base_model: PekingU/rtdetr_v2_r50vd
tags:
- generated_from_trainer
model-index:
- name: rt_detrv2_finetuned_trashify_box_detector_v1
  results: []
---

# rt_detrv2_finetuned_trashify_box_detector_v1

This model is a fine-tuned version of [PekingU/rtdetr_v2_r50vd](https://huggingface.co/PekingU/rtdetr_v2_r50vd) on a Trashify object detection dataset.
It achieves the following results on the evaluation set:
- Loss: 9.6546
- mAP: 0.4705
- mAP@50: 0.6521
- mAP@75: 0.5344
- mAP (small): 0.2
- mAP (medium): 0.2496
- mAP (large): 0.4925
- mAR@1: 0.5388
- mAR@10: 0.6875
- mAR@100: 0.7263
- mAR (small): 0.4
- mAR (medium): 0.4812
- mAR (large): 0.7609
- mAP (bin): 0.7715
- mAP (hand): 0.4915
- mAP (not_bin): 0.1365
- mAP (not_hand): -1.0
- mAP (not_trash): 0.2446
- mAP (trash): 0.6392
- mAP (trash_arm): 0.5397

A per-class mAP of -1.0 means that class had no ground-truth instances in the evaluation set.

## Model description

An RT-DETRv2 detector (ResNet-50 backbone, see `config.json` below) fine-tuned to localise seven Trashify classes: bin, hand, not_bin, not_hand, not_trash, trash and trash_arm.
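
A minimal inference sketch, assuming the checkpoint is hosted at `mrdbourke/rt_detrv2_finetuned_trashify_box_detector_v1` (a repo id inferred from the model name, not stated in this card) and a local test image:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForObjectDetection

# Assumed repo id and a hypothetical local image path.
model_id = "mrdbourke/rt_detrv2_finetuned_trashify_box_detector_v1"
image = Image.open("test_image.jpg")

processor = AutoImageProcessor.from_pretrained(model_id)
model = AutoModelForObjectDetection.from_pretrained(model_id)

with torch.no_grad():
    outputs = model(**processor(images=image, return_tensors="pt"))

# Turn raw logits/boxes into (score, label, box) triples;
# target_sizes expects (height, width), while PIL's .size is (width, height).
results = processor.post_process_object_detection(
    outputs, target_sizes=[image.size[::-1]], threshold=0.5
)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score:.3f} @ {box.tolist()}")
```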

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a `TrainingArguments` sketch follows the list):
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: AdamW (torch) with betas=(0.9, 0.999), epsilon=1e-08 and no additional optimizer arguments
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.05
- num_epochs: 10
- mixed_precision_training: Native AMP

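A sketch of how the list above maps onto `transformers.TrainingArguments`, assuming single-device training (so `train_batch_size` becomes `per_device_train_batch_size`); the `output_dir` value is an assumption:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="rt_detrv2_finetuned_trashify_box_detector_v1",  # assumed
    learning_rate=1e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    optim="adamw_torch",        # OptimizerNames.ADAMW_TORCH
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_ratio=0.05,
    num_train_epochs=10,
    fp16=True,                  # "Native AMP" mixed precision
)
```
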
### Training results

| Training Loss | Epoch | Step | Validation Loss | mAP | mAP@50 | mAP@75 | mAP (small) | mAP (medium) | mAP (large) | mAR@1 | mAR@10 | mAR@100 | mAR (small) | mAR (medium) | mAR (large) | mAP (bin) | mAP (hand) | mAP (not_bin) | mAP (not_hand) | mAP (not_trash) | mAP (trash) | mAP (trash_arm) |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 81.6957 | 1.0 | 50 | 17.9459 | 0.1816 | 0.2751 | 0.1906 | 0.0125 | 0.061 | 0.1967 | 0.3226 | 0.4792 | 0.5507 | 0.05 | 0.175 | 0.609 | 0.3411 | 0.405 | 0.041 | -1.0 | 0.0548 | 0.2477 | 0.0 |
| 23.5749 | 2.0 | 100 | 11.3631 | 0.322 | 0.4496 | 0.3506 | 0.04 | 0.1311 | 0.3368 | 0.3935 | 0.5962 | 0.6883 | 0.2 | 0.5415 | 0.7219 | 0.7026 | 0.515 | 0.0123 | -1.0 | 0.1511 | 0.5472 | 0.0038 |
| 18.0309 | 3.0 | 150 | 10.3943 | 0.3579 | 0.4953 | 0.3979 | 0.17 | 0.1655 | 0.3748 | 0.4545 | 0.6209 | 0.7206 | 0.25 | 0.5216 | 0.758 | 0.7461 | 0.5023 | 0.0169 | -1.0 | 0.1787 | 0.6231 | 0.0802 |
| 15.7191 | 4.0 | 200 | 9.9083 | 0.3861 | 0.5432 | 0.4363 | 0.4 | 0.1956 | 0.4105 | 0.5212 | 0.658 | 0.7095 | 0.4 | 0.3756 | 0.7573 | 0.7665 | 0.5168 | 0.1076 | -1.0 | 0.2266 | 0.5907 | 0.1085 |
| 14.4269 | 5.0 | 250 | 9.8396 | 0.4014 | 0.5652 | 0.4359 | 0.3167 | 0.1559 | 0.428 | 0.5438 | 0.6762 | 0.7251 | 0.35 | 0.4028 | 0.768 | 0.7664 | 0.4787 | 0.1505 | -1.0 | 0.2503 | 0.6241 | 0.1381 |
| 13.1984 | 6.0 | 300 | 9.8522 | 0.4317 | 0.5934 | 0.4876 | 0.1292 | 0.1813 | 0.4563 | 0.5281 | 0.6718 | 0.7377 | 0.4 | 0.3949 | 0.782 | 0.7713 | 0.4122 | 0.1176 | -1.0 | 0.2414 | 0.6297 | 0.418 |
| 12.4176 | 7.0 | 350 | 9.6682 | 0.4296 | 0.5886 | 0.4761 | 0.1167 | 0.1696 | 0.4603 | 0.5508 | 0.6912 | 0.7292 | 0.4 | 0.3756 | 0.7776 | 0.7733 | 0.4717 | 0.1867 | -1.0 | 0.2676 | 0.642 | 0.2362 |
| 11.6697 | 8.0 | 400 | 9.6841 | 0.4459 | 0.6246 | 0.513 | 0.4 | 0.2667 | 0.4671 | 0.5424 | 0.6817 | 0.7307 | 0.4 | 0.5193 | 0.7683 | 0.7712 | 0.4534 | 0.1454 | -1.0 | 0.2424 | 0.6407 | 0.4224 |
| 11.0414 | 9.0 | 450 | 9.6106 | 0.4878 | 0.6709 | 0.5571 | 0.3667 | 0.1876 | 0.512 | 0.5451 | 0.6827 | 0.7368 | 0.4 | 0.4472 | 0.7721 | 0.7738 | 0.4797 | 0.1304 | -1.0 | 0.2551 | 0.6483 | 0.6394 |
| 10.6143 | 10.0 | 500 | 9.6546 | 0.4705 | 0.6521 | 0.5344 | 0.2 | 0.2496 | 0.4925 | 0.5388 | 0.6875 | 0.7263 | 0.4 | 0.4812 | 0.7609 | 0.7715 | 0.4915 | 0.1365 | -1.0 | 0.2446 | 0.6392 | 0.5397 |

### Framework versions

- Transformers 4.52.0.dev0
- PyTorch 2.7.0+cu126
- Datasets 3.6.0
- Tokenizers 0.21.1
config.json ADDED
{
  "activation_dropout": 0.0,
  "activation_function": "silu",
  "anchor_image_size": null,
  "architectures": [
    "RTDetrV2ForObjectDetection"
  ],
  "attention_dropout": 0.0,
  "auxiliary_loss": true,
  "backbone": null,
  "backbone_config": {
    "depths": [
      3,
      4,
      6,
      3
    ],
    "downsample_in_bottleneck": false,
    "downsample_in_first_stage": false,
    "embedding_size": 64,
    "hidden_act": "relu",
    "hidden_sizes": [
      256,
      512,
      1024,
      2048
    ],
    "layer_type": "bottleneck",
    "model_type": "rt_detr_resnet",
    "num_channels": 3,
    "out_features": [
      "stage2",
      "stage3",
      "stage4"
    ],
    "out_indices": [
      2,
      3,
      4
    ],
    "stage_names": [
      "stem",
      "stage1",
      "stage2",
      "stage3",
      "stage4"
    ]
  },
  "backbone_kwargs": null,
  "batch_norm_eps": 1e-05,
  "box_noise_scale": 1.0,
  "d_model": 256,
  "decoder_activation_function": "relu",
  "decoder_attention_heads": 8,
  "decoder_ffn_dim": 1024,
  "decoder_in_channels": [
    256,
    256,
    256
  ],
  "decoder_layers": 6,
  "decoder_method": "default",
  "decoder_n_levels": 3,
  "decoder_n_points": 4,
  "decoder_offset_scale": 0.5,
  "disable_custom_kernels": true,
  "dropout": 0.0,
  "encode_proj_layers": [
    2
  ],
  "encoder_activation_function": "gelu",
  "encoder_attention_heads": 8,
  "encoder_ffn_dim": 1024,
  "encoder_hidden_dim": 256,
  "encoder_in_channels": [
    512,
    1024,
    2048
  ],
  "encoder_layers": 1,
  "eos_coefficient": 0.0001,
  "eval_size": null,
  "feat_strides": [
    8,
    16,
    32
  ],
  "focal_loss_alpha": 0.75,
  "focal_loss_gamma": 2.0,
  "freeze_backbone_batch_norms": true,
  "hidden_expansion": 1.0,
  "id2label": {
    "0": "bin",
    "1": "hand",
    "2": "not_bin",
    "3": "not_hand",
    "4": "not_trash",
    "5": "trash",
    "6": "trash_arm"
  },
  "initializer_bias_prior_prob": null,
  "initializer_range": 0.01,
  "is_encoder_decoder": true,
  "label2id": {
    "bin": 0,
    "hand": 1,
    "not_bin": 2,
    "not_hand": 3,
    "not_trash": 4,
    "trash": 5,
    "trash_arm": 6
  },
  "label_noise_ratio": 0.5,
  "layer_norm_eps": 1e-05,
  "learn_initial_query": false,
  "matcher_alpha": 0.25,
  "matcher_bbox_cost": 5.0,
  "matcher_class_cost": 2.0,
  "matcher_gamma": 2.0,
  "matcher_giou_cost": 2.0,
  "model_type": "rt_detr_v2",
  "normalize_before": false,
  "num_denoising": 100,
  "num_feature_levels": 3,
  "num_queries": 300,
  "positional_encoding_temperature": 10000,
  "torch_dtype": "float32",
  "transformers_version": "4.52.0.dev0",
  "use_focal_loss": true,
  "use_pretrained_backbone": false,
  "use_timm_backbone": false,
  "weight_loss_bbox": 5.0,
  "weight_loss_giou": 2.0,
  "weight_loss_vfl": 1.0,
  "with_box_refine": true
}
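
The `id2label`/`label2id` maps above are what tie prediction indices to class names. A small sketch of reading them back, again assuming the repo id used earlier:

```python
from transformers import AutoConfig

# Assumed repo id; "model_type": "rt_detr_v2" resolves this to an RTDetrV2Config.
config = AutoConfig.from_pretrained("mrdbourke/rt_detrv2_finetuned_trashify_box_detector_v1")

print(config.id2label)           # {0: 'bin', 1: 'hand', ..., 6: 'trash_arm'}
print(config.label2id["trash"])  # 5
print(config.num_labels)         # 7
```
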
model.safetensors ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:9fb4fbee41b08d5969bdac2d695eeb3ccee3773445fe91328eabe522901a129a
size 171576780
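
These three lines are a Git LFS pointer, not the weights themselves; the ~172 MB file is fetched on download. A quick integrity check of a downloaded copy against the recorded digest (local path assumed):

```python
import hashlib

# Stream the file so the ~172 MB checkpoint never has to fit in memory at once.
def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid from the LFS pointer above.
expected = "9fb4fbee41b08d5969bdac2d695eeb3ccee3773445fe91328eabe522901a129a"
assert sha256_of("model.safetensors") == expected, "checksum mismatch"
```
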
training_args.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:60edf0a983255800ef7f7e11a11c83d4847acb98c7fc305b414128b204e27550
size 5777