Voldemort108X committed
Commit d6145b2 · verified · 1 Parent(s): ce40a34

Add files using upload-large-folder tool
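For reference, a minimal sketch of how an upload like this is typically done with the huggingface_hub Python API (the repo id, repo type, and local folder path below are illustrative placeholders, not values taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()
    # Resumable, chunked upload of a large local folder to a Hub repo.
    # repo_id, repo_type, and folder_path are hypothetical examples.
    api.upload_large_folder(
        repo_id="Voldemort108X/example-repo",  # placeholder repo id
        repo_type="model",                     # assumed repo type
        folder_path="./Code",                  # local folder to upload
    )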

Files changed (50)
  1. Code/Baselines/__init__.py +0 -0
  2. Code/Baselines/experiment_baseline.ipynb +1628 -0
  3. Code/Baselines/run_dit_geoaware.sh +170 -0
  4. Code/Data_preprocess/extract_3D_mesh.py +86 -0
  5. Code/Data_preprocess/extract_kps_example_data.ipynb +69 -0
  6. Code/Data_preprocess/process_feats_sd35.py +282 -0
  7. Code/qwen-vl-flash-attn.yml +282 -0
  8. Code/qwen_inference/dataset.py +64 -0
  9. Code/qwen_inference/inference_batched.py +367 -0
  10. Code/qwen_inference/prompts/sys_bbox.txt +39 -0
  11. Code/qwen_inference/prompts/tsk_bbox.txt +4 -0
  12. Code/qwen_inference/prompts/tsk_vlm_judge.txt +22 -0
  13. Code/qwen_inference/qwen_utils.py +1289 -0
  14. Code/qwen_inference/utils.py +829 -0
  15. Code/sc_dit/all_comb_dp_pck_results.csv +0 -0
  16. Code/sc_dit/dataset.py +64 -0
  17. Code/sc_dit/eval_bbox_acc.py +202 -0
  18. Code/sc_dit/evaluate_vlm_judge.py +300 -0
  19. Code/sc_dit/experiment_vlm.ipynb +594 -0
  20. Code/sc_dit/extract_3D_mesh.py +86 -0
  21. Code/sc_dit/extract_vlm_prediction.py +158 -0
  22. Code/sc_dit/gpt_utils.py +0 -0
  23. Code/sc_dit/inference_batched.py +367 -0
  24. Code/sc_dit/inference_batched_with_conf.py +409 -0
  25. Code/sc_dit/inference_batched_with_crop.py +327 -0
  26. Code/sc_dit/inference_batched_with_crop_ort.py +355 -0
  27. Code/sc_dit/inference_gpt_actor_critic.py +403 -0
  28. Code/sc_dit/inference_gpt_actor_critic_ort_with_instruction.py +535 -0
  29. Code/sc_dit/inference_gpt_batched.py +325 -0
  30. Code/sc_dit/near_side_correction.py +224 -0
  31. Code/sc_dit/openai_api_key.py +1 -0
  32. Code/sc_dit/openai_utils.py +61 -0
  33. Code/sc_dit/optimize_camera_pose_clean.py +542 -0
  34. Code/sc_dit/orient_anything_demo.ipynb +25 -0
  35. Code/sc_dit/part_dictionary.py +436 -0
  36. Code/sc_dit/predict_correspondence_vlm.py +336 -0
  37. Code/sc_dit/qwen_gpt_proof_of_concept_actor_critic_detailed.ipynb +0 -0
  38. Code/sc_dit/qwen_gpt_proof_of_concept_actor_critic_simplified.ipynb +0 -0
  39. Code/sc_dit/qwen_proof_of_concept_crop.ipynb +0 -0
  40. Code/sc_dit/qwen_single_demo.ipynb +0 -0
  41. Code/sc_dit/qwen_utils.py +1452 -0
  42. Code/sc_dit/single_dp_pck_results.csv +5 -0
  43. Code/sc_dit/utils.py +869 -0
  44. Code/sc_dit/utils_actor_critic.py +408 -0
  45. Code/sc_dit/utils_actor_critic_with_ort.py +445 -0
  46. Code/sc_dit/vlm_judge_bbox_pred.py +216 -0
  47. Code/sc_dit/vlm_judge_bbox_pred_batched.py +208 -0
  48. Code/sc_dit/vlm_visualize_single_pred.ipynb +0 -0
  49. Code/sc_env_torch_113.yaml +232 -0
  50. README.md +1 -0
Code/Baselines/__init__.py ADDED
File without changes
Code/Baselines/experiment_baseline.ipynb ADDED
@@ -0,0 +1,1628 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "430e89a1",
6
+ "metadata": {},
7
+ "source": [
8
+ "## GeoAware-SC"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "markdown",
13
+ "id": "81eedb10",
14
+ "metadata": {},
15
+ "source": [
16
+ "## Pre-extract features for GeoAware-SC SD-UNet"
17
+ ]
18
+ },
19
+ {
20
+ "cell_type": "code",
21
+ "execution_count": null,
22
+ "id": "98e4fcda",
23
+ "metadata": {},
24
+ "outputs": [],
25
+ "source": [
26
+ "!python ./GeoAware-SC/preprocess_map.py '../../Datasets/PF-dataset-PASCAL/JPEGImages'"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "code",
31
+ "execution_count": null,
32
+ "id": "eaca3cf4",
33
+ "metadata": {},
34
+ "outputs": [],
35
+ "source": [
36
+ "!python ./GeoAware-SC/preprocess_map.py '../../Datasets/SPair-71k/JPEGImages' '1'"
37
+ ]
38
+ },
39
+ {
40
+ "cell_type": "markdown",
41
+ "id": "190072b5",
42
+ "metadata": {},
43
+ "source": [
44
+ "## Extract the masks for unsupervised pose alignment"
45
+ ]
46
+ },
47
+ {
48
+ "cell_type": "code",
49
+ "execution_count": null,
50
+ "id": "6009104d",
51
+ "metadata": {},
52
+ "outputs": [],
53
+ "source": [
54
+ "!python ./GeoAware-SC/preprocess_mask_sam.py '../../Datasets/PF-dataset-PASCAL/JPEGImages'"
55
+ ]
56
+ },
57
+ {
58
+ "cell_type": "code",
59
+ "execution_count": null,
60
+ "id": "e20911a3",
61
+ "metadata": {},
62
+ "outputs": [],
63
+ "source": [
64
+ "!python ./GeoAware-SC/preprocess_mask_sam.py '../../Datasets/SPair-71k/JPEGImages'"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "markdown",
69
+ "id": "77589ff4",
70
+ "metadata": {},
71
+ "source": [
72
+ "## Supervised Training"
73
+ ]
74
+ },
75
+ {
76
+ "cell_type": "markdown",
77
+ "id": "ac36a283",
78
+ "metadata": {},
79
+ "source": [
80
+ "original"
81
+ ]
82
+ },
83
+ {
84
+ "cell_type": "code",
85
+ "execution_count": null,
86
+ "id": "541a8f08",
87
+ "metadata": {},
88
+ "outputs": [],
89
+ "source": [
90
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/train_pascal.yaml --GPU_ID 4"
91
+ ]
92
+ },
93
+ {
94
+ "cell_type": "code",
95
+ "execution_count": null,
96
+ "id": "5cdc136c",
97
+ "metadata": {},
98
+ "outputs": [],
99
+ "source": [
100
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/train_spair.yaml --GPU_ID 1"
101
+ ]
102
+ },
103
+ {
104
+ "cell_type": "markdown",
105
+ "id": "5bc45556",
106
+ "metadata": {},
107
+ "source": [
108
+ "w/ dit feats"
109
+ ]
110
+ },
111
+ {
112
+ "cell_type": "markdown",
113
+ "id": "d5838586",
114
+ "metadata": {},
115
+ "source": [
116
+ "layer15+dino"
117
+ ]
118
+ },
119
+ {
120
+ "cell_type": "code",
121
+ "execution_count": null,
122
+ "id": "a0acc142",
123
+ "metadata": {},
124
+ "outputs": [],
125
+ "source": [
126
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/train_pascal.yaml --GPU_ID 4 --NOTE 'dit_t0.5_l15'"
127
+ ]
128
+ },
129
+ {
130
+ "cell_type": "code",
131
+ "execution_count": null,
132
+ "id": "0657a0cc",
133
+ "metadata": {},
134
+ "outputs": [],
135
+ "source": [
136
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/train_spair.yaml --GPU_ID 5 --NOTE 'dit_t0.5_l15'"
137
+ ]
138
+ },
139
+ {
140
+ "cell_type": "markdown",
141
+ "id": "f0975529",
142
+ "metadata": {},
143
+ "source": [
144
+ "layer20+dino"
145
+ ]
146
+ },
147
+ {
148
+ "cell_type": "code",
149
+ "execution_count": null,
150
+ "id": "615a4d60",
151
+ "metadata": {},
152
+ "outputs": [],
153
+ "source": [
154
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/train_pascal.yaml --GPU_ID 5 --NOTE 'dit_t0.5_l20' --SD_FEATURE_KEYS t_0.5_layer_20"
155
+ ]
156
+ },
157
+ {
158
+ "cell_type": "code",
159
+ "execution_count": null,
160
+ "id": "9fad2454",
161
+ "metadata": {},
162
+ "outputs": [],
163
+ "source": [
164
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/train_spair.yaml --GPU_ID 4 --NOTE 'dit_t0.5_l20' --SD_FEATURE_KEYS t_0.5_layer_20"
165
+ ]
166
+ },
167
+ {
168
+ "cell_type": "markdown",
169
+ "id": "741a46a1",
170
+ "metadata": {},
171
+ "source": [
172
+ "LBFGS"
173
+ ]
174
+ },
175
+ {
176
+ "cell_type": "code",
177
+ "execution_count": null,
178
+ "id": "fded66dd",
179
+ "metadata": {},
180
+ "outputs": [],
181
+ "source": [
182
+ "!python ./GeoAware-SC/pck_train_lbfgs.py --config ./GeoAware-SC/configs/train_pascal_lbfgs.yaml --GPU_ID 1 --OPTIMIZER 'LBFGS'"
183
+ ]
184
+ },
185
+ {
186
+ "cell_type": "code",
187
+ "execution_count": null,
188
+ "id": "5c5c70c8",
189
+ "metadata": {},
190
+ "outputs": [],
191
+ "source": [
192
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/train_spair_lbfgs.yaml --GPU_ID 2 --OPTIMIZER 'LBFGS'"
193
+ ]
194
+ },
195
+ {
196
+ "cell_type": "code",
197
+ "execution_count": null,
198
+ "id": "a20f5133",
199
+ "metadata": {},
200
+ "outputs": [],
201
+ "source": [
202
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/train_pascal_lbfgs_bs8.yaml --GPU_ID 3 --OPTIMIZER 'LBFGS'"
203
+ ]
204
+ },
205
+ {
206
+ "cell_type": "code",
207
+ "execution_count": null,
208
+ "id": "88b9cf88",
209
+ "metadata": {},
210
+ "outputs": [],
211
+ "source": [
212
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/train_spair_lbfgs_bs8.yaml --GPU_ID 4 --OPTIMIZER 'LBFGS'"
213
+ ]
214
+ },
215
+ {
216
+ "cell_type": "markdown",
217
+ "id": "c1314a0a",
218
+ "metadata": {},
219
+ "source": [
220
+ "## Testing"
221
+ ]
222
+ },
223
+ {
224
+ "cell_type": "markdown",
225
+ "id": "98dea849",
226
+ "metadata": {},
227
+ "source": [
228
+ "baseline"
229
+ ]
230
+ },
231
+ {
232
+ "cell_type": "code",
233
+ "execution_count": null,
234
+ "id": "5b30673c",
235
+ "metadata": {},
236
+ "outputs": [],
237
+ "source": [
238
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_pascal.yaml --GPU_ID 1"
239
+ ]
240
+ },
241
+ {
242
+ "cell_type": "code",
243
+ "execution_count": null,
244
+ "id": "242e066c",
245
+ "metadata": {},
246
+ "outputs": [],
247
+ "source": [
248
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_pascal.yaml --GPU_ID 1"
249
+ ]
250
+ },
251
+ {
252
+ "cell_type": "code",
253
+ "execution_count": null,
254
+ "id": "f1776d25",
255
+ "metadata": {},
256
+ "outputs": [],
257
+ "source": [
258
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_spair.yaml --GPU_ID 2"
259
+ ]
260
+ },
261
+ {
262
+ "cell_type": "code",
263
+ "execution_count": null,
264
+ "id": "82270929",
265
+ "metadata": {},
266
+ "outputs": [],
267
+ "source": [
268
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_pascal_reprod.yaml --GPU_ID 2"
269
+ ]
270
+ },
271
+ {
272
+ "cell_type": "code",
273
+ "execution_count": null,
274
+ "id": "fe685287",
275
+ "metadata": {},
276
+ "outputs": [],
277
+ "source": [
278
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_spair.yaml --GPU_ID 2"
279
+ ]
280
+ },
281
+ {
282
+ "cell_type": "markdown",
283
+ "id": "127c7dd3",
284
+ "metadata": {},
285
+ "source": [
286
+ "visibility check"
287
+ ]
288
+ },
289
+ {
290
+ "cell_type": "code",
291
+ "execution_count": null,
292
+ "id": "388a3669",
293
+ "metadata": {},
294
+ "outputs": [],
295
+ "source": [
296
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_pascal.yaml --GPU_ID 1 --VISIBILITY 'single' --NOTE 'pascal_sup_single_vis'"
297
+ ]
298
+ },
299
+ {
300
+ "cell_type": "code",
301
+ "execution_count": null,
302
+ "id": "949022a9",
303
+ "metadata": {},
304
+ "outputs": [],
305
+ "source": [
306
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_spair.yaml --GPU_ID 2 --VISIBILITY 'single' --NOTE 'spair_sup_single_vis'"
307
+ ]
308
+ },
309
+ {
310
+ "cell_type": "markdown",
311
+ "id": "76c982b5",
312
+ "metadata": {},
313
+ "source": [
314
+ "w/ dit feats"
315
+ ]
316
+ },
317
+ {
318
+ "cell_type": "markdown",
319
+ "id": "8c9249d1",
320
+ "metadata": {},
321
+ "source": [
322
+ "layer15+dino"
323
+ ]
324
+ },
325
+ {
326
+ "cell_type": "code",
327
+ "execution_count": null,
328
+ "id": "3ea2531f",
329
+ "metadata": {},
330
+ "outputs": [],
331
+ "source": [
332
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_pascal_dit.yaml --GPU_ID 6 --NOTE 'pascal_dit_t0.5_l15' --LOAD './results_pascal/pck_train_dit_t0.5_l15_sample_100_None_lr_0.00125/best.pth'"
333
+ ]
334
+ },
335
+ {
336
+ "cell_type": "code",
337
+ "execution_count": null,
338
+ "id": "a7d1852a",
339
+ "metadata": {},
340
+ "outputs": [],
341
+ "source": [
342
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_spair_dit.yaml --GPU_ID 6 --NOTE 'spair_dit_t0.5_l15' --LOAD './results_spair/pck_train_dit_t0.5_l15_sample_2_None_lr_0.00125/best.pth'"
343
+ ]
344
+ },
345
+ {
346
+ "cell_type": "markdown",
347
+ "id": "29c3967e",
348
+ "metadata": {},
349
+ "source": [
350
+ "layer20+dino"
351
+ ]
352
+ },
353
+ {
354
+ "cell_type": "code",
355
+ "execution_count": null,
356
+ "id": "d23bfea8",
357
+ "metadata": {},
358
+ "outputs": [],
359
+ "source": []
360
+ },
361
+ {
362
+ "cell_type": "code",
363
+ "execution_count": null,
364
+ "id": "ad11ce7a",
365
+ "metadata": {},
366
+ "outputs": [],
367
+ "source": []
368
+ },
369
+ {
370
+ "cell_type": "markdown",
371
+ "id": "420965d8",
372
+ "metadata": {},
373
+ "source": [
374
+ "## Unsupervised"
375
+ ]
376
+ },
377
+ {
378
+ "cell_type": "markdown",
379
+ "id": "140c3c9f",
380
+ "metadata": {},
381
+ "source": [
382
+ "baseline w/o post-alignment"
383
+ ]
384
+ },
385
+ {
386
+ "cell_type": "code",
387
+ "execution_count": null,
388
+ "id": "8c42e018",
389
+ "metadata": {},
390
+ "outputs": [],
391
+ "source": [
392
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE \"pascal_zs_baseline\" "
393
+ ]
394
+ },
395
+ {
396
+ "cell_type": "code",
397
+ "execution_count": null,
398
+ "id": "b8a2964e",
399
+ "metadata": {},
400
+ "outputs": [],
401
+ "source": [
402
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_zero_shot_spair.yaml --GPU_ID 7 --NOTE \"spair_zs_baseline\" # ADAPT_FLIP: False"
403
+ ]
404
+ },
405
+ {
406
+ "cell_type": "markdown",
407
+ "id": "4b6e03e8",
408
+ "metadata": {},
409
+ "source": [
410
+ "baseline"
411
+ ]
412
+ },
413
+ {
414
+ "cell_type": "code",
415
+ "execution_count": null,
416
+ "id": "b116a806",
417
+ "metadata": {},
418
+ "outputs": [],
419
+ "source": [
420
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE \"pascal_zs_baseline_adp_flip\" --ADAPT_FLIP"
421
+ ]
422
+ },
423
+ {
424
+ "cell_type": "code",
425
+ "execution_count": null,
426
+ "id": "7aab9b75",
427
+ "metadata": {},
428
+ "outputs": [],
429
+ "source": [
430
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_zero_shot_spair.yaml --GPU_ID 3 --NOTE \"spair_zs_baseline_adp_flip_debug\" --ADAPT_FLIP"
431
+ ]
432
+ },
433
+ {
434
+ "cell_type": "code",
435
+ "execution_count": null,
436
+ "id": "eebdb962",
437
+ "metadata": {},
438
+ "outputs": [],
439
+ "source": [
440
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE \"pascal_zs_baseline_adp_flip_subsample\" --ADAPT_FLIP"
441
+ ]
442
+ },
443
+ {
444
+ "cell_type": "markdown",
445
+ "id": "0edd33f9",
446
+ "metadata": {},
447
+ "source": [
448
+ "w/ dit feats"
449
+ ]
450
+ },
451
+ {
452
+ "cell_type": "markdown",
453
+ "id": "a8f5215d",
454
+ "metadata": {},
455
+ "source": [
456
+ "layer 10+dino"
457
+ ]
458
+ },
459
+ {
460
+ "cell_type": "code",
461
+ "execution_count": null,
462
+ "id": "29415abe",
463
+ "metadata": {},
464
+ "outputs": [],
465
+ "source": [
466
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l10_dino' --SD_FEATURE_KEYS t_0.5_layer_10"
467
+ ]
468
+ },
469
+ {
470
+ "cell_type": "code",
471
+ "execution_count": null,
472
+ "id": "ca39f461",
473
+ "metadata": {},
474
+ "outputs": [],
475
+ "source": []
476
+ },
477
+ {
478
+ "cell_type": "markdown",
479
+ "id": "947d4c1f",
480
+ "metadata": {},
481
+ "source": [
482
+ "layer 10+layer 11+dino"
483
+ ]
484
+ },
485
+ {
486
+ "cell_type": "code",
487
+ "execution_count": null,
488
+ "id": "e10352e9",
489
+ "metadata": {},
490
+ "outputs": [],
491
+ "source": [
492
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l10_l11_dino' --SD_FEATURE_KEYS t_0.5_layer_10 t_0.5_layer_11"
493
+ ]
494
+ },
495
+ {
496
+ "cell_type": "code",
497
+ "execution_count": null,
498
+ "id": "f81af184",
499
+ "metadata": {},
500
+ "outputs": [],
501
+ "source": [
502
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l10_l11_dino' --SD_FEATURE_KEYS t_0.7_layer_10 t_0.7_layer_11"
503
+ ]
504
+ },
505
+ {
506
+ "cell_type": "markdown",
507
+ "id": "4a63f92a",
508
+ "metadata": {},
509
+ "source": [
510
+ "layer 10+layer 12+dino"
511
+ ]
512
+ },
513
+ {
514
+ "cell_type": "code",
515
+ "execution_count": null,
516
+ "id": "bef31bad",
517
+ "metadata": {},
518
+ "outputs": [],
519
+ "source": [
520
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l10_l12_dino' --SD_FEATURE_KEYS t_0.5_layer_10 t_0.5_layer_12"
521
+ ]
522
+ },
523
+ {
524
+ "cell_type": "code",
525
+ "execution_count": null,
526
+ "id": "63a3b010",
527
+ "metadata": {},
528
+ "outputs": [],
529
+ "source": [
530
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l10_l12_dino' --SD_FEATURE_KEYS t_0.7_layer_10 t_0.7_layer_12"
531
+ ]
532
+ },
533
+ {
534
+ "cell_type": "markdown",
535
+ "id": "27778af7",
536
+ "metadata": {},
537
+ "source": [
538
+ "layer 11+layer 12+dino"
539
+ ]
540
+ },
541
+ {
542
+ "cell_type": "code",
543
+ "execution_count": null,
544
+ "id": "197d55c8",
545
+ "metadata": {},
546
+ "outputs": [],
547
+ "source": [
548
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l11_l12_dino' --SD_FEATURE_KEYS t_0.5_layer_11 t_0.5_layer_12"
549
+ ]
550
+ },
551
+ {
552
+ "cell_type": "code",
553
+ "execution_count": null,
554
+ "id": "4cb59c2f",
555
+ "metadata": {},
556
+ "outputs": [],
557
+ "source": [
558
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l11_l12_dino' --SD_FEATURE_KEYS t_0.7_layer_11 t_0.7_layer_12"
559
+ ]
560
+ },
561
+ {
562
+ "cell_type": "code",
563
+ "execution_count": null,
564
+ "id": "4bc4147b",
565
+ "metadata": {},
566
+ "outputs": [],
567
+ "source": [
568
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l11_t0.7_l12_dino' --SD_FEATURE_KEYS t_0.5_layer_11 t_0.7_layer_12"
569
+ ]
570
+ },
571
+ {
572
+ "cell_type": "markdown",
573
+ "id": "e2e4853b",
574
+ "metadata": {},
575
+ "source": [
576
+ "layer 10+layer 15+dino"
577
+ ]
578
+ },
579
+ {
580
+ "cell_type": "code",
581
+ "execution_count": null,
582
+ "id": "13272e3a",
583
+ "metadata": {},
584
+ "outputs": [],
585
+ "source": [
586
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l10_l15_dino' --SD_FEATURE_KEYS t_0.5_layer_10 t_0.5_layer_15"
587
+ ]
588
+ },
589
+ {
590
+ "cell_type": "markdown",
591
+ "id": "2465766e",
592
+ "metadata": {},
593
+ "source": [
594
+ "efficient version but seems to have some performance issue"
595
+ ]
596
+ },
597
+ {
598
+ "cell_type": "code",
599
+ "execution_count": null,
600
+ "id": "18685f6b",
601
+ "metadata": {},
602
+ "outputs": [],
603
+ "source": [
604
+ "!python ./GeoAware-SC/pck_train_dit_efficient.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l10_l15_dino' --SD_FEATURE_KEYS t_0.5_layer_10 t_0.5_layer_15"
605
+ ]
606
+ },
607
+ {
608
+ "cell_type": "markdown",
609
+ "id": "ee068f48",
610
+ "metadata": {},
611
+ "source": [
612
+ "layer 9+layer10+layer12+dino"
613
+ ]
614
+ },
615
+ {
616
+ "cell_type": "code",
617
+ "execution_count": null,
618
+ "id": "5f47e47e",
619
+ "metadata": {},
620
+ "outputs": [],
621
+ "source": [
622
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l9_l10_l12_dino' --SD_FEATURE_KEYS t_0.5_layer_9 t_0.5_layer_10 t_0.5_layer_12"
623
+ ]
624
+ },
625
+ {
626
+ "cell_type": "code",
627
+ "execution_count": null,
628
+ "id": "a5002dfa",
629
+ "metadata": {},
630
+ "outputs": [],
631
+ "source": []
632
+ },
633
+ {
634
+ "cell_type": "markdown",
635
+ "id": "61339328",
636
+ "metadata": {},
637
+ "source": [
638
+ "layer 10+layer 15+layer20+dino"
639
+ ]
640
+ },
641
+ {
642
+ "cell_type": "code",
643
+ "execution_count": null,
644
+ "id": "ed91bba4",
645
+ "metadata": {},
646
+ "outputs": [],
647
+ "source": [
648
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l10_l15_l20_dino' --SD_FEATURE_KEYS t_0.5_layer_10 t_0.5_layer_15 t_0.5_layer_20"
649
+ ]
650
+ },
651
+ {
652
+ "cell_type": "code",
653
+ "execution_count": null,
654
+ "id": "58176fe3",
655
+ "metadata": {},
656
+ "outputs": [],
657
+ "source": []
658
+ },
659
+ {
660
+ "cell_type": "markdown",
661
+ "id": "0992ef37",
662
+ "metadata": {},
663
+ "source": [
664
+ "layer 10+layer 12+layer 15+dino"
665
+ ]
666
+ },
667
+ {
668
+ "cell_type": "code",
669
+ "execution_count": null,
670
+ "id": "ed5dabb4",
671
+ "metadata": {},
672
+ "outputs": [],
673
+ "source": [
674
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l10_l12_l15_dino' --SD_FEATURE_KEYS t_0.5_layer_10 t_0.5_layer_12 t_0.5_layer_15"
675
+ ]
676
+ },
677
+ {
678
+ "cell_type": "code",
679
+ "execution_count": null,
680
+ "id": "32f12c74",
681
+ "metadata": {},
682
+ "outputs": [],
683
+ "source": []
684
+ },
685
+ {
686
+ "cell_type": "markdown",
687
+ "id": "2a7e6b0c",
688
+ "metadata": {},
689
+ "source": [
690
+ "layer 15+dino"
691
+ ]
692
+ },
693
+ {
694
+ "cell_type": "code",
695
+ "execution_count": null,
696
+ "id": "e3861e38",
697
+ "metadata": {},
698
+ "outputs": [],
699
+ "source": [
700
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l15' "
701
+ ]
702
+ },
703
+ {
704
+ "cell_type": "code",
705
+ "execution_count": null,
706
+ "id": "ae8fca37",
707
+ "metadata": {},
708
+ "outputs": [],
709
+ "source": [
710
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_spair.yaml --GPU_ID 7 --NOTE 'spair_zs_dit_t0.5_l15_adp_flip' --ADAPT_FLIP"
711
+ ]
712
+ },
713
+ {
714
+ "cell_type": "markdown",
715
+ "id": "5cee3e3d",
716
+ "metadata": {},
717
+ "source": [
718
+ "layer20+dino"
719
+ ]
720
+ },
721
+ {
722
+ "cell_type": "code",
723
+ "execution_count": null,
724
+ "id": "a0a41727",
725
+ "metadata": {},
726
+ "outputs": [],
727
+ "source": [
728
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l20' --SD_FEATURE_KEYS t_0.5_layer_20"
729
+ ]
730
+ },
731
+ {
732
+ "cell_type": "code",
733
+ "execution_count": null,
734
+ "id": "d6d7a983",
735
+ "metadata": {},
736
+ "outputs": [],
737
+ "source": [
738
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_spair.yaml --GPU_ID 7 --NOTE 'spair_zs_dit_t0.5_l20_adp_flip' --ADAPT_FLIP --SD_FEATURE_KEYS t_0.5_layer_20"
739
+ ]
740
+ },
741
+ {
742
+ "cell_type": "markdown",
743
+ "id": "8df228c5",
744
+ "metadata": {},
745
+ "source": [
746
+ "dino"
747
+ ]
748
+ },
749
+ {
750
+ "cell_type": "code",
751
+ "execution_count": null,
752
+ "id": "2b182bac",
753
+ "metadata": {},
754
+ "outputs": [],
755
+ "source": [
756
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_dino' "
757
+ ]
758
+ },
759
+ {
760
+ "cell_type": "code",
761
+ "execution_count": null,
762
+ "id": "549d03ed",
763
+ "metadata": {},
764
+ "outputs": [],
765
+ "source": []
766
+ },
767
+ {
768
+ "cell_type": "markdown",
769
+ "id": "8668cfe0",
770
+ "metadata": {},
771
+ "source": [
772
+ "layer 5"
773
+ ]
774
+ },
775
+ {
776
+ "cell_type": "code",
777
+ "execution_count": null,
778
+ "id": "2ca35cd3",
779
+ "metadata": {},
780
+ "outputs": [],
781
+ "source": [
782
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.3_l5_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.3_layer_5"
783
+ ]
784
+ },
785
+ {
786
+ "cell_type": "code",
787
+ "execution_count": null,
788
+ "id": "5f0a6986",
789
+ "metadata": {},
790
+ "outputs": [],
791
+ "source": [
792
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l5_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_5"
793
+ ]
794
+ },
795
+ {
796
+ "cell_type": "code",
797
+ "execution_count": null,
798
+ "id": "7b33ef61",
799
+ "metadata": {},
800
+ "outputs": [],
801
+ "source": [
802
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.6_l5_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.6_layer_5"
803
+ ]
804
+ },
805
+ {
806
+ "cell_type": "code",
807
+ "execution_count": null,
808
+ "id": "c0844e42",
809
+ "metadata": {},
810
+ "outputs": [],
811
+ "source": [
812
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l5_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_5"
813
+ ]
814
+ },
815
+ {
816
+ "cell_type": "code",
817
+ "execution_count": null,
818
+ "id": "41aad527",
819
+ "metadata": {},
820
+ "outputs": [],
821
+ "source": [
822
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 7 --NOTE 'pascal_zs_dit_t0.8_l5_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.8_layer_5"
823
+ ]
824
+ },
825
+ {
826
+ "cell_type": "markdown",
827
+ "id": "6e13179c",
828
+ "metadata": {},
829
+ "source": [
830
+ "layer 9"
831
+ ]
832
+ },
833
+ {
834
+ "cell_type": "code",
835
+ "execution_count": null,
836
+ "id": "8087d390",
837
+ "metadata": {},
838
+ "outputs": [],
839
+ "source": [
840
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l9_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_9"
841
+ ]
842
+ },
843
+ {
844
+ "cell_type": "code",
845
+ "execution_count": null,
846
+ "id": "3ba2b184",
847
+ "metadata": {},
848
+ "outputs": [],
849
+ "source": [
850
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 7 --NOTE 'pascal_zs_dit_t0.6_l9_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.6_layer_9"
851
+ ]
852
+ },
853
+ {
854
+ "cell_type": "code",
855
+ "execution_count": null,
856
+ "id": "29360749",
857
+ "metadata": {},
858
+ "outputs": [],
859
+ "source": [
860
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l9_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_9"
861
+ ]
862
+ },
863
+ {
864
+ "cell_type": "code",
865
+ "execution_count": null,
866
+ "id": "48c5d36e",
867
+ "metadata": {},
868
+ "outputs": [],
869
+ "source": [
870
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.8_l9_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.8_layer_9"
871
+ ]
872
+ },
873
+ {
874
+ "cell_type": "markdown",
875
+ "id": "b9dc34b1",
876
+ "metadata": {},
877
+ "source": [
878
+ "layer 10"
879
+ ]
880
+ },
881
+ {
882
+ "cell_type": "code",
883
+ "execution_count": null,
884
+ "id": "1eaf176a",
885
+ "metadata": {},
886
+ "outputs": [],
887
+ "source": [
888
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.3_l10_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.3_layer_10"
889
+ ]
890
+ },
891
+ {
892
+ "cell_type": "code",
893
+ "execution_count": null,
894
+ "id": "b7fd7166",
895
+ "metadata": {},
896
+ "outputs": [],
897
+ "source": [
898
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l10_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_10"
899
+ ]
900
+ },
901
+ {
902
+ "cell_type": "code",
903
+ "execution_count": null,
904
+ "id": "ce558c62",
905
+ "metadata": {},
906
+ "outputs": [],
907
+ "source": [
908
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 5 --NOTE 'pascal_zs_dit_t0.6_l10_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.6_layer_10"
909
+ ]
910
+ },
911
+ {
912
+ "cell_type": "code",
913
+ "execution_count": null,
914
+ "id": "43ae6919",
915
+ "metadata": {},
916
+ "outputs": [],
917
+ "source": [
918
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l10_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_10"
919
+ ]
920
+ },
921
+ {
922
+ "cell_type": "code",
923
+ "execution_count": null,
924
+ "id": "08ab4aee",
925
+ "metadata": {},
926
+ "outputs": [],
927
+ "source": [
928
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 4 --NOTE 'pascal_zs_dit_t0.8_l10_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.8_layer_10"
929
+ ]
930
+ },
931
+ {
932
+ "cell_type": "markdown",
933
+ "id": "f7d361ec",
934
+ "metadata": {},
935
+ "source": [
936
+ "layer 11"
937
+ ]
938
+ },
939
+ {
940
+ "cell_type": "code",
941
+ "execution_count": null,
942
+ "id": "23bdc37d",
943
+ "metadata": {},
944
+ "outputs": [],
945
+ "source": []
946
+ },
947
+ {
948
+ "cell_type": "code",
949
+ "execution_count": null,
950
+ "id": "6499ffe5",
951
+ "metadata": {},
952
+ "outputs": [],
953
+ "source": [
954
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l11_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_11"
955
+ ]
956
+ },
957
+ {
958
+ "cell_type": "code",
959
+ "execution_count": null,
960
+ "id": "e77e4e36",
961
+ "metadata": {},
962
+ "outputs": [],
963
+ "source": [
964
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 5 --NOTE 'pascal_zs_dit_t0.6_l11_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.6_layer_11"
965
+ ]
966
+ },
967
+ {
968
+ "cell_type": "code",
969
+ "execution_count": null,
970
+ "id": "a0916dde",
971
+ "metadata": {},
972
+ "outputs": [],
973
+ "source": [
974
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l11_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_11"
975
+ ]
976
+ },
977
+ {
978
+ "cell_type": "code",
979
+ "execution_count": null,
980
+ "id": "5a65a0ea",
981
+ "metadata": {},
982
+ "outputs": [],
983
+ "source": [
984
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 4 --NOTE 'pascal_zs_dit_t0.8_l11_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.8_layer_11"
985
+ ]
986
+ },
987
+ {
988
+ "cell_type": "markdown",
989
+ "id": "be062473",
990
+ "metadata": {},
991
+ "source": [
992
+ "layer 12"
993
+ ]
994
+ },
995
+ {
996
+ "cell_type": "code",
997
+ "execution_count": null,
998
+ "id": "0747f27d",
999
+ "metadata": {},
1000
+ "outputs": [],
1001
+ "source": []
1002
+ },
1003
+ {
1004
+ "cell_type": "code",
1005
+ "execution_count": null,
1006
+ "id": "68442baa",
1007
+ "metadata": {},
1008
+ "outputs": [],
1009
+ "source": [
1010
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l12_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_12"
1011
+ ]
1012
+ },
1013
+ {
1014
+ "cell_type": "code",
1015
+ "execution_count": null,
1016
+ "id": "9334c986",
1017
+ "metadata": {},
1018
+ "outputs": [],
1019
+ "source": [
1020
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 3 --NOTE 'pascal_zs_dit_t0.6_l12_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.6_layer_12"
1021
+ ]
1022
+ },
1023
+ {
1024
+ "cell_type": "code",
1025
+ "execution_count": null,
1026
+ "id": "334c9bda",
1027
+ "metadata": {},
1028
+ "outputs": [],
1029
+ "source": [
1030
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l12_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_12"
1031
+ ]
1032
+ },
1033
+ {
1034
+ "cell_type": "code",
1035
+ "execution_count": null,
1036
+ "id": "04d23da9",
1037
+ "metadata": {},
1038
+ "outputs": [],
1039
+ "source": [
1040
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 2 --NOTE 'pascal_zs_dit_t0.8_l12_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.8_layer_12"
1041
+ ]
1042
+ },
1043
+ {
1044
+ "cell_type": "markdown",
1045
+ "id": "e2881e9d",
1046
+ "metadata": {},
1047
+ "source": [
1048
+ "layer 13"
1049
+ ]
1050
+ },
1051
+ {
1052
+ "cell_type": "code",
1053
+ "execution_count": null,
1054
+ "id": "3ccfed22",
1055
+ "metadata": {},
1056
+ "outputs": [],
1057
+ "source": []
1058
+ },
1059
+ {
1060
+ "cell_type": "code",
1061
+ "execution_count": null,
1062
+ "id": "029bff11",
1063
+ "metadata": {},
1064
+ "outputs": [],
1065
+ "source": [
1066
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l13_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_13"
1067
+ ]
1068
+ },
1069
+ {
1070
+ "cell_type": "code",
1071
+ "execution_count": null,
1072
+ "id": "ba7ba944",
1073
+ "metadata": {},
1074
+ "outputs": [],
1075
+ "source": [
1076
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.6_l13_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.6_layer_13"
1077
+ ]
1078
+ },
1079
+ {
1080
+ "cell_type": "code",
1081
+ "execution_count": null,
1082
+ "id": "3e4a191f",
1083
+ "metadata": {},
1084
+ "outputs": [],
1085
+ "source": [
1086
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l13_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_13"
1087
+ ]
1088
+ },
1089
+ {
1090
+ "cell_type": "code",
1091
+ "execution_count": null,
1092
+ "id": "c9531ba5",
1093
+ "metadata": {},
1094
+ "outputs": [],
1095
+ "source": [
1096
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 7 --NOTE 'pascal_zs_dit_t0.8_l13_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.8_layer_13"
1097
+ ]
1098
+ },
1099
+ {
1100
+ "cell_type": "markdown",
1101
+ "id": "b8f8ea39",
1102
+ "metadata": {},
1103
+ "source": [
1104
+ "layer 15"
1105
+ ]
1106
+ },
1107
+ {
1108
+ "cell_type": "code",
1109
+ "execution_count": null,
1110
+ "id": "6c691a72",
1111
+ "metadata": {},
1112
+ "outputs": [],
1113
+ "source": [
1114
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.3_l15_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.3_layer_15"
1115
+ ]
1116
+ },
1117
+ {
1118
+ "cell_type": "code",
1119
+ "execution_count": null,
1120
+ "id": "b09dc3d4",
1121
+ "metadata": {},
1122
+ "outputs": [],
1123
+ "source": [
1124
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l15_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_15"
1125
+ ]
1126
+ },
1127
+ {
1128
+ "cell_type": "code",
1129
+ "execution_count": null,
1130
+ "id": "9bcbdb33",
1131
+ "metadata": {},
1132
+ "outputs": [],
1133
+ "source": [
1134
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 3 --NOTE 'pascal_zs_dit_t0.6_l15_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.6_layer_15"
1135
+ ]
1136
+ },
1137
+ {
1138
+ "cell_type": "code",
1139
+ "execution_count": null,
1140
+ "id": "e1a0561b",
1141
+ "metadata": {},
1142
+ "outputs": [],
1143
+ "source": [
1144
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l15_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_15"
1145
+ ]
1146
+ },
1147
+ {
1148
+ "cell_type": "code",
1149
+ "execution_count": null,
1150
+ "id": "8e329675",
1151
+ "metadata": {},
1152
+ "outputs": [],
1153
+ "source": [
1154
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 2 --NOTE 'pascal_zs_dit_t0.8_l15_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.8_layer_15"
1155
+ ]
1156
+ },
1157
+ {
1158
+ "cell_type": "markdown",
1159
+ "id": "447f4004",
1160
+ "metadata": {},
1161
+ "source": [
1162
+ "layer 20"
1163
+ ]
1164
+ },
1165
+ {
1166
+ "cell_type": "code",
1167
+ "execution_count": null,
1168
+ "id": "569c940c",
1169
+ "metadata": {},
1170
+ "outputs": [],
1171
+ "source": [
1172
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.3_l20_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.3_layer_20"
1173
+ ]
1174
+ },
1175
+ {
1176
+ "cell_type": "code",
1177
+ "execution_count": null,
1178
+ "id": "08d451ef",
1179
+ "metadata": {},
1180
+ "outputs": [],
1181
+ "source": [
1182
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l20_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_20"
1183
+ ]
1184
+ },
1185
+ {
1186
+ "cell_type": "code",
1187
+ "execution_count": null,
1188
+ "id": "8231d012",
1189
+ "metadata": {},
1190
+ "outputs": [],
1191
+ "source": [
1192
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l20_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_20"
1193
+ ]
1194
+ },
1195
+ {
1196
+ "cell_type": "markdown",
1197
+ "id": "954f1e0f",
1198
+ "metadata": {},
1199
+ "source": [
1200
+ "layer 10+layer 12"
1201
+ ]
1202
+ },
1203
+ {
1204
+ "cell_type": "code",
1205
+ "execution_count": null,
1206
+ "id": "d97002be",
1207
+ "metadata": {},
1208
+ "outputs": [],
1209
+ "source": [
1210
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l10_l12_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_10 t_0.5_layer_12"
1211
+ ]
1212
+ },
1213
+ {
1214
+ "cell_type": "code",
1215
+ "execution_count": null,
1216
+ "id": "1cb6bb96",
1217
+ "metadata": {},
1218
+ "outputs": [],
1219
+ "source": [
1220
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l10_l12_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_10 t_0.7_layer_12"
1221
+ ]
1222
+ },
1223
+ {
1224
+ "cell_type": "markdown",
1225
+ "id": "46ff2c36",
1226
+ "metadata": {},
1227
+ "source": [
1228
+ "layer 10+layer 11"
1229
+ ]
1230
+ },
1231
+ {
1232
+ "cell_type": "code",
1233
+ "execution_count": null,
1234
+ "id": "ddc2c1cd",
1235
+ "metadata": {},
1236
+ "outputs": [],
1237
+ "source": [
1238
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l10_l11_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_10 t_0.5_layer_11"
1239
+ ]
1240
+ },
1241
+ {
1242
+ "cell_type": "code",
1243
+ "execution_count": null,
1244
+ "id": "00d84b97",
1245
+ "metadata": {},
1246
+ "outputs": [],
1247
+ "source": [
1248
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l10_l11_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_10 t_0.7_layer_11"
1249
+ ]
1250
+ },
1251
+ {
1252
+ "cell_type": "markdown",
1253
+ "id": "1382199b",
1254
+ "metadata": {},
1255
+ "source": [
1256
+ "layer 11+layer 12"
1257
+ ]
1258
+ },
1259
+ {
1260
+ "cell_type": "code",
1261
+ "execution_count": null,
1262
+ "id": "4da4587d",
1263
+ "metadata": {},
1264
+ "outputs": [],
1265
+ "source": [
1266
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.5_l11_l12_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.5_layer_11 t_0.5_layer_12"
1267
+ ]
1268
+ },
1269
+ {
1270
+ "cell_type": "code",
1271
+ "execution_count": null,
1272
+ "id": "369becde",
1273
+ "metadata": {},
1274
+ "outputs": [],
1275
+ "source": [
1276
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE 'pascal_zs_dit_t0.7_l11_l12_no_dino' --NO_DINO --SD_FEATURE_KEYS t_0.7_layer_11 t_0.7_layer_12"
1277
+ ]
1278
+ },
1279
+ {
1280
+ "cell_type": "markdown",
1281
+ "id": "428ee089",
1282
+ "metadata": {},
1283
+ "source": [
1284
+ "## Current best Pascal"
1285
+ ]
1286
+ },
1287
+ {
1288
+ "cell_type": "code",
1289
+ "execution_count": null,
1290
+ "id": "7eb7a455",
1291
+ "metadata": {},
1292
+ "outputs": [],
1293
+ "source": [
1294
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 3 --NOTE 'pascal_zs_dit_t0.5_0.7_l11' --SD_FEATURE_KEYS t_0.5_layer_11 t_0.7_layer_11"
1295
+ ]
1296
+ },
1297
+ {
1298
+ "cell_type": "code",
1299
+ "execution_count": null,
1300
+ "id": "2e000e16",
1301
+ "metadata": {},
1302
+ "outputs": [],
1303
+ "source": [
1304
+ "import os\n",
1305
+ "print(os.listdir('../Baselines/results_clean_pascal/pck_train_pascal_zs_dit_t0.5_0.7_l11_sample_1_None_lr_0.00125'))\n",
1306
+ "\n",
1307
+ "path_result = '../Baselines/results_clean_pascal/pck_train_pascal_zs_dit_t0.5_0.7_l11_sample_1_None_lr_0.00125'\n",
1308
+ "\n",
1309
+ "# load the result.pkl file\n",
1310
+ "import pickle\n",
1311
+ "\n",
1312
+ "pkl_file = os.path.join(path_result, 'result.pkl')\n",
1313
+ "with open(pkl_file, 'rb') as f:\n",
1314
+ " data = pickle.load(f)\n",
1315
+ "\n",
1316
+ "print(data)\n",
1317
+ "\n",
1318
+ "print(len(data))\n",
1319
+ "\n",
1320
+ "\n",
1321
+ "print(os.listdir(path_result))"
1322
+ ]
1323
+ },
1324
+ {
1325
+ "cell_type": "markdown",
1326
+ "id": "cc3498b8",
1327
+ "metadata": {},
1328
+ "source": []
1329
+ },
1330
+ {
1331
+ "cell_type": "markdown",
1332
+ "id": "06d9c559",
1333
+ "metadata": {},
1334
+ "source": [
1335
+ "## Baseline spair zeroshot"
1336
+ ]
1337
+ },
1338
+ {
1339
+ "cell_type": "code",
1340
+ "execution_count": null,
1341
+ "id": "9ebd98c6",
1342
+ "metadata": {},
1343
+ "outputs": [],
1344
+ "source": [
1345
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE \"pascal_zs_baseline_subsample_nosoft\" --TEST_SAMPLE 10"
1346
+ ]
1347
+ },
1348
+ {
1349
+ "cell_type": "code",
1350
+ "execution_count": null,
1351
+ "id": "ce3f42eb",
1352
+ "metadata": {},
1353
+ "outputs": [],
1354
+ "source": [
1355
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE \"pascal_zs_baseline_subsample_nosoft_single\" --TEST_SAMPLE 10 --VISIBILITY 'single'"
1356
+ ]
1357
+ },
1358
+ {
1359
+ "cell_type": "code",
1360
+ "execution_count": null,
1361
+ "id": "a715d30e",
1362
+ "metadata": {},
1363
+ "outputs": [],
1364
+ "source": [
1365
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_zero_shot_spair.yaml --GPU_ID 6 --NOTE \"spair_zs_baseline_single_no_adap_flip\" --TEST_SAMPLE 0 --VISIBILITY 'single'"
1366
+ ]
1367
+ },
1368
+ {
1369
+ "cell_type": "code",
1370
+ "execution_count": null,
1371
+ "id": "9e60c8bf",
1372
+ "metadata": {},
1373
+ "outputs": [],
1374
+ "source": [
1375
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_zero_shot_spair.yaml --GPU_ID 7 --NOTE \"spair_zs_baseline_single\" --TEST_SAMPLE 0 --VISIBILITY 'single' --ADAPT_FLIP"
1376
+ ]
1377
+ },
1378
+ {
1379
+ "cell_type": "markdown",
1380
+ "id": "5183b94c",
1381
+ "metadata": {},
1382
+ "source": [
1383
+ "## Best Spair Attempt"
1384
+ ]
1385
+ },
1386
+ {
1387
+ "cell_type": "code",
1388
+ "execution_count": null,
1389
+ "id": "1c75fb69",
1390
+ "metadata": {},
1391
+ "outputs": [],
1392
+ "source": [
1393
+ "!python ./GeoAware-SC/pck_train.py --config ./GeoAware-SC/configs/eval_zero_shot_pascal.yaml --GPU_ID 6 --NOTE \"pascal_zs_baseline_adp_flip\" --ADAPT_FLIP"
1394
+ ]
1395
+ },
1396
+ {
1397
+ "cell_type": "code",
1398
+ "execution_count": null,
1399
+ "id": "e573bbf5",
1400
+ "metadata": {},
1401
+ "outputs": [],
1402
+ "source": [
1403
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_spair.yaml --GPU_ID 3 --ADAPT_FLIP --NOTE 'spair_zs_dit_t0.5_0.7_l11' --SD_FEATURE_KEYS t_0.5_layer_11 t_0.7_layer_11 "
1404
+ ]
1405
+ },
1406
+ {
1407
+ "cell_type": "code",
1408
+ "execution_count": null,
1409
+ "id": "bd13089f",
1410
+ "metadata": {},
1411
+ "outputs": [],
1412
+ "source": [
1413
+ "!python ./GeoAware-SC/pck_train_dit.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE 'spair_zs_dit_t0.5_l12_0.7_l10_sub_10' --SD_FEATURE_KEYS t_0.5_layer_12 t_0.7_layer_10 "
1414
+ ]
1415
+ },
1416
+ {
1417
+ "cell_type": "markdown",
1418
+ "id": "5f531af3",
1419
+ "metadata": {},
1420
+ "source": [
1421
+ "## SPair + VLM"
1422
+ ]
1423
+ },
1424
+ {
1425
+ "cell_type": "markdown",
1426
+ "id": "a5a9ef38",
1427
+ "metadata": {},
1428
+ "source": [
1429
+ "## mutual visibility (traditional)"
1430
+ ]
1431
+ },
1432
+ {
1433
+ "cell_type": "code",
1434
+ "execution_count": null,
1435
+ "id": "189bf30f",
1436
+ "metadata": {},
1437
+ "outputs": [],
1438
+ "source": [
1439
+ "!python ./GeoAware-SC/pck_train_vlm.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE \"spair_zs_baseline_vlm_subsample_10\" --TEST_SAMPLE 10 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b'"
1440
+ ]
1441
+ },
1442
+ {
1443
+ "cell_type": "code",
1444
+ "execution_count": null,
1445
+ "id": "d85a0750",
1446
+ "metadata": {},
1447
+ "outputs": [],
1448
+ "source": [
1449
+ "!python ./GeoAware-SC/pck_train_vlm.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE \"spair_zs_baseline_vlm_subsample_10\" --TEST_SAMPLE 10 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b' --VISIBILITY 'mutual'"
1450
+ ]
1451
+ },
1452
+ {
1453
+ "cell_type": "code",
1454
+ "execution_count": null,
1455
+ "id": "53221795",
1456
+ "metadata": {},
1457
+ "outputs": [],
1458
+ "source": [
1459
+ "!python ./GeoAware-SC/pck_train_vlm.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE \"spair_zs_baseline_vlm_subsample_10_gpt4.1_mini_judge_dilation_0\" --TEST_SAMPLE 10 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b_gpt4.1_mini_judge_dilation_0/' --VISIBILITY 'mutual'"
1460
+ ]
1461
+ },
1462
+ {
1463
+ "cell_type": "markdown",
1464
+ "id": "598d8c65",
1465
+ "metadata": {},
1466
+ "source": [
1467
+ "compute all"
1468
+ ]
1469
+ },
1470
+ {
1471
+ "cell_type": "markdown",
1472
+ "id": "012f6a2e",
1473
+ "metadata": {},
1474
+ "source": [
1475
+ "unsupervised"
1476
+ ]
1477
+ },
1478
+ {
1479
+ "cell_type": "code",
1480
+ "execution_count": null,
1481
+ "id": "b1482c1c",
1482
+ "metadata": {},
1483
+ "outputs": [],
1484
+ "source": [
1485
+ "!python ./GeoAware-SC/pck_train_vlm_compute_all.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 1 --NOTE \"spair_zs_mutual_vlm_qwen_vl_32b\" --TEST_SAMPLE 0 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b/' --VISIBILITY 'mutual' --SOFT_EVAL --ADAPT_FLIP"
1486
+ ]
1487
+ },
1488
+ {
1489
+ "cell_type": "markdown",
1490
+ "id": "1bed77bf",
1491
+ "metadata": {},
1492
+ "source": [
1493
+ "supervised"
1494
+ ]
1495
+ },
1496
+ {
1497
+ "cell_type": "code",
1498
+ "execution_count": null,
1499
+ "id": "5a94fe78",
1500
+ "metadata": {},
1501
+ "outputs": [],
1502
+ "source": [
1503
+ "!python ./GeoAware-SC/pck_train_vlm_compute_all.py --config ./GeoAware-SC/configs/eval_spair_subsample_vlm.yaml --GPU_ID 2 --NOTE 'spair_sup_mutual_vlm_qwen_vl_32b_mutual_vis' --TEST_SAMPLE 0 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b' --VISIBILITY 'mutual' --SOFT_EVAL"
1504
+ ]
1505
+ },
1506
+ {
1507
+ "cell_type": "markdown",
1508
+ "id": "975fa542",
1509
+ "metadata": {},
1510
+ "source": [
1511
+ "## single visibility (in the wild)"
1512
+ ]
1513
+ },
1514
+ {
1515
+ "cell_type": "code",
1516
+ "execution_count": null,
1517
+ "id": "568213a0",
1518
+ "metadata": {},
1519
+ "outputs": [],
1520
+ "source": [
1521
+ "!python ./GeoAware-SC/pck_train_vlm.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE \"spair_zs_baseline_vlm_subsample_10_vis_single\" --TEST_SAMPLE 10 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b_dilation_0' --VISIBILITY 'single'"
1522
+ ]
1523
+ },
1524
+ {
1525
+ "cell_type": "code",
1526
+ "execution_count": null,
1527
+ "id": "89b855c9",
1528
+ "metadata": {},
1529
+ "outputs": [],
1530
+ "source": [
1531
+ "!python ./GeoAware-SC/pck_train_vlm.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE \"spair_zs_baseline_vlm_subsample_10_vis_single_gpt4.1_mini_judge_dilation_28\" --TEST_SAMPLE 10 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b_gpt4.1_mini_judge_dilation_28/' --VISIBILITY 'single'"
1532
+ ]
1533
+ },
1534
+ {
1535
+ "cell_type": "markdown",
1536
+ "id": "5815da7f",
1537
+ "metadata": {},
1538
+ "source": [
1539
+ "compute all"
1540
+ ]
1541
+ },
1542
+ {
1543
+ "cell_type": "markdown",
1544
+ "id": "ed0ba7cc",
1545
+ "metadata": {},
1546
+ "source": [
1547
+ "unsupervised"
1548
+ ]
1549
+ },
1550
+ {
1551
+ "cell_type": "code",
1552
+ "execution_count": null,
1553
+ "id": "8eb712e7",
1554
+ "metadata": {},
1555
+ "outputs": [],
1556
+ "source": [
1557
+ "!python ./GeoAware-SC/pck_train_vlm_compute_all.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 3 --NOTE \"spair_zs_single_vlm_qwen_vl_32b\" --TEST_SAMPLE 0 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b/' --VISIBILITY 'single' --SOFT_EVAL --ADAPT_FLIP"
1558
+ ]
1559
+ },
1560
+ {
1561
+ "cell_type": "markdown",
1562
+ "id": "2d7b0ed7",
1563
+ "metadata": {},
1564
+ "source": [
1565
+ "supervised"
1566
+ ]
1567
+ },
1568
+ {
1569
+ "cell_type": "code",
1570
+ "execution_count": null,
1571
+ "id": "b8bda291",
1572
+ "metadata": {},
1573
+ "outputs": [],
1574
+ "source": [
1575
+ "!python ./GeoAware-SC/pck_train_vlm_compute_all.py --config ./GeoAware-SC/configs/eval_spair_subsample_vlm.yaml --GPU_ID 4 --NOTE 'spair_sup_single_vlm_qwen_vl_32b' --TEST_SAMPLE 0 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b' --VISIBILITY 'single' --SOFT_EVAL"
1576
+ ]
1577
+ },
1578
+ {
1579
+ "cell_type": "markdown",
1580
+ "id": "dbfc9eca",
1581
+ "metadata": {},
1582
+ "source": [
1583
+ "sanity check if no judge"
1584
+ ]
1585
+ },
1586
+ {
1587
+ "cell_type": "code",
1588
+ "execution_count": null,
1589
+ "id": "59aa9d92",
1590
+ "metadata": {},
1591
+ "outputs": [],
1592
+ "source": [
1593
+ "!python ./GeoAware-SC/pck_train_vlm.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE \"spair_zs_baseline_vlm_subsample_10_vis_single_no_judge_dilation_28\" --TEST_SAMPLE 10 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b_no_judge_dilation_28/' --VISIBILITY 'single'"
1594
+ ]
1595
+ },
1596
+ {
1597
+ "cell_type": "code",
1598
+ "execution_count": null,
1599
+ "id": "2b7b949a",
1600
+ "metadata": {},
1601
+ "outputs": [],
1602
+ "source": [
1603
+ "!python ./GeoAware-SC/pck_train_vlm.py --config ./GeoAware-SC/configs/eval_zero_shot_spair_subsample.yaml --GPU_ID 6 --NOTE \"spair_zs_baseline_vlm_subsample_10_vis_mutual_no_judge_dilation_28\" --TEST_SAMPLE 10 --BBOX_PATH '../sc_dit/results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b_no_judge_dilation_28/' --VISIBILITY 'mutual'"
1604
+ ]
1605
+ }
1606
+ ],
1607
+ "metadata": {
1608
+ "kernelspec": {
1609
+ "display_name": "sc_env_torch_113",
1610
+ "language": "python",
1611
+ "name": "python3"
1612
+ },
1613
+ "language_info": {
1614
+ "codemirror_mode": {
1615
+ "name": "ipython",
1616
+ "version": 3
1617
+ },
1618
+ "file_extension": ".py",
1619
+ "mimetype": "text/x-python",
1620
+ "name": "python",
1621
+ "nbconvert_exporter": "python",
1622
+ "pygments_lexer": "ipython3",
1623
+ "version": "3.9.21"
1624
+ }
1625
+ },
1626
+ "nbformat": 4,
1627
+ "nbformat_minor": 5
1628
+ }
Code/Baselines/run_dit_geoaware.sh ADDED
@@ -0,0 +1,170 @@
1
+ #!/usr/bin/env bash
2
+ #
3
+ # Improved script to run all combinations of DiT features.
4
+ #
5
+ # Key Improvements:
6
+ # - Corrected key formatting for multiple features (space-separated).
7
+ # - Corrected GPU array handling for true parallel execution.
8
+ # - Enhanced Logging: Each job now writes its output to a dedicated log file
9
+ # in a timestamped `logs/` directory, making it easy to debug failed runs.
10
+ # - More Descriptive Output: Real-time logs now show which job is on which GPU slot.
11
+ # - Robustness: The script will no longer exit if a single Python job fails.
12
+ # - Better Process Management: The script saves its process ID (PID) to a file.
13
+
14
+ # Exit on unset variables and pipeline errors, but not on a single command error.
15
+ set -uo pipefail
16
+
17
+ # ---------------------------------------------------------------------------
18
+ # Section 0: Configuration
19
+ # ---------------------------------------------------------------------------
20
+
21
+ # --- File Paths ---
22
+ CONFIG=./GeoAware-SC/configs/eval_zero_shot_pascal.yaml
23
+ SCRIPT=./GeoAware-SC/pck_train_dit.py
24
+
25
+ # --- Feature Parameters ---
26
+ TIMESTEPS=(0.3 0.5 0.6 0.7 0.8)
27
+ LAYERS=(5 9 10 11 12 13 15 18 20)
28
+
29
+ # --- GPU Configuration ---
30
+ # IMPORTANT: Use spaces to separate GPU IDs. DO NOT use commas.
31
+ GPUS=(4 5 6 7)
32
+
33
+ # --- Job Configuration ---
34
+ COMBINATION_SIZES=(2 3)
35
+
36
+
37
+ # ---------------------------------------------------------------------------
38
+ # Section 1: Setup
39
+ # ---------------------------------------------------------------------------
40
+ MAX_PARALLEL=${#GPUS[@]}
41
+ LOG_DIR="logs_pascal_zs_dit_$(date +%F_%H-%M-%S)"
42
+ PID_FILE="run_dit_geoaware.pid"
43
+
44
+ # Check if required files exist
45
+ if [[ ! -f "$CONFIG" || ! -f "$SCRIPT" ]]; then
46
+ echo "Error: Config file ($CONFIG) or script file ($SCRIPT) not found."
47
+ exit 1
48
+ fi
49
+
50
+ # Create a directory for log files
51
+ mkdir -p "$LOG_DIR"
52
+ echo "✅ Log files will be saved in: $LOG_DIR"
53
+
54
+ # Store the script's Process ID (PID) for easy termination
55
+ echo $$ > "$PID_FILE"
56
+ echo "🚀 Script started with PID: $$. To stop this script, run: kill \$(cat $PID_FILE)"
57
+
58
+ # On exit, clean up the PID file
59
+ trap 'rm -f "$PID_FILE"' EXIT
60
+
61
+
62
+ # ---------------------------------------------------------------------------
63
+ # Section 2: Key Generation
64
+ # ---------------------------------------------------------------------------
65
+ keys=()
66
+ for t in "${TIMESTEPS[@]}"; do
67
+ for l in "${LAYERS[@]}"; do
68
+ keys+=("t_${t}_layer_${l}")
69
+ done
70
+ done
71
+ N=${#keys[@]}
72
+ echo "💡 Generated $N unique feature keys. Will run with and without DINO."
73
+ echo "🖥️ Will run jobs in parallel across $MAX_PARALLEL GPUs."
74
+
75
+
76
+ # ---------------------------------------------------------------------------
77
+ # Section 3: Job Execution
78
+ # ---------------------------------------------------------------------------
79
+ job_idx=0
80
+
81
+ # Throttles the job launch rate to match the number of available GPUs.
82
+ throttle() {
83
+ while (( $(jobs -r | wc -l) >= MAX_PARALLEL )); do
84
+ wait -n
85
+ done
86
+ }
87
+
88
+ # Launches a Python training job in the background with proper logging.
89
+ run_job() {
90
+ local keystr="$1" # Space-separated list of keys
91
+ local use_dino="$2" # "true" or "false"
92
+
93
+ throttle
94
+
95
+ local gpu_idx=$(( job_idx % MAX_PARALLEL ))
96
+ local gpu="${GPUS[$gpu_idx]}"
97
+
98
+ local dino_flag=""
99
+ local dino_tag=""
100
+ if [[ "$use_dino" == "false" ]]; then
101
+ dino_flag="--NO_DINO"
102
+ dino_tag="_no_dino"
103
+ fi
104
+
105
+ # Create a descriptive name for logs, replacing spaces in keys with underscores.
106
+ local note_stub="${keystr// /__}"
107
+ local run_name="pascal_zs_dit_${note_stub}${dino_tag}"
108
+
109
+ echo " (Job $((job_idx+1))) [Slot $((gpu_idx+1))/${MAX_PARALLEL}] Launching on GPU ${gpu}: ${keystr}${dino_tag}"
110
+
111
+ # Launch the Python script in the background.
112
+ # IMPORTANT: $keystr is NOT quoted, allowing the shell to split it into multiple arguments.
113
+ CUDA_VISIBLE_DEVICES="$gpu" \
114
+ python "$SCRIPT" \
115
+ --config "$CONFIG" \
116
+ --GPU_ID "$gpu" \
117
+ --NOTE "$run_name" \
118
+ $dino_flag \
119
+ --SD_FEATURE_KEYS $keystr > "${LOG_DIR}/${run_name}.log" 2>&1 &
120
+
121
+ ((job_idx++))
122
+ }
123
+
124
+
125
+ # ---------------------------------------------------------------------------
126
+ # Section 4: Main Loop
127
+ # ---------------------------------------------------------------------------
128
+ echo "⏳ Starting job enumeration for combination sizes: ${COMBINATION_SIZES[*]}"
129
+
130
+ for r in "${COMBINATION_SIZES[@]}"; do
131
+ echo "--- Generating combinations of size $r ---"
132
+
133
+ if (( r == 1 )); then
134
+ for ((i=0; i<N; i++)); do
135
+ run_job "${keys[i]}" "true"
136
+ run_job "${keys[i]}" "false"
137
+ done
138
+
139
+ elif (( r == 2 )); then
140
+ for ((i=0; i<N; i++)); do
141
+ for ((j=i+1; j<N; j++)); do
142
+ # FIXED: Keys are now separated by spaces, not commas.
143
+ combo="${keys[i]} ${keys[j]}"
144
+ run_job "$combo" "true"
145
+ run_job "$combo" "false"
146
+ done
147
+ done
148
+
149
+ elif (( r == 3 )); then
150
+ for ((i=0; i<N; i++)); do
151
+ for ((j=i+1; j<N; j++)); do
152
+ for ((k=j+1; k<N; k++)); do
153
+ # FIXED: Keys are now separated by spaces, not commas.
154
+ combo="${keys[i]} ${keys[j]} ${keys[k]}"
155
+ run_job "$combo" "true"
156
+ run_job "$combo" "false"
157
+ done
158
+ done
159
+ done
160
+ fi
161
+ done
162
+
163
+
164
+ # ---------------------------------------------------------------------------
165
+ # Section 5: Finalization
166
+ # ---------------------------------------------------------------------------
167
+ echo "All jobs have been launched. Waiting for the remaining jobs to finish..."
168
+ wait
169
+ echo "✅ All ${job_idx} jobs completed."
170
+ echo "Check the '${LOG_DIR}/' directory for individual output logs."
Code/Data_preprocess/extract_3D_mesh.py ADDED
@@ -0,0 +1,86 @@
1
+ import sys
2
+ from pathlib import Path
3
+ import torch
4
+ import os
5
+ import glob
6
+
7
+ # print(project_root)
8
+ project_root = Path(__file__).resolve().parents[1]
9
+ print(os.listdir(project_root))
10
+ project_path = os.path.join(str(project_root), 'Baselines', 'CraftsMan3D')
11
+ print(os.listdir(project_path))
12
+
13
+ # print("Project root:", project_root)
14
+ # print("Project path:", project_path)
15
+
16
+ # # Add the project root to Python's path to allow for absolute imports
17
+ if str(project_path) not in sys.path:
18
+ sys.path.append(str(project_path))
19
+
20
+ # print("Current sys.path:", sys.path)
21
+ from craftsman import CraftsManPipeline
22
+
23
+ def main():
24
+
25
+ print(os.listdir('../Baselines/sd3.5'))
26
+
27
+ path_data = '../Baselines/sd3.5/spair_71k_test_examples'
28
+ categories = os.listdir(path_data)
29
+ print("Categories:", categories)
30
+
31
+ path_images = glob.glob(os.path.join(path_data, '*', '*_bgd_rmv.png'))
32
+ print("Number of images found:", len(path_images))
33
+
34
+ path_output = './example_data/spair'
35
+
36
+ device = "cuda:7"
37
+
38
+ # print(os.listdir(path_data))
39
+
40
+ # path_image = os.path.join(path_data, 'motorbike', '2009_004845_bgd_rmv.png')
41
+
42
+ pipeline = CraftsManPipeline.from_pretrained("../Baselines/CraftsMan3D/ckpts/craftsman-DoraVAE", device=device, torch_dtype=torch.bfloat16)
43
+
44
+ for path_image in path_images:
45
+ # print("Processing image:", path_image)
46
+ # Extract the category and file ID from the image path
47
+ category = os.path.basename(os.path.dirname(path_image))
48
+ file_id = os.path.splitext(os.path.basename(path_image))[0].replace('_bgd_rmv', '')
49
+
50
+ path_obj_output = os.path.join(path_output, category, f"{file_id}.obj")
51
+ if not os.path.exists(os.path.dirname(path_obj_output)):
52
+ os.makedirs(os.path.dirname(path_obj_output))
53
+ # print("Output path for mesh:", path_obj_output)
54
+
55
+ mesh = pipeline(path_image).meshes[0]
56
+ mesh.export(path_obj_output)
57
+ print(f"Exported mesh for {file_id} to {path_obj_output}")
58
+
59
+ # copy the image to the ouput directory
60
+ path_image_output = os.path.join(path_output, category, f"{file_id}_bgd_rmv.png")
61
+ if not os.path.exists(os.path.dirname(path_image_output)):
62
+ os.makedirs(os.path.dirname(path_image_output))
63
+ os.system(f"cp {path_image} {path_image_output}")
64
+ os.system(f"cp {path_image.replace('_bgd_rmv.png', '.jpg')} {path_image_output.replace('_bgd_rmv.png', '.jpg')}")
65
+
66
+ # break
67
+
68
+ # # Run the pipeline
69
+ # try:
70
+ # result = pipeline(path_image, category=category, file_id=file_id)
71
+ # meshes = result.meshes
72
+ # if meshes:
73
+ # mesh = meshes[0]
74
+ # mesh.export(f"{file_id}.obj")
75
+ # print(f"Exported mesh for {file_id} to {file_id}.obj")
76
+ # else:
77
+ # print(f"No meshes found for {file_id}")
78
+ # except Exception as e:
79
+ # print(f"Error processing {path_image}: {e}")
80
+ # mesh = pipeline(path_image).meshes[0]
81
+ # mesh.export("motorbike_1.obj")
82
+
83
+
84
+
85
+ if __name__ == "__main__":
86
+ main()
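A minimal sketch for sanity-checking the exported meshes, assuming the `trimesh` package (not pinned in this repo's environments) and the `./example_data/spair` output layout used above:

```python
import glob
import os

import trimesh  # assumption: installed separately, e.g. `pip install trimesh`

for path_obj in sorted(glob.glob('./example_data/spair/*/*.obj')):
    mesh = trimesh.load(path_obj, force='mesh')
    category = os.path.basename(os.path.dirname(path_obj))
    print(f"{category}/{os.path.basename(path_obj)}: "
          f"{len(mesh.vertices)} vertices, {len(mesh.faces)} faces, "
          f"watertight={mesh.is_watertight}")
```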
Code/Data_preprocess/extract_kps_example_data.ipynb ADDED
@@ -0,0 +1,69 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "7a5614cf",
6
+ "metadata": {},
7
+ "source": [
8
+ "## Find the corresponding annotation for each pair"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": 6,
14
+ "id": "ff48e0c3",
15
+ "metadata": {},
16
+ "outputs": [
17
+ {
18
+ "name": "stdout",
19
+ "output_type": "stream",
20
+ "text": [
21
+ "['extract_3D_mesh.py', '2010_001614_loss.png', 'all_comb_dp_pck_results.csv', 'example_results', 'utils.py', 'wandb', 'single_dp_pck_results.csv', 'analyze_acc.ipynb', 'example_data', '__pycache__', 'scripts', 'optimize_camera_pose_clean.py', 'demo_2D_to_3D_correspondence.ipynb']\n",
22
+ "['spair', 'pf_pascal']\n",
23
+ "['SPair-71k', 'PF-dataset-PASCAL']\n"
24
+ ]
25
+ }
26
+ ],
27
+ "source": [
28
+ "import os\n",
29
+ "print(os.listdir('../sc_dit'))\n",
30
+ "\n",
31
+ "path_example_data = '../sc_dit/example_data'\n",
32
+ "print(os.listdir(path_example_data))\n",
33
+ "\n",
34
+ "\n",
35
+ "path_annotation = '../../Datasets'\n",
36
+ "print(os.listdir(path_annotation))"
37
+ ]
38
+ },
39
+ {
40
+ "cell_type": "code",
41
+ "execution_count": null,
42
+ "id": "792ac8bc",
43
+ "metadata": {},
44
+ "outputs": [],
45
+ "source": []
46
+ }
47
+ ],
48
+ "metadata": {
49
+ "kernelspec": {
50
+ "display_name": "sc_env_torch_113",
51
+ "language": "python",
52
+ "name": "python3"
53
+ },
54
+ "language_info": {
55
+ "codemirror_mode": {
56
+ "name": "ipython",
57
+ "version": 3
58
+ },
59
+ "file_extension": ".py",
60
+ "mimetype": "text/x-python",
61
+ "name": "python",
62
+ "nbconvert_exporter": "python",
63
+ "pygments_lexer": "ipython3",
64
+ "version": "3.9.21"
65
+ }
66
+ },
67
+ "nbformat": 4,
68
+ "nbformat_minor": 5
69
+ }
Code/Data_preprocess/process_feats_sd35.py ADDED
@@ -0,0 +1,282 @@
1
+ import torch
2
+ import os
3
+ from tqdm import tqdm
4
+ import numpy as np
5
+ from PIL import Image
6
+ from utils.utils_correspondence import resize
7
+ from diffusers import StableDiffusion3Pipeline
8
+ import sys
9
+ from typing import List, Dict, Tuple
10
+ import glob
11
+
12
+
13
+ class FeatureExtractor:
14
+ """
15
+ A class to register hooks and extract features from specified layers of a model.
16
+ This version is redesigned to capture both the image and text branches from the
17
+ JointTransformerBlock.
18
+ """
19
+ def __init__(self, model, layers_to_extract: List[int]):
20
+ self.model = model
21
+ self.layers_to_extract = layers_to_extract
22
+ self.features: Dict[int, List[Tuple[torch.Tensor, torch.Tensor]]] = {}
23
+ self.hooks = []
24
+
25
+ def _get_hook(self, layer_idx: int):
26
+ def hook(model, input, output):
27
+ # The output of a JointTransformerBlock is the tuple (encoder_hidden_states, hidden_states):
28
+ # output[0] is the text branch and output[1] is the image branch, as used below.
29
+ image_branch_features = output[1].detach().cpu()
30
+ text_branch_features = output[0].detach().cpu()
31
+
32
+ if layer_idx not in self.features:
33
+ self.features[layer_idx] = []
34
+ self.features[layer_idx].append((image_branch_features, text_branch_features))
35
+ return hook
36
+
37
+ def register_hooks(self):
38
+ if not hasattr(self.model, 'transformer_blocks'):
39
+ raise AttributeError("The provided model does not have 'transformer_blocks'. Make sure you pass `pipe.transformer`.")
40
+
41
+ for layer_idx in self.layers_to_extract:
42
+ if layer_idx < 0 or layer_idx >= len(self.model.transformer_blocks):
43
+ print(f"Warning: Layer index {layer_idx} is out of bounds. Skipping.")
44
+ continue
45
+
46
+ layer = self.model.transformer_blocks[layer_idx]
47
+ hook_handle = layer.register_forward_hook(self._get_hook(layer_idx))
48
+ self.hooks.append(hook_handle)
49
+
50
+ def remove_hooks(self):
51
+ for hook in self.hooks:
52
+ hook.remove()
53
+ self.hooks = []
54
+ self.features = {}
55
+
56
+ def __enter__(self):
57
+ self.register_hooks()
58
+ return self
59
+
60
+ def __exit__(self, exc_type, exc_val, exc_tb):
61
+ self.remove_hooks()
62
+
63
+
64
+ def set_seed(seed=42):
65
+ torch.manual_seed(seed)
66
+ torch.cuda.manual_seed_all(seed)
67
+ torch.backends.cudnn.deterministic = True
68
+ torch.backends.cudnn.benchmark = False
69
+ np.random.seed(seed)
70
+ os.environ['PYTHONHASHSEED'] = str(seed)
71
+
72
+
73
+
74
+ def extract_feats_sd35(pipe, transformer, img_pil, category):
75
+ image_tensor = torch.from_numpy(np.array(img_pil)).float() / 127.5 - 1.0
76
+ image_tensor = image_tensor.permute(2, 0, 1).unsqueeze(0).to(DEVICE, dtype=pipe.vae.dtype)
77
+
78
+ with torch.no_grad():
79
+ vae_output = pipe.vae.encode(image_tensor)
80
+ clean_latent = vae_output.latent_dist.sample() * pipe.vae.config.scaling_factor
81
+
82
+ print("Preparing text conditioning embeddings...")
83
+
84
+
85
+ prompt = "A photo of {}".format(category)
86
+ print(f"Prompt: {prompt}")
87
+ prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = pipe.encode_prompt(
88
+ prompt=prompt, prompt_2=prompt, prompt_3=None,
89
+ negative_prompt="", negative_prompt_2="", negative_prompt_3=None, device=DEVICE
90
+ )
91
+
92
+ batched_prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds], dim=0).to(pipe.transformer.dtype)
93
+ batched_pooled_embeds = torch.cat([pooled_prompt_embeds, negative_pooled_prompt_embeds], dim=0).to(pipe.transformer.dtype)
94
+
95
+ # --- 2. DATA COLLECTION LOOP ---
96
+ # cond_feats = [[None for _ in LAYERS_TO_EXTRACT] for _ in TIMESTEPS]
97
+ # name_list = [[None for _ in LAYERS_TO_EXTRACT] for _ in TIMESTEPS]
98
+ cond_feats = []
99
+ name_list = []
100
+ # uncond_figs = [[None for _ in LAYERS_TO_EXTRACT] for _ in TIMESTEPS]
101
+
102
+ for i, ts_val in enumerate(TIMESTEPS):
103
+ print(f"\n===== Processing Timestep: {ts_val} =====\n")
104
+
105
+ # Apply noise for the current timestep
106
+ noise = torch.randn_like(clean_latent)
107
+ t = torch.tensor([ts_val], device=DEVICE, dtype=clean_latent.dtype)
108
+ t_reshaped = t.reshape(-1, *([1] * (clean_latent.dim() - 1)))
109
+ noised_latent = (1 - t_reshaped) * clean_latent + t_reshaped * noise
110
+
111
+ # Prepare inputs for the transformer (batch for CFG)
112
+ noised_latent_for_model = torch.cat([noised_latent] * 2).to(pipe.transformer.dtype)
113
+ timestep_for_model = torch.cat([t] * 2).to(pipe.transformer.dtype) * 1000
114
+
115
+ with FeatureExtractor(transformer, LAYERS_TO_EXTRACT) as extractor:
116
+ with torch.no_grad():
117
+ transformer(
118
+ hidden_states=noised_latent_for_model,
119
+ timestep=timestep_for_model,
120
+ encoder_hidden_states=batched_prompt_embeds,
121
+ pooled_projections=batched_pooled_embeds,
122
+ return_dict=False
123
+ )
124
+
125
+ # Process features for each requested layer at this timestep
126
+ for j, layer_idx in enumerate(LAYERS_TO_EXTRACT):
127
+ print(f"--- Analyzing Layer {layer_idx} ---")
128
+ if layer_idx in extractor.features:
129
+ # The hook returns a list with one item (from the single forward pass)
130
+ # which is a tuple of (image_batch_features, text_batch_features)
131
+ img_features_batch, _ = extractor.features[layer_idx][0]
132
+
133
+ # Separate cond and uncond features
134
+ cond_img_features = img_features_batch[0] # 3600 x 1536
135
+
136
+ num_patch = np.sqrt(cond_img_features.shape[0])
137
+ print(f"Number of patches: {num_patch}")
138
+ cond_img_features = cond_img_features.reshape(1, int(num_patch), int(num_patch), -1).permute(0, 3, 1, 2)  # reshape to [1, 1536, num_patch, num_patch]; the permute order matters here
139
+ # uncond_img_features = img_features_batch[1]
140
+
141
+ # print(cond_img_features.shape)
142
+ # cond_feats[i][j] = cond_img_features
143
+ cond_feats.append(cond_img_features)
144
+
145
+ name = "t_{}_layer_{}".format(ts_val, layer_idx)
146
+ # name_list[i][j] = name
147
+ name_list.append(name)
148
+
149
+ # normalize
150
+ # print(cond_img_features.shape, uncond_img_features.shape)
151
+
152
+ # # Run PCA and store the resulting image arrays
153
+ # cond_figs[i][j] = get_pca_image(cond_img_features)
154
+ # uncond_figs[i][j] = get_pca_image(uncond_img_features)
155
+ # print(f"Stored PCA images for Layer {layer_idx}")
156
+
157
+ return cond_feats, name_list
158
+
159
+
160
+ def process_and_save_features(file_paths, real_size, pipe, transformer, flip=False, angle=0, device=None, PF_pascal=False):
161
+ for file_path in tqdm(file_paths, desc="Processing images (Flip: {})".format(flip)):
162
+ img1 = Image.open(file_path).convert('RGB')
163
+ if flip:
164
+ img1 = img1.transpose(Image.FLIP_LEFT_RIGHT)
165
+ # img1 = edge_pad_rotate_and_crop(img1, angle=angle) # Uncomment this line to enable different rotation
166
+ img1_input = resize(img1, real_size, resize=True, to_pil=True)
167
+ # img1 = resize(img1, img_size, resize=True, to_pil=True)
168
+
169
+ if PF_pascal:
170
+
171
+ base_annotation_path = file_path.replace('JPEGImages', 'Annotations')
172
+ # base_annotation_path will be 'path/to/your/Datasets/PF-dataset-PASCAL/Annotations/2009_002457.jpg'
173
+
174
+ # 2. Extract the directory part of the annotation path
175
+ # This will give 'path/to/your/Datasets/PF-dataset-PASCAL/Annotations'
176
+ directory_part = os.path.dirname(base_annotation_path)
177
+
178
+ # 3. Extract the filename part (including .mat extension)
179
+ # This will give '2009_002457.mat'
180
+ filename_part = os.path.basename(file_path).replace('.jpg', '.mat')
181
+
182
+ # # 4. Construct the desired path with the wildcard
183
+ # file_path_annot_wildcard = os.path.join(directory_part, '*', filename_part)
184
+ # # category =
185
+ # # file_path = file_path.replace('JPEGImages', 'Annotations').replace('.jpg', '.mat')
186
+ # file_name = os.path.basename(file_path)
187
+
188
+ # # print(file_path.replace('JPEGImages', 'Annotations').replace('.jpg', '.mat'))
189
+
190
+ # parts = file_path.replace('JPEGImages', 'Annotations').replace('.jpg', '.mat')
191
+ # annotations_idx = parts.index('Annotations')
192
+ # # Insert '*' after 'Annotations'
193
+ # parts.insert(annotations_idx + 1, '*')
194
+
195
+ file_path_annot = glob.glob(os.path.join(directory_part, '*', filename_part)) # Assuming annotations are in .mat files
196
+
197
+ # print(file_name)
198
+ # print(file_path_annot)
199
+
200
+ category = file_path_annot[0].split('/')[-2] #if file_path_annot else 'unknown_category'
201
+ # print(category)
202
+ else:
203
+ print(file_path)
204
+ category = file_path.split('/')[-2] # Assuming category is the parent directory name
205
+
206
+ # break
207
+
208
+ accumulated_features = {}
209
+ for _ in range(NUM_ENSEMBLE):
210
+ # print('model device:', model.device)
211
+ # features1 = process_features_and_mask(model, aug, img1_input, mask=False, raw=True, device=device)
212
+ features1, name_list = extract_feats_sd35(pipe, transformer, img1_input, category=category) # Example category
213
+
214
+ for name, features in zip(name_list, features1):
215
+ accumulated_features[name] = accumulated_features.get(name, 0) + features
216
+
217
+
218
+ # del features1['s2']
219
+ # for k in features1:
220
+ # accumulated_features[k] = accumulated_features.get(k, 0) + features1[k]
221
+
222
+ for k in accumulated_features:
223
+ accumulated_features[k] /= NUM_ENSEMBLE
224
+
225
+ subdir_name = 'features' if NUM_ENSEMBLE == 1 else f'features_ensemble{NUM_ENSEMBLE}'
226
+ output_subdir = file_path.replace('JPEGImages', subdir_name).rsplit('/', 1)[0]
227
+ os.makedirs(output_subdir, exist_ok=True)
228
+
229
+ suffix = '_flip' if flip else ''
230
+ output_path = os.path.join(output_subdir, os.path.splitext(os.path.basename(file_path))[0] + f'_sd35{suffix}.pt')
231
+ torch.save(accumulated_features, output_path)
232
+
233
+ # clear memory
234
+ torch.cuda.empty_cache()
235
+
236
+ # print(output_path)
237
+ # break
238
+
239
+ # img1_batch = extractor_vit.preprocess_pil(img1)
240
+ # img1_desc_dino = extractor_vit.extract_descriptors(img1_batch.to(device), layer, facet).permute(0, 1, 3, 2).reshape(1, -1, 60, 60) # img1_batch.cuda()
241
+ # output_path_dino = os.path.join(output_subdir, os.path.splitext(os.path.basename(file_path))[0] + f'_dino{suffix}.pt')
242
+ # torch.save(img1_desc_dino, output_path_dino)
243
+
244
+
245
+
246
+ if __name__ == '__main__':
247
+ set_seed()
248
+
249
+ NUM_ENSEMBLE = 1
250
+ # TIMESTEPS = [0.3, 0.5, 0.6, 0.7, 0.8]
251
+ TIMESTEPS = [0.5, 0.7]
252
+ # LAYERS_TO_EXTRACT = [5, 9, 10, 11, 12, 13, 15, 18, 20] # Example layer indices to extract features from
253
+ LAYERS_TO_EXTRACT = [10, 11, 12] # Example layer indices to extract features from
254
+
255
+ real_size = 960
256
+
257
+ MODEL_ID = "stabilityai/stable-diffusion-3-medium-diffusers"
258
+ # DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
259
+
260
+ base_dir = sys.argv[1] if len(sys.argv) > 1 else 'data/SPair-71k/JPEGImages'
261
+ # set the device using the second argument if provided, otherwise use 'cuda'
262
+ device_str = sys.argv[2] if len(sys.argv) > 2 else 'cuda'
263
+ PF_pascal = sys.argv[3].lower() in ('1', 'true', 'yes') if len(sys.argv) > 3 else False  # parse to bool; a raw CLI string would always be truthy
264
+ print(f"Using device: {device_str}")
265
+ DEVICE = torch.device(device_str if torch.cuda.is_available() else "cpu")
266
+
267
+ print(f"Using device: {DEVICE}")
268
+ print(f"Loading pipeline: {MODEL_ID}...")
269
+
270
+ pipe = StableDiffusion3Pipeline.from_pretrained(MODEL_ID, torch_dtype=torch.float16)
271
+ pipe = pipe.to(DEVICE)
272
+ transformer = pipe.transformer
273
+
274
+ print("Pipeline loaded successfully.")
275
+
276
+ all_files = [os.path.join(subdir, file) for subdir, dirs, files in os.walk(base_dir) for file in files if file.endswith('.jpg')]
277
+
278
+ angles = [0] # angles for rotation
279
+ for angle in angles:
280
+ # Process and save features
281
+ process_and_save_features(all_files, real_size, pipe, transformer, flip=False, angle=angle, device=DEVICE, PF_pascal=PF_pascal)
282
+ process_and_save_features(all_files, real_size, pipe, transformer, flip=True, angle=angle, device=DEVICE, PF_pascal=PF_pascal)
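A minimal sketch of reading back one of the saved feature files; the keys follow the `t_{timestep}_layer_{layer}` naming used in `extract_feats_sd35`, and the concrete path below is hypothetical:

```python
import torch

# Hypothetical example path; real files live under <dataset>/features*/<category>/<id>_sd35.pt
feat_path = 'data/SPair-71k/features/cat/2008_000123_sd35.pt'
feats = torch.load(feat_path, map_location='cpu')

for name, tensor in feats.items():
    # e.g. "t_0.5_layer_10" -> shape (1, C, num_patch, num_patch)
    print(name, tuple(tensor.shape))
```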
Code/qwen-vl-flash-attn.yml ADDED
@@ -0,0 +1,282 @@
1
+ name: qwen-vl-flash-attn
2
+ channels:
3
+ - conda-forge
4
+ - nvidia
5
+ - pytorch
6
+ dependencies:
7
+ - _libgcc_mutex=0.1=conda_forge
8
+ - _openmp_mutex=4.5=3_kmp_llvm
9
+ - aom=3.6.1=h59595ed_0
10
+ - asttokens=3.0.0=pyhd8ed1ab_1
11
+ - blas=2.116=mkl
12
+ - blas-devel=3.9.0=16_linux64_mkl
13
+ - brotli-python=1.1.0=py311hfdbb021_3
14
+ - bzip2=1.0.8=h4bc722e_7
15
+ - ca-certificates=2025.7.9=hbd8a1cb_0
16
+ - certifi=2025.7.9=pyhd8ed1ab_0
17
+ - cffi=1.17.1=py311hf29c0ef_0
18
+ - charset-normalizer=3.4.2=pyhd8ed1ab_0
19
+ - comm=0.2.2=pyhd8ed1ab_1
20
+ - cpython=3.11.13=py311hd8ed1ab_0
21
+ - cuda-cudart=12.1.105=0
22
+ - cuda-cupti=12.1.105=0
23
+ - cuda-libraries=12.1.0=0
24
+ - cuda-nvrtc=12.1.105=0
25
+ - cuda-nvtx=12.1.105=0
26
+ - cuda-opencl=12.9.19=0
27
+ - cuda-runtime=12.1.0=0
28
+ - cuda-version=12.9=3
29
+ - debugpy=1.8.14=py311hfdbb021_0
30
+ - decorator=5.2.1=pyhd8ed1ab_0
31
+ - exceptiongroup=1.3.0=pyhd8ed1ab_0
32
+ - executing=2.2.0=pyhd8ed1ab_0
33
+ - ffmpeg=4.4.2=gpl_hdf48244_113
34
+ - filelock=3.18.0=pyhd8ed1ab_0
35
+ - font-ttf-dejavu-sans-mono=2.37=hab24e00_0
36
+ - font-ttf-inconsolata=3.000=h77eed37_0
37
+ - font-ttf-source-code-pro=2.038=h77eed37_0
38
+ - font-ttf-ubuntu=0.83=h77eed37_3
39
+ - fontconfig=2.15.0=h7e30c49_1
40
+ - fonts-conda-ecosystem=1=0
41
+ - fonts-conda-forge=1=0
42
+ - freetype=2.13.3=ha770c72_1
43
+ - giflib=5.2.2=hd590300_0
44
+ - gmp=6.3.0=hac33072_2
45
+ - gmpy2=2.2.1=py311h0f6cedb_0
46
+ - gnutls=3.7.9=hb077bed_0
47
+ - h2=4.2.0=pyhd8ed1ab_0
48
+ - hpack=4.1.0=pyhd8ed1ab_0
49
+ - hyperframe=6.1.0=pyhd8ed1ab_0
50
+ - icu=75.1=he02047a_0
51
+ - idna=3.10=pyhd8ed1ab_1
52
+ - importlib-metadata=8.7.0=pyhe01879c_1
53
+ - ipykernel=6.29.5=pyh3099207_0
54
+ - ipython=9.4.0=pyhfa0c392_0
55
+ - ipython_pygments_lexers=1.1.1=pyhd8ed1ab_0
56
+ - jedi=0.19.2=pyhd8ed1ab_1
57
+ - jinja2=3.1.6=pyhd8ed1ab_0
58
+ - jupyter_client=8.6.3=pyhd8ed1ab_1
59
+ - jupyter_core=5.8.1=pyh31011fe_0
60
+ - keyutils=1.6.1=h166bdaf_0
61
+ - krb5=1.21.3=h659f571_0
62
+ - lame=3.100=h166bdaf_1003
63
+ - lcms2=2.17=h717163a_0
64
+ - ld_impl_linux-64=2.44=h1423503_1
65
+ - lerc=4.0.0=h0aef613_1
66
+ - libasprintf=0.25.1=h8e693c7_0
67
+ - libblas=3.9.0=16_linux64_mkl
68
+ - libcblas=3.9.0=16_linux64_mkl
69
+ - libcublas=12.1.0.26=0
70
+ - libcufft=11.0.2.4=0
71
+ - libcufile=1.14.1.1=4
72
+ - libcurand=10.3.10.19=0
73
+ - libcusolver=11.4.4.55=0
74
+ - libcusparse=12.0.2.55=0
75
+ - libdeflate=1.24=h86f0d12_0
76
+ - libdrm=2.4.125=hb9d3cd8_0
77
+ - libedit=3.1.20250104=pl5321h7949ede_0
78
+ - libegl=1.7.0=ha4b6fd6_2
79
+ - libexpat=2.7.0=h5888daf_0
80
+ - libffi=3.4.6=h2dba641_1
81
+ - libfreetype=2.13.3=ha770c72_1
82
+ - libfreetype6=2.13.3=h48d6fc4_1
83
+ - libgcc=15.1.0=h767d61c_3
84
+ - libgcc-ng=15.1.0=h69a702a_3
85
+ - libgettextpo=0.25.1=h5888daf_0
86
+ - libgfortran=15.1.0=h69a702a_3
87
+ - libgfortran-ng=15.1.0=h69a702a_3
88
+ - libgfortran5=15.1.0=hcea5267_3
89
+ - libgl=1.7.0=ha4b6fd6_2
90
+ - libglvnd=1.7.0=ha4b6fd6_2
91
+ - libglx=1.7.0=ha4b6fd6_2
92
+ - libgomp=15.1.0=h767d61c_3
93
+ - libhwloc=2.11.2=default_h0d58e46_1001
94
+ - libiconv=1.18=h4ce23a2_1
95
+ - libidn2=2.3.8=ha4ef2c3_0
96
+ - libjpeg-turbo=3.1.0=hb9d3cd8_0
97
+ - liblapack=3.9.0=16_linux64_mkl
98
+ - liblapacke=3.9.0=16_linux64_mkl
99
+ - liblzma=5.8.1=hb9d3cd8_2
100
+ - libnpp=12.0.2.50=0
101
+ - libnsl=2.0.1=hb9d3cd8_1
102
+ - libnvjitlink=12.1.105=0
103
+ - libnvjpeg=12.1.1.14=0
104
+ - libpciaccess=0.18=hb9d3cd8_0
105
+ - libpng=1.6.50=h943b412_0
106
+ - libsodium=1.0.20=h4ab18f5_0
107
+ - libsqlite=3.50.2=hee844dc_2
108
+ - libstdcxx=15.1.0=h8f9b012_3
109
+ - libstdcxx-ng=15.1.0=h4852527_3
110
+ - libtasn1=4.20.0=hb9d3cd8_0
111
+ - libtiff=4.7.0=hf01ce69_5
112
+ - libunistring=0.9.10=h7f98852_0
113
+ - libuuid=2.38.1=h0b41bf4_0
114
+ - libva=2.22.0=h4f16b4b_2
115
+ - libvpx=1.13.1=h59595ed_0
116
+ - libwebp=1.6.0=h9635ea4_0
117
+ - libwebp-base=1.6.0=hd42ef1d_0
118
+ - libxcb=1.17.0=h8a09558_0
119
+ - libxcrypt=4.4.36=hd590300_1
120
+ - libxml2=2.13.8=h4bc477f_0
121
+ - libzlib=1.3.1=hb9d3cd8_2
122
+ - llvm-openmp=15.0.7=h0cdce71_0
123
+ - markupsafe=3.0.2=py311h2dc5d0c_1
124
+ - matplotlib-inline=0.1.7=pyhd8ed1ab_1
125
+ - mkl=2022.1.0=h84fe81f_915
126
+ - mkl-devel=2022.1.0=ha770c72_916
127
+ - mkl-include=2022.1.0=h84fe81f_915
128
+ - mpc=1.3.1=h24ddda3_1
129
+ - mpfr=4.2.1=h90cbb55_3
130
+ - mpmath=1.3.0=pyhd8ed1ab_1
131
+ - ncurses=6.5=h2d0b736_3
132
+ - nest-asyncio=1.6.0=pyhd8ed1ab_1
133
+ - nettle=3.9.1=h7ab15ed_0
134
+ - networkx=3.5=pyhe01879c_0
135
+ - ocl-icd=2.3.3=hb9d3cd8_0
136
+ - opencl-headers=2025.06.13=h5888daf_0
137
+ - openh264=2.3.1=hcb278e6_2
138
+ - openjpeg=2.5.3=h5fbd93e_0
139
+ - openssl=3.5.1=h7b32b05_0
140
+ - p11-kit=0.24.1=hc5aa10d_0
141
+ - packaging=25.0=pyh29332c3_1
142
+ - parso=0.8.4=pyhd8ed1ab_1
143
+ - pexpect=4.9.0=pyhd8ed1ab_1
144
+ - pickleshare=0.7.5=pyhd8ed1ab_1004
145
+ - pillow=11.3.0=py311h1322bbf_0
146
+ - pip=25.1.1=pyh8b19718_0
147
+ - platformdirs=4.3.8=pyhe01879c_0
148
+ - prompt-toolkit=3.0.51=pyha770c72_0
149
+ - psutil=7.0.0=py311h9ecbd09_0
150
+ - pthread-stubs=0.4=hb9d3cd8_1002
151
+ - ptyprocess=0.7.0=pyhd8ed1ab_1
152
+ - pure_eval=0.2.3=pyhd8ed1ab_1
153
+ - pycparser=2.22=pyh29332c3_1
154
+ - pygments=2.19.2=pyhd8ed1ab_0
155
+ - pysocks=1.7.1=pyha55dd90_7
156
+ - python=3.11.13=h9e4cc4f_0_cpython
157
+ - python-dateutil=2.9.0.post0=pyhe01879c_2
158
+ - python_abi=3.11=7_cp311
159
+ - pytorch-cuda=12.1=ha16c6d3_6
160
+ - pytorch-mutex=1.0=cuda
161
+ - pyyaml=6.0.2=py311h2dc5d0c_2
162
+ - pyzmq=27.0.0=py311h7deb3e3_0
163
+ - readline=8.2=h8c095d6_2
164
+ - requests=2.32.4=pyhd8ed1ab_0
165
+ - setuptools=80.9.0=pyhff2d567_0
166
+ - six=1.17.0=pyhd8ed1ab_0
167
+ - stack_data=0.6.3=pyhd8ed1ab_1
168
+ - svt-av1=1.4.1=hcb278e6_0
169
+ - tbb=2021.13.0=hceb3a55_1
170
+ - tk=8.6.13=noxft_hd72426e_102
171
+ - tornado=6.5.1=py311h9ecbd09_0
172
+ - traitlets=5.14.3=pyhd8ed1ab_1
173
+ - typing_extensions=4.14.1=pyhe01879c_0
174
+ - urllib3=2.5.0=pyhd8ed1ab_0
175
+ - wayland=1.24.0=h3e06ad9_0
176
+ - wayland-protocols=1.45=hd8ed1ab_0
177
+ - wcwidth=0.2.13=pyhd8ed1ab_1
178
+ - wheel=0.45.1=pyhd8ed1ab_1
179
+ - x264=1!164.3095=h166bdaf_2
180
+ - x265=3.5=h924138e_3
181
+ - xorg-libx11=1.8.12=h4f16b4b_0
182
+ - xorg-libxau=1.0.12=hb9d3cd8_0
183
+ - xorg-libxdmcp=1.1.5=hb9d3cd8_0
184
+ - xorg-libxext=1.3.6=hb9d3cd8_0
185
+ - xorg-libxfixes=6.0.1=hb9d3cd8_0
186
+ - yaml=0.2.5=h7f98852_2
187
+ - zeromq=4.3.5=h3b0a872_7
188
+ - zipp=3.23.0=pyhd8ed1ab_0
189
+ - zstandard=0.23.0=py311h9ecbd09_2
190
+ - zstd=1.5.7=hb8e6e7a_2
191
+ - pip:
192
+ - accelerate==1.8.1
193
+ - aiohappyeyeballs==2.6.1
194
+ - aiohttp==3.12.14
195
+ - aiosignal==1.4.0
196
+ - annotated-types==0.7.0
197
+ - anyio==4.9.0
198
+ - attrs==25.3.0
199
+ - av==15.0.0
200
+ - click==8.2.1
201
+ - contourpy==1.3.2
202
+ - cycler==0.12.1
203
+ - decord==0.6.0
204
+ - diskcache==5.6.3
205
+ - distro==1.9.0
206
+ - docstring-parser==0.17.0
207
+ - einops==0.8.1
208
+ - flash-attn==2.8.1
209
+ - fonttools==4.58.5
210
+ - frozenlist==1.7.0
211
+ - fsspec==2025.5.1
212
+ - gitdb==4.0.12
213
+ - gitpython==3.1.44
214
+ - h11==0.16.0
215
+ - hf-xet==1.1.5
216
+ - httpcore==1.0.9
217
+ - httpx==0.28.1
218
+ - huggingface-hub==0.33.4
219
+ - imageio==2.37.0
220
+ - instructor==1.10.0
221
+ - jiter==0.10.0
222
+ - joblib==1.5.1
223
+ - kiwisolver==1.4.8
224
+ - lazy-loader==0.4
225
+ - loguru==0.7.3
226
+ - markdown-it-py==3.0.0
227
+ - matplotlib==3.10.3
228
+ - mdurl==0.1.2
229
+ - multidict==6.6.3
230
+ - ninja==1.11.1.4
231
+ - numpy==2.2.6
232
+ - nvidia-cublas-cu12==12.4.5.8
233
+ - nvidia-cuda-cupti-cu12==12.4.127
234
+ - nvidia-cuda-nvrtc-cu12==12.4.127
235
+ - nvidia-cuda-runtime-cu12==12.4.127
236
+ - nvidia-cudnn-cu12==9.1.0.70
237
+ - nvidia-cufft-cu12==11.2.1.3
238
+ - nvidia-curand-cu12==10.3.5.147
239
+ - nvidia-cusolver-cu12==11.6.1.9
240
+ - nvidia-cusparse-cu12==12.3.1.170
241
+ - nvidia-cusparselt-cu12==0.6.2
242
+ - nvidia-nccl-cu12==2.21.5
243
+ - nvidia-nvjitlink-cu12==12.4.127
244
+ - nvidia-nvtx-cu12==12.4.127
245
+ - openai==1.97.1
246
+ - opencv-python==4.12.0.88
247
+ - pandas==2.3.1
248
+ - propcache==0.3.2
249
+ - protobuf==6.31.1
250
+ - pydantic==2.11.7
251
+ - pydantic-core==2.33.2
252
+ - pyparsing==3.2.3
253
+ - pytz==2025.2
254
+ - qwen-vl-utils==0.0.11
255
+ - regex==2024.11.6
256
+ - rich==14.1.0
257
+ - safetensors==0.5.3
258
+ - scikit-image==0.25.2
259
+ - scikit-learn==1.7.0
260
+ - scipy==1.16.0
261
+ - sentry-sdk==2.33.0
262
+ - shellingham==1.5.4
263
+ - smmap==5.0.2
264
+ - sniffio==1.3.1
265
+ - sympy==1.13.1
266
+ - tenacity==9.1.2
267
+ - threadpoolctl==3.6.0
268
+ - tifffile==2025.6.11
269
+ - timm==1.0.17
270
+ - tokenizers==0.21.2
271
+ - torch==2.6.0+cu124
272
+ - torchaudio==2.6.0+cu124
273
+ - torchvision==0.21.0+cu124
274
+ - tqdm==4.67.1
275
+ - transformers==4.51.3
276
+ - triton==3.2.0
277
+ - typer==0.16.0
278
+ - typing-inspection==0.4.1
279
+ - tzdata==2025.2
280
+ - wandb==0.21.0
281
+ - yarl==1.20.1
282
+ prefix: /opt/conda/envs/qwen-vl-flash-attn
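A minimal sketch for checking that the key pinned packages in this environment import and that a CUDA device is visible (versions are only printed, not asserted):

```python
import torch
import transformers
import flash_attn

print('torch', torch.__version__, '| cuda available:', torch.cuda.is_available())
print('transformers', transformers.__version__)
print('flash_attn', flash_attn.__version__)
```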
Code/qwen_inference/dataset.py ADDED
@@ -0,0 +1,64 @@
1
+ import os
2
+
3
+ def get_dataset_info(args, split):
4
+ if args.EVAL_DATASET == 'pascal':
5
+ # data_dir = 'data/PF-dataset-PASCAL'
6
+ data_dir = '../../Datasets/PF-dataset-PASCAL'
7
+ categories = sorted(os.listdir(os.path.join(data_dir, 'Annotations')))
8
+ # elif args.EVAL_DATASET == 'ap10k':
9
+ # data_dir = 'data/ap-10k'
10
+ # categories = []
11
+ # subfolders = os.listdir(os.path.join(data_dir, 'ImageAnnotation'))
12
+ # # Handle AP10K_EVAL test settings
13
+ # if args.AP10K_EVAL_SUBSET == 'intra-species':
14
+ # categories = [folder for subfolder in subfolders for folder in os.listdir(os.path.join(data_dir, 'ImageAnnotation', subfolder))]
15
+ # elif args.AP10K_EVAL_SUBSET == 'cross-species':
16
+ # categories = [subfolder for subfolder in subfolders if len(os.listdir(os.path.join(data_dir, 'ImageAnnotation', subfolder))) > 1]
17
+ # split += '_cross_species'
18
+ # elif args.AP10K_EVAL_SUBSET == 'cross-family':
19
+ # categories = ['all']
20
+ # split += '_cross_family'
21
+ # categories = sorted(categories)
22
+ # if split == 'val':
23
+ # # remove category "king cheetah" from categories, since it is not present in the validation set
24
+ # categories.remove('king cheetah')
25
+ elif args.EVAL_DATASET == 'spair': # SPair
26
+ # data_dir = 'data/SPair-71k'
27
+ data_dir = '../../Datasets/SPair-71k'
28
+ categories = sorted(os.listdir(os.path.join(data_dir, 'ImageAnnotation')))
29
+
30
+ return data_dir, categories, split
31
+
32
+
33
+
34
+ # SPair-71k dataset for batch processing
35
+ from PIL import Image
36
+ from torch.utils.data import Dataset
37
+
38
+ class VLDataset(Dataset):
39
+ """A simple dataset to wrap a list of images and prompts for the DataLoader."""
40
+ def __init__(self, images, prompts):
41
+ self.images = images
42
+ self.prompts = prompts
43
+
44
+ def __len__(self):
45
+ return len(self.images)
46
+
47
+ def __getitem__(self, idx):
48
+ # The DataLoader will call this for each item
49
+ return self.images[idx], self.prompts[idx]
50
+
51
+
52
+ class VLDatasetPaired(Dataset):
53
+ """A simple dataset to wrap a list of images and prompts for the DataLoader."""
54
+ def __init__(self, source_imgs, target_imgs, prompts):
55
+ self.source_imgs = source_imgs
56
+ self.target_imgs = target_imgs
57
+ self.prompts = prompts
58
+
59
+ def __len__(self):
60
+ return len(self.source_imgs)
61
+
62
+ def __getitem__(self, idx):
63
+ # The DataLoader will call this for each item
64
+ return self.source_imgs[idx], self.target_imgs[idx], self.prompts[idx]
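A minimal usage sketch for `VLDataset`: the identity `collate_fn` keeps each batch as a plain list of (image, prompt) tuples, which is how `inference_batched.py` consumes it. The blank placeholder images are assumptions, not repo data:

```python
from PIL import Image
from torch.utils.data import DataLoader

from dataset import VLDataset  # this module

images = [Image.new('RGB', (840, 840)) for _ in range(4)]   # placeholder blank images
prompts = [f'prompt {i}' for i in range(4)]

loader = DataLoader(VLDataset(images, prompts), batch_size=2, collate_fn=lambda x: x)
for batch in loader:
    images_pil, text_prompts = zip(*batch)
    print(len(images_pil), text_prompts)
```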
Code/qwen_inference/inference_batched.py ADDED
@@ -0,0 +1,367 @@
1
+ import os
2
+ import wandb
3
+ import matplotlib.pyplot as plt
4
+ from torch.utils.data import DataLoader
5
+ import argparse
6
+ import torch
7
+ from tqdm import tqdm
8
+ from PIL import ImageDraw, ImageFont
9
+ import numpy as np
10
+ import json
11
+
12
+ # multiprocessing for parallel job preparation
13
+ import concurrent.futures
14
+
15
+ # custom imports
16
+ from dataset import get_dataset_info, VLDataset
17
+ from utils import load_eval_data, load_img_and_kps
18
+ from qwen_utils import QwenVLDetector
19
+ # from predict_correspondence_vlm import create_image_with_one_kp
20
+
21
+ # Place the new, fast drawing function here
22
+ # def create_image_with_one_kp_pil(img, kps, kps_idx=0, circ_size=200, **kwargs):
23
+ # img_with_kp = img.copy()
24
+ # draw = ImageDraw.Draw(img_with_kp)
25
+ # cx, cy = kps[kps_idx, 0], kps[kps_idx, 1]
26
+ # radius = circ_size / 10
27
+ # bbox = [cx - radius, cy - radius, cx + radius, cy + radius]
28
+ # draw.ellipse(bbox, outline="red", width=4)
29
+ # return img_with_kp
30
+
31
+ def create_image_with_one_kp_pil(img, kps, kps_idx=0, circ_size=200, add_text=True, **kwargs):
32
+ img_with_kp = img.copy()
33
+ draw = ImageDraw.Draw(img_with_kp)
34
+ cx, cy = kps[kps_idx, 0], kps[kps_idx, 1]
35
+ radius = circ_size / 10
36
+ bbox = [cx - radius, cy - radius, cx + radius, cy + radius]
37
+ draw.ellipse(bbox, outline="red", width=4)
38
+
39
+ if add_text:
40
+ text = "Ref"
41
+ # Try to use a better font, or fall back to the default if not found
42
+ try:
43
+ font = ImageFont.truetype("DejaVuSans.ttf", size=26)
44
+ except IOError:
45
+ # fall back to PIL's built-in font if DejaVuSans is not available
46
+ font = ImageFont.load_default()
47
+
48
+ # Get text bounding box for centering
49
+ # print(font)
50
+ bbox_text = draw.textbbox((0, 0), text, font=font)
51
+ text_width = bbox_text[2] - bbox_text[0]
52
+ text_height = bbox_text[3] - bbox_text[1]
53
+
54
+ text_x = cx - text_width // 2
55
+ text_y = cy - text_height // 2
56
+ draw.text((text_x, text_y), text, font=font, fill=(255, 255, 255))  # text_x/text_y already center the label on the keypoint
57
+ return img_with_kp
58
+
59
+ # Helper function to process a single keypoint (needed for parallelization)
60
+ def prepare_single_job(task_args):
61
+ img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, category_prompt_sem = task_args
62
+
63
+ if img1_kps[kps_idx, 2] == 1:
64
+ # Use the fast, new function
65
+ img1_kp = create_image_with_one_kp_pil(img1, img1_kps, kps_idx=kps_idx)
66
+
67
+ return {
68
+ "img1": img1,
69
+ "img1_kp": img1_kp,
70
+ "img2": img2,
71
+ "prompt_sem": category_prompt_sem,
72
+ "metadata": { "category": category, "src_id": src_id, "tgt_id": tgt_id, "kps_idx": kps_idx }
73
+ }
74
+ return None
75
+
76
+ def run_batched_evaluation(args, model, system_prompt_sem, system_prompt_bbox, task_prompt_sem, task_prompt_bbox):
77
+ """
78
+ Runs the entire evaluation using a batched approach for maximum efficiency.
79
+ """
80
+ # --- Create save directories if provided ---
81
+ if args.SAVE_DIR:
82
+ stage2_dir = os.path.join(args.SAVE_DIR, "stage2_semantics", args.EVAL_DATASET, args.EXP_NOTE)
83
+ stage3_dir = os.path.join(args.SAVE_DIR, "stage3_bboxes", args.EVAL_DATASET, args.EXP_NOTE)
84
+ os.makedirs(stage2_dir, exist_ok=True)
85
+ os.makedirs(stage3_dir, exist_ok=True)
86
+ print(f"Intermediate results will be saved to: {args.SAVE_DIR}")
87
+
88
+
89
+ data_dir, categories, split = get_dataset_info(args, split='test')
90
+ results_table = wandb.Table(columns=["category", "src_id", "tgt_id", "kpt_id", "plot", "extracted_semantics", "src_response", "src_bbox", "src_input_size", "tgt_response", "tgt_bbox", "target_input_size"])
91
+
92
+ # --- STAGE 1: PREPARE ALL INFERENCE JOBS FIRST ---
93
+ print("--- Stage 1: Preparing all inference jobs... ---")
94
+ # inference_jobs = []
95
+ # for category in categories:
96
+ # # print(f"Preparing jobs for category: {category}")
97
+ # category_prompt_sem = task_prompt_sem.format(class_name=category)
98
+ # files, kps, _, _ = load_eval_data(args, data_dir, category, split)
99
+ # N = len(files) // 2
100
+
101
+ # # for pair_idx in range(N):
102
+ # for pair_idx in tqdm(range(N), desc=f"Processing {category} pairs"):
103
+ # img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE)
104
+ # img2, _ = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, img_size=args.ANNO_SIZE)
105
+ # src_id = files[2*pair_idx].split('/')[-1].split('.')[0]
106
+ # tgt_id = files[2*pair_idx+1].split('/')[-1].split('.')[0]
107
+
108
+ # for kps_idx in range(img1_kps.shape[0]):
109
+ # if img1_kps[kps_idx, 2] == 1:
110
+ # # CPU-bound image creation
111
+ # img1_kp = create_image_with_one_kp(img1, img1_kps, kps_idx=kps_idx, add_text=False, add_circle=True)
112
+
113
+ # job = {
114
+ # "img1_kp": img1_kp,
115
+ # "img2": img2,
116
+ # "prompt_sem": category_prompt_sem,
117
+ # "metadata": { "category": category, "src_id": src_id, "tgt_id": tgt_id, "kps_idx": kps_idx }
118
+ # }
119
+ # inference_jobs.append(job)
120
+ # print(f"Prepared {len(inference_jobs)} total jobs.")
121
+
122
+ # First, create a flat list of all tasks to be done
123
+ tasks = []
124
+ for category in categories:
125
+ # category_prompt_sem = task_prompt_sem.format(class_name=category)
126
+ files, kps, _, _ = load_eval_data(args, data_dir, category, split)
127
+ N = len(files) // 2
128
+
129
+ # for pair_idx in range(N):
130
+ for pair_idx in tqdm(range(N), desc=f"Adding {category} pairs"):
131
+ img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE)
132
+ img2, _ = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, img_size=args.ANNO_SIZE)
133
+ src_id = files[2*pair_idx].split('/')[-1].split('.')[0]
134
+ tgt_id = files[2*pair_idx+1].split('/')[-1].split('.')[0]
135
+
136
+ for kps_idx in range(img1_kps.shape[0]):
137
+ # point_x = int(img1_kps[kps_idx, 0])
138
+ # point_y = int(img1_kps[kps_idx, 1])
139
+ # get two decimal places
140
+ point_x = f"{img1_kps[kps_idx, 0]:.2f}"
141
+ point_y = f"{img1_kps[kps_idx, 1]:.2f}"
142
+ category_prompt_sem = task_prompt_sem.format(class_name=category, point_x=point_x, point_y=point_y)
143
+ tasks.append((img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, category_prompt_sem))
144
+
145
+ # Now, process the flat list of tasks in parallel
146
+ inference_jobs = []
147
+ # Use max_workers=None to use all available CPU cores
148
+ with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
149
+ # `map` will apply `prepare_single_job` to each item in `tasks` across multiple processes
150
+ # `tqdm` provides a progress bar
151
+ results = list(tqdm(executor.map(prepare_single_job, tasks), total=len(tasks), desc="Preparing Jobs"))
152
+
153
+ # Filter out None results (from invisible keypoints)
154
+ inference_jobs = [job for job in results if job is not None]
155
+
156
+ print(f"Prepared {len(inference_jobs)} total jobs.")
157
+
158
+
159
+ # --- STAGE 2: BATCHED SEMANTIC EXTRACTION (1st Model Call) ---
160
+ print("\n--- Stage 2: Running batched semantic extraction... ---")
161
+ src_images = [job["img1_kp"] for job in inference_jobs]
162
+ src_prompts = [job["prompt_sem"] for job in inference_jobs]
163
+
164
+ dataset_sem = VLDataset(src_images, src_prompts)
165
+ loader_sem = DataLoader(dataset_sem, batch_size=args.BATCH_SIZE, num_workers=args.NUM_WORKERS, collate_fn=lambda x: x, pin_memory=True)
166
+
167
+ all_src_results = []
168
+ # for batch in loader_sem:
169
+ for batch in tqdm(loader_sem, desc="Processing semantic extraction batches"):
170
+ images_pil, text_prompts = zip(*batch)
171
+ # Use the base `chat` method as it seems you want raw text back
172
+ # NOTE: You might need a batched version of `chat` if it doesn't support lists
173
+ results = model.chat_batch(list(images_pil), list(text_prompts), system_prompt_sem, 'self-handled') # Assuming model.chat is updated for batching
174
+ all_src_results.extend(results)
175
+
176
+ # Extract semantics and prepare for the next stage
177
+ for i, job in enumerate(inference_jobs):
178
+ response_text = all_src_results[i]['response']
179
+ job["metadata"]["src_input_size"] = all_src_results[i]['input_size']
180
+ job["src_response"] = response_text
181
+ job["extracted_semantics"] = response_text.split("Keypoint component:")[-1].strip()
182
+ job["src_bbox"] = model._get_bounding_boxes(response_text, all_src_results[i]['input_size'], job['img1_kp'].size)
183
+
184
+
185
+ # --- SAVE STAGE 2 RESULTS IMMEDIATELY ---
186
+ if args.SAVE_DIR:
187
+ print(f"Saving Stage 2 results to {stage2_dir}...")
188
+ for job in tqdm(inference_jobs, desc="Saving Stage 2 results"):
189
+ meta = job["metadata"]
190
+ if args.DEBUG:
191
+ filename = f"DEBUG:{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
192
+ else:
193
+ filename = f"{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
194
+ save_path = os.path.join(stage2_dir, filename)
195
+ src_bbox = list(job["src_bbox"].values())[0]['bbox'] if job["src_bbox"] and job["src_bbox"].values() else None
196
+ output_data = {
197
+ "metadata": meta,
198
+ "full_response": job["src_response"],
199
+ "extracted_semantics": job["extracted_semantics"],
200
+ "source_bbox": src_bbox,
201
+ }
202
+ with open(save_path, 'w') as f:
203
+ json.dump(output_data, f, indent=4)
204
+
205
+ # --- STAGE 3: BATCHED BOUNDING BOX PREDICTION (2nd Model Call) ---
206
+ print("\n--- Stage 3: Running batched bounding box prediction... ---")
207
+ if args.DEBUG:
208
+ # print("Debug mode enabled: Saving intermediate results to disk.")
209
+ print("Debug mode enabled: Using img1 for bounding box prediction.")
210
+ tgt_images = [job["img1"] for job in inference_jobs]
211
+ else:
212
+ tgt_images = [job["img2"] for job in inference_jobs]
213
+ tgt_prompts = [task_prompt_bbox.format(class_name=job["metadata"]["category"], extracted_semantics=job["extracted_semantics"]) for job in inference_jobs]
214
+
215
+ dataset_bbox = VLDataset(tgt_images, tgt_prompts)
216
+ loader_bbox = DataLoader(dataset_bbox, batch_size=args.BATCH_SIZE, num_workers=args.NUM_WORKERS, collate_fn=lambda x: x)
217
+
218
+ all_tgt_results = []
219
+ # for batch in loader_bbox:
220
+ for batch in tqdm(loader_bbox, desc="Processing bounding box prediction batches"):
221
+ images_pil, text_prompts = zip(*batch)
222
+ # Assuming model.predict is updated for batching
223
+ results = model.predict_batch(list(images_pil), list(text_prompts), system_prompt_bbox, 'object')
224
+ all_tgt_results.extend(results)
225
+
226
+ # --- SAVE STAGE 3 RESULTS IMMEDIATELY ---
227
+ if args.SAVE_DIR:
228
+ print(f"Saving Stage 3 results to {stage3_dir}...")
229
+ for i, job in enumerate(tqdm(inference_jobs, desc="Saving Stage 3 results")):
230
+ meta = job["metadata"]
231
+ tgt_result = all_tgt_results[i]
232
+ if args.DEBUG:
233
+ tgt_bbox = model.predict_bounding_boxes(job["img1"], tgt_result['response'], tgt_result['input_size'])
234
+ else:
235
+ tgt_bbox = model.predict_bounding_boxes(job["img2"], tgt_result['response'], tgt_result['input_size'])
236
+
237
+ filename = f"{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
238
+ save_path = os.path.join(stage3_dir, filename)
239
+ output_data = {
240
+ "metadata": meta,
241
+ "full_response": tgt_result['response'],
242
+ "target_bbox": tgt_bbox,
243
+ "target_input_size": tgt_result['input_size'],
244
+ }
245
+ with open(save_path, 'w') as f:
246
+ json.dump(output_data, f, indent=4)
247
+
248
+
249
+
250
+ # --- STAGE 4: LOGGING ---
251
+ print("\n--- Stage 4: Plotting and logging results to WandB... ---")
252
+ # for i, job in enumerate(inference_jobs):
253
+ for i, job in tqdm(enumerate(inference_jobs), total=len(inference_jobs), desc="Logging results"):
254
+ meta = job["metadata"]
255
+ src_bbox_dict = job["src_bbox"]
256
+ tgt_result = all_tgt_results[i]
257
+
258
+ src_bbox = list(src_bbox_dict.values())[0]['bbox'] if src_bbox_dict else None
259
+ tgt_bbox = model.predict_bounding_boxes(job["img2"], tgt_result['response'], tgt_result['input_size'])
260
+
261
+ # Create plot
262
+ # --- COMPLETED PLOTTING LOGIC ---
263
+ # Get the source and target images for plotting
264
+ img1_kp = job["img1_kp"]
265
+ if args.DEBUG:
266
+ img2 = job["img1"] # Use img1 for bbox prediction in debug mode
267
+ else:
268
+ img2 = job["img2"]
269
+
270
+ fig, axes = plt.subplots(1, 2, figsize=(12, 6))
271
+ axes[0].imshow(np.array(img1_kp))
272
+ axes[0].axis('off')
273
+ axes[0].set_title('Source Image with Keypoint')
274
+ axes[1].imshow(np.array(img2))
275
+ axes[1].axis('off')
276
+ axes[1].set_title('Target Image with Bounding Box')
277
+ if src_bbox:
278
+ abs_x1, abs_y1, abs_x2, abs_y2 = src_bbox
279
+ axes[0].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
280
+
281
+ if tgt_bbox:
282
+ abs_x1, abs_y1, abs_x2, abs_y2 = tgt_bbox
283
+ axes[1].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
284
+ else:
285
+ axes[1].text(img2.width / 2, img2.height / 2, "No bounding box found", color='red', fontsize=12, ha='center', va='center')
286
+
287
+ fig.tight_layout()
288
+
289
+ wandb_img = wandb.Image(fig)
290
+ plt.close(fig)
291
+
292
+
293
+ results_table.add_data(
294
+ meta["category"],
295
+ meta["src_id"],
296
+ meta["tgt_id"],
297
+ meta["kps_idx"],
298
+ wandb_img,
299
+ job["extracted_semantics"],
300
+ job["src_response"],
301
+ str(src_bbox),
302
+ job["metadata"]["src_input_size"],
303
+ tgt_result['response'],
304
+ str(tgt_bbox),
305
+ tgt_result['input_size']
306
+ )
307
+
308
+ wandb.log({"evaluation_results": results_table})
309
+
310
+
311
+ # ===================================================================
312
+ # 4. YOUR ORIGINAL `main` FUNCTION, NOW SIMPLIFIED TO CALL THE BATCHED RUNNER
313
+ # ===================================================================
314
+ def main(args):
315
+ with open(args.SYSTEM_PROMPT_SEM, 'r') as f:
316
+ system_prompt_sem = f.read()
317
+ with open(args.SYSTEM_PROMPT_BBOX, 'r') as f:
318
+ system_prompt_bbox = f.read()
319
+ with open(args.TASK_PROMPT_SEM, 'r') as f:
320
+ task_prompt_sem = f.read()
321
+ with open(args.TASK_PROMPT_BBOX, 'r') as f:
322
+ task_prompt_bbox = f.read()
323
+
324
+ # Initialize the Qwen VLM model
325
+ print("Initializing Qwen model...")
326
+ model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name=args.MODEL_NAME, device="auto", flash_attn=True)
327
+ # model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name="Qwen/Qwen2.5-VL-7B-Instruct", device="auto", flash_attn=True)
328
+
329
+ # Initialize WandB
330
+ print("Initializing WandB...")
331
+ wandb.init(
332
+ project=args.EVAL_DATASET,
333
+ entity="amazon_intern2025",
334
+ name=args.EXP_NOTE,
335
+ config=vars(args)
336
+ )
337
+
338
+ # Run the optimized evaluation
339
+ run_batched_evaluation(args, model, system_prompt_sem, system_prompt_bbox, task_prompt_sem, task_prompt_bbox)
340
+
341
+ print('Finished processing all categories and logging results.')
342
+ wandb.finish()
343
+ print('WandB run finished.')
344
+
345
+
346
+ if __name__ == "__main__":
347
+ parser = argparse.ArgumentParser(description="Predict correspondence using Qwen VLM.")
348
+ # ... (all your existing arguments) ...
349
+ parser.add_argument('--SYSTEM_PROMPT_SEM', type=str, required=True)
350
+ parser.add_argument('--SYSTEM_PROMPT_BBOX', type=str, required=True)
351
+ parser.add_argument('--TASK_PROMPT_SEM', type=str, required=True)
352
+ parser.add_argument('--TASK_PROMPT_BBOX', type=str, required=True)
353
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, choices=['pascal', 'spair'])
354
+ parser.add_argument('--ANNO_SIZE', type=int, default=840)
355
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0)
356
+ parser.add_argument('--EXP_NOTE', type=str, default='Qwen VLM demo')
357
+ parser.add_argument('--SAVE_DIR', type=str, default='./results_vlm/', help='Directory to save intermediate results.')
358
+ parser.add_argument('--DEBUG', action='store_true', help='Enable debug mode for verbose output.') # decouple the prediction
359
+ parser.add_argument('--MODEL_NAME', type=str, default='Qwen/Qwen2.5-VL-32B-Instruct', help='Model name for Qwen VLM.')
360
+
361
+ # --- ADD THESE NEW ARGUMENTS for controlling batching ---
362
+ parser.add_argument('--BATCH_SIZE', type=int, default=4, help='Batch size for GPU inference.')
363
+ parser.add_argument('--NUM_WORKERS', type=int, default=8, help='Number of CPU cores for data loading.')
364
+
365
+
366
+ args = parser.parse_args()
367
+ main(args)
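A minimal sketch for pairing the Stage 2 and Stage 3 JSON files written by `run_batched_evaluation`; the directory names follow the SAVE_DIR/EVAL_DATASET/EXP_NOTE layout above, but the concrete values are hypothetical:

```python
import glob
import json
import os

stage2_dir = './results_vlm/stage2_semantics/spair/my_exp_note'  # hypothetical EXP_NOTE
stage3_dir = './results_vlm/stage3_bboxes/spair/my_exp_note'

for s2_path in sorted(glob.glob(os.path.join(stage2_dir, '*.json'))):
    with open(s2_path) as f:
        s2 = json.load(f)
    s3_path = os.path.join(stage3_dir, os.path.basename(s2_path))
    tgt_bbox = None
    if os.path.exists(s3_path):
        with open(s3_path) as f:
            tgt_bbox = json.load(f).get('target_bbox')
    meta = s2['metadata']
    print(meta['category'], meta['src_id'], meta['tgt_id'], meta['kps_idx'],
          '|', s2['extracted_semantics'], '| src:', s2['source_bbox'], '| tgt:', tgt_bbox)
```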
Code/qwen_inference/prompts/sys_bbox.txt ADDED
@@ -0,0 +1,39 @@
1
+ You are a helpful assistant.
2
+
3
+ # Task
4
+ 1. Analyze the image of a {class_name} and locate the most prominent instance of the component described by: "{extracted_semantics}".
5
+ 2. Interpret the semantics carefully — including spatial orientation (e.g., left/right from object's perspective), attachment, and visibility.
6
+ 3. If the component is clearly visible and matches the description, draw a tight 2-D bounding box around it in absolute pixel coordinates: [x1, y1, x2, y2].
7
+ 4. If the component is:
8
+ - not present,
9
+ - fully occluded,
10
+ - ambiguous, or
11
+ - does not match the semantics (e.g., wrong side, wrong part),
12
+ return an empty list: [].
13
+
14
+ # Output Rules
15
+ - On a **new line**, output the result in **exactly** this JSON format:
16
+ ```json
17
+ [
18
+ {"bbox_2d": [x1, y1, x2, y2], "label": "<component-label>"}
19
+ ]
20
+ ```
21
+ - OR, if not found
22
+ ```json
23
+ []
24
+ ```
25
+ - All coordinates are in absolute pixels; assume image size is 840×840 with (0,0) at top-left.
26
+ - No extra text, explanations, or formatting outside the JSON.
27
+
28
+ # Examples:
29
+ Thought: The semantics "left engine (mounted under left wing, pilot's perspective)" refer to the engine on the pilot’s left. In this image, the left engine is visible and matches the description.
30
+ ```json
31
+ [
32
+ {"bbox_2d": [x1, y1, x2, y2], "label": "left engine"}
33
+ ]
34
+ ```
35
+
36
+ Thought: The semantics "right ear (upright, located on the top-right side of the head from the object's perspective)" describe the right ear. However, the dog’s head is partially cropped, and the right ear is not visible.
37
+ ```json
38
+ []
39
+ ```
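A minimal sketch of parsing the fenced JSON block this system prompt asks the model to emit; `parse_bbox_response` is a hypothetical helper, not part of the inference code:

```python
import json
import re

def parse_bbox_response(response: str):
    """Return the parsed list from the fenced json block, or [] if absent/invalid."""
    match = re.search(r"```json\s*(.*?)```", response, re.DOTALL)
    if not match:
        return []
    try:
        return json.loads(match.group(1).strip())
    except json.JSONDecodeError:
        return []

example = ('Thought: the left engine is visible.\n'
           '```json\n[{"bbox_2d": [120, 340, 260, 410], "label": "left engine"}]\n```')
print(parse_bbox_response(example))
```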
Code/qwen_inference/prompts/tsk_bbox.txt ADDED
@@ -0,0 +1,4 @@
1
+ Locate the most prominent instance of the component described by "{extracted_semantics}" in the image of a {class_name}. The component should match the described location, orientation, and attachment from the object’s perspective. Assume the image is 840×840 pixels with (0,0) at the top-left. If the component is clearly visible and matches the semantics, return a single tight bounding box in absolute pixel coordinates [x1, y1, x2, y2]. Otherwise, return an empty list.
2
+
3
+
4
+
Code/qwen_inference/prompts/tsk_vlm_judge.txt ADDED
@@ -0,0 +1,22 @@
1
+ You are given two images:
2
+
3
+ - **First image (source)**: includes a green bounding box marking a specific semantic part of an object.
4
+ - **Second image (target)**: contains a **predicted bounding box**, which may or may not be present.
5
+
6
+ Your task is to **judge the correctness of the prediction** in the second image.
7
+
8
+ Instructions:
9
+
10
+ 1. If the **second image contains a green bounding box**, determine whether it refers to the **same semantic part** as highlighted in the first image. Consider:
11
+ - Object orientation and viewpoint
12
+ - Symmetry and part alignment
13
+ - Whether the semantic part is visible and appropriately localized
14
+
15
+ 2. If the **second image does NOT contain a bounding box**, determine whether this is correct:
16
+ - If the semantic part from the first image is **absent, occluded, or cropped** in the second image, then **no bounding box is a correct prediction**.
17
+ - If the part is still visible or partially visible, then **a bounding box should have been predicted**, and the absence is incorrect.
18
+
19
+ **Output your response in the following format:**
20
+
21
+ <reasoning>
22
+ <final_answer> (only output "yes" or "no")
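
Because the judge is instructed to end with a bare "yes" or "no", a lightweight verdict parser can look like the sketch below (a heuristic; `parse_judge_verdict` is an illustrative name, not the repository's evaluator).

```python
import re

def parse_judge_verdict(reply: str):
    """Return True/False for the last standalone 'yes'/'no' in the reply, or None if absent."""
    matches = re.findall(r"\b(yes|no)\b", reply.lower())
    return (matches[-1] == "yes") if matches else None

# parse_judge_verdict("<reasoning> Same left ear, consistent viewpoint.\nyes")  -> True
```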
Code/qwen_inference/qwen_utils.py ADDED
@@ -0,0 +1,1289 @@
1
+ # class PromptManager():
2
+ # def __init__(self):
3
+ # pass
4
+
5
+ # def construct_prompt(self, text_prompt, prompt_type):
6
+ # if prompt_type.lower() == 'object':
7
+ # prompt = f"""Outline the position of each {text_prompt} and output all bounding box coordinates in JSON format"""
8
+ # elif prompt_type.lower() == 'self-handled':
9
+ # prompt = text_prompt
10
+ # else:
11
+ # raise NotImplementedError
12
+
13
+ # return prompt
14
+
15
+ import os
16
+ from os import path
17
+ from PIL import Image, ImageDraw, ImageFont
18
+ from PIL import ImageColor
19
+ import json
20
+ import requests
21
+ from io import BytesIO
22
+ from urllib.parse import urlparse
23
+ import pathlib
24
+ import torch
25
+ from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
26
+ from qwen_vl_utils import process_vision_info
27
+
28
+ class PromptManager:
29
+ """
30
+ Builds a prompt that forces the LLM to return
31
+ a *pure* JSON list of bounding-box dictionaries:
32
+
33
+ [
34
+ {"bbox_2d": [x1, y1, x2, y2], "label": "<your-label>"},
35
+ ...
36
+ ]
37
+ """
38
+
39
+ @staticmethod
40
+ def construct_prompt(text_prompt: str, prompt_type: str = "object") -> str:
41
+ if prompt_type.lower() == "object":
42
+ prompt = text_prompt + """
43
+ Required format (copy this structure exactly):
44
+ ```json
45
+ [
46
+ {"bbox_2d": [x1, y1, x2, y2], "label": "<your-label>"},
47
+ ]
48
+ ```
49
+ """
50
+ return prompt
51
+ elif prompt_type.lower() == "self-handled":
52
+ return text_prompt
53
+ else:
54
+ raise NotImplementedError("prompt_type must be 'object' or 'self-handled'")
55
+
56
+
57
+ def load_image(source, image_path=None):
58
+ try:
59
+ parsed = urlparse(source)
60
+ is_url = bool(parsed.scheme and parsed.netloc)
61
+
62
+ if is_url:
63
+ response = requests.get(source, stream=True)
64
+ response.raise_for_status()
65
+ img = Image.open(BytesIO(response.content))
66
+ else:
67
+ if path.exists(source):
68
+ img = Image.open(source)
69
+ else:
70
+ print(f"Error: Local file not found at {source}")
71
+ return None
72
+
73
+ if image_path is not None:
74
+ directory = path.dirname(image_path)
75
+ if directory and not path.exists(directory):
76
+ pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
77
+ img.save(image_path)
78
+ print(f"Image saved to {image_path}")
79
+
80
+ return img
81
+
82
+ except Exception as e:
83
+ print(f"Error loading image: {e}")
84
+ return None
85
+
86
+
87
+ class QwenVLDetector():
88
+ def __init__(self, model_dir=None, torch_dtype=torch.bfloat16, device="cuda:0", model_name="Qwen/Qwen2.5-VL-7B-Instruct", flash_attn=False):
89
+ if model_dir is not None:
90
+ self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_dir, torch_dtype=torch_dtype, device_map=device, local_files_only=True)
91
+ self.processor = AutoProcessor.from_pretrained(model_dir)
92
+ else:
93
+ attn_impl = "flash_attention_2" if flash_attn else "sdpa"
94
+ self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_name, torch_dtype=torch_dtype, device_map=device, attn_implementation=attn_impl)
95
+ self.processor = AutoProcessor.from_pretrained(model_name)
96
+
97
+
98
+ self.processor.tokenizer.padding_side = "left"
99
+ self.prompt_manager = PromptManager()
100
+ self._init_parameters()
101
+
102
+ def _init_parameters(self):
103
+ self.max_size = 1024
104
+ self.max_new_tokens = 1024
105
+ self.min_pixels = 512 * 512
106
+ self.max_pixels = self.max_size * self.max_size
107
+
108
+ def resize_image(self, img, max_size):
109
+ width, height = img.size
110
+ if max(width, height) <= max_size:
111
+ return img
112
+ scaling_factor = max_size / max(width, height)
113
+ new_width = int(width * scaling_factor)
114
+ new_height = int(height * scaling_factor)
115
+ return img.resize((new_width, new_height), Image.Resampling.LANCZOS)
116
+
117
+ def _parse_json(self, response_text):
118
+ """
119
+ Extracts the JSON payload string from the model's response text.
120
+ Prioritizes extracting from a ```json markdown fence.
121
+ """
122
+ try:
123
+ # Priority 1: Look for a JSON markdown fence. This is the most reliable.
124
+ if "```json" in response_text:
125
+ # Isolate the string between the fences
126
+ json_str = response_text.split("```json")[1].split("```")[0]
127
+ return json_str.strip()
128
+
129
+ # Priority 2: If no fence, find the first '[' and last ']'
130
+ # This is less reliable and can cause the error you saw.
131
+ start_idx = response_text.find('[')
132
+ end_idx = response_text.rfind(']')
133
+
134
+ if start_idx != -1 and end_idx != -1 and end_idx > start_idx:
135
+ return response_text[start_idx : end_idx + 1]
136
+
137
+ except (IndexError, json.JSONDecodeError):
138
+ # This will catch errors from malformed markdown or other slicing issues.
139
+ pass
140
+
141
+ # Return None if no valid JSON is found
142
+ return None
143
+
144
+ def calculate_iou(self, box1, box2):
145
+ x1_1, y1_1, x2_1, y2_1 = box1
146
+ x1_2, y1_2, x2_2, y2_2 = box2
147
+ area1 = (x2_1 - x1_1) * (y2_1 - y1_1)
148
+ area2 = (x2_2 - x1_2) * (y2_2 - y1_2)
149
+ x1_i, y1_i = max(x1_1, x1_2), max(y1_1, y1_2)
150
+ x2_i, y2_i = min(x2_1, x2_2), min(y2_1, y2_2)
151
+ if x2_i <= x1_i or y2_i <= y1_i:
152
+ return 0.0
153
+ intersection_area = (x2_i - x1_i) * (y2_i - y1_i)
154
+ union_area = area1 + area2 - intersection_area
155
+ return intersection_area / union_area if union_area > 0 else 0.0
156
+
157
+ def _check_bbox(self, bbox_with_label, bbox_coor, size, threshold=0.7):
158
+ x1, y1, x2, y2 = bbox_coor
159
+ width, height = size
160
+ if (x2 - x1) * (y2 - y1) / (width * height) >= threshold:
161
+ return False
162
+ for bbox_label in bbox_with_label.values():
163
+ if self.calculate_iou(bbox_label['bbox'], bbox_coor) >= threshold:
164
+ return False
165
+ return True
166
+
167
+ def _get_bounding_boxes(self, response, input_size, size):
168
+ bounding_boxes_str = self._parse_json(response)
169
+ print(f"Bounding boxes string: {bounding_boxes_str}")
170
+ if not bounding_boxes_str:
171
+ return {}
172
+
173
+ input_width, input_height = input_size
174
+ width, height = size
175
+
176
+ try:
177
+ # Use robust, standard JSON parsing
178
+ json_output = json.loads(bounding_boxes_str)
179
+ except json.JSONDecodeError:
180
+ print(f"Warning: Could not decode JSON from model output: {bounding_boxes_str}")
181
+ return {}
182
+
183
+ bbox_with_label = {}
184
+ if isinstance(json_output, list) and len(json_output) > 0:
185
+ for i, bounding_box in enumerate(json_output):
186
+ try:
187
+ coords = bounding_box["bbox_2d"]
188
+ abs_x1 = int(coords[0] / input_width * width)
189
+ abs_y1 = int(coords[1] / input_height * height)
190
+ abs_x2 = int(coords[2] / input_width * width)
191
+ abs_y2 = int(coords[3] / input_height * height)
192
+
193
+ if abs_x1 > abs_x2: abs_x1, abs_x2 = abs_x2, abs_x1
194
+ if abs_y1 > abs_y2: abs_y1, abs_y2 = abs_y2, abs_y1
195
+
196
+ if self._check_bbox(bbox_with_label, [abs_x1, abs_y1, abs_x2, abs_y2], size):
197
+ bbox_with_label[i] = {'bbox': [abs_x1, abs_y1, abs_x2, abs_y2], 'label': bounding_box.get('label')}
198
+
199
+ except (KeyError, IndexError, TypeError) as e:
200
+ print(f"Error processing bounding box {bounding_box}: {e}")
201
+
202
+ return bbox_with_label
203
+
204
+ # Other methods like chat, predict, plot_bounding_boxes etc. remain the same,
205
+ # but I've included a corrected plot_bounding_boxes method for robustness.
206
+ def chat(self, image, text_prompt, system_prompt, prompt_type):
207
+ curr_prompt = self.prompt_manager.construct_prompt(text_prompt, prompt_type)
208
+
209
+ # Handle the image input: either load from URL or use the PIL Image directly
210
+ if isinstance(image, str):
211
+ # It's a URL or path, load the image
212
+ image_type = 'str'
213
+ image_pil = load_image(image)
214
+ size = image_pil.size
215
+ messages = [
216
+ {
217
+ "role": "system",
218
+ "content": system_prompt
219
+ },
220
+ {
221
+ "role": "user",
222
+ "content": [
223
+ {
224
+ "type": "text",
225
+ "text": curr_prompt
226
+ },
227
+ {
228
+ "type": "image",
229
+ "image": image,
230
+ "min_pixels": self.min_pixels,
231
+ "max_pixels": self.max_pixels
232
+ }
233
+ ]
234
+ }
235
+ ]
236
+
237
+ text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
238
+ image_inputs, video_inputs = process_vision_info(messages)
239
+ inputs = self.processor(text=[text],
240
+ images=image_inputs,
241
+ videos=video_inputs,
242
+ padding=True,
243
+ return_tensors="pt"
244
+ ).to(self.model.device)
245
+
246
+ else:
247
+ image_type = 'pil'
248
+ # Assume it's already a PIL Image
249
+ image_pil = image
250
+ # For the message content, we need to use the image directly
251
+
252
+ size = image_pil.size
253
+ image_for_message = self.resize_image(image_pil, self.max_size)
254
+
255
+ messages = [
256
+ {
257
+ "role": "system",
258
+ "content": system_prompt
259
+ },
260
+ {
261
+ "role": "user",
262
+ "content": [
263
+ {
264
+ "type": "text",
265
+ "text": curr_prompt
266
+ },
267
+ {
268
+ "type": "image",
269
+ "image": image_for_message
270
+ }
271
+ ]
272
+ }
273
+ ]
274
+
275
+ text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
276
+ inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
277
+
278
+ output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
279
+ generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
280
+ output_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
281
+ response = output_text[0]
282
+
283
+ input_height = inputs['image_grid_thw'][0][1] * 14
284
+ input_width = inputs['image_grid_thw'][0][2] * 14
285
+ input_size = (input_width.item(), input_height.item())
286
+
287
+ del text, inputs, output_ids, generated_ids, output_text
288
+ torch.cuda.empty_cache()
289
+
290
+ return {'response': response, 'input_size': input_size}
291
+
292
+ # def predict(self, image, text_prompt, system_prompt, prompt_type='object', bbox_refine=False):
293
+ # # This method's internal logic remains the same, as it calls _get_bounding_boxes
294
+ # # which has been corrected.
295
+ # curr_prompt = self.prompt_manager.construct_prompt(text_prompt, prompt_type)
296
+
297
+ # print('this is my curr prompt:', curr_prompt)
298
+
299
+ # if isinstance(image, str):
300
+ # image_pil = load_image(image)
301
+ # image_for_message = image
302
+ # else:
303
+ # image_pil = image
304
+ # image_for_message = self.resize_image(image_pil, self.max_size)
305
+
306
+ # size = image_pil.size
307
+ # messages = [
308
+ # {"role": "system", "content": system_prompt},
309
+ # {"role": "user", "content": [{"type": "text", "text": curr_prompt}, {"type": "image", "image": image_for_message}]}
310
+ # ]
311
+
312
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
313
+ # inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
314
+
315
+ # output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
316
+ # generated_ids = output_ids[:, inputs.input_ids.shape[1]:]
317
+ # response = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
318
+
319
+ # input_height = inputs['image_grid_thw'][0][1] * 14
320
+ # input_width = inputs['image_grid_thw'][0][2] * 14
321
+ # input_size = (input_width.item(), input_height.item())
322
+
323
+ # # Cleanup
324
+ # del text, inputs, output_ids, generated_ids
325
+ # torch.cuda.empty_cache()
326
+
327
+ # # Bbox refine logic can be kept if needed
328
+ # # if bbox_refine: ...
329
+
330
+ # bbox_with_label = self._get_bounding_boxes(response, input_size, size)
331
+ # results = {'bbox_with_label': bbox_with_label, 'response': response, 'input_size': input_size}
332
+
333
+ # return results
334
+
335
+ def predict(self, image, text_prompt, system_prompt, prompt_type='object', bbox_refine=False):
336
+ curr_prompt = self.prompt_manager.construct_prompt(text_prompt, prompt_type)
337
+
338
+ print('this is my curr prompt:', curr_prompt)
339
+
340
+ # Handle the image input: either load from URL or use the PIL Image directly
341
+ if isinstance(image, str):
342
+ # It's a URL or path, load the image
343
+ image_type = 'str'
344
+ image_pil = load_image(image)
345
+ size = image_pil.size
346
+ image_for_message = image
347
+ messages = [
348
+ {
349
+ "role": "system",
350
+ "content": system_prompt
351
+ },
352
+ {
353
+ "role": "user",
354
+ "content": [
355
+ {
356
+ "type": "text",
357
+ "text": curr_prompt
358
+ },
359
+ {
360
+ "type": "image",
361
+ "image": image,
362
+ "min_pixels": self.min_pixels,
363
+ "max_pixels": self.max_pixels
364
+ }
365
+ ]
366
+ }
367
+ ]
368
+
369
+ text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
370
+ image_inputs, video_inputs = process_vision_info(messages)
371
+ inputs = self.processor(text=[text],
372
+ images=image_inputs,
373
+ videos=video_inputs,
374
+ padding=True,
375
+ return_tensors="pt"
376
+ ).to(self.model.device)
377
+
378
+ else:
379
+ image_type = 'pil'
380
+ # Assume it's already a PIL Image
381
+ image_pil = image
382
+ # For the message content, we need to use the image directly
383
+
384
+ size = image_pil.size
385
+ image_for_message = self.resize_image(image_pil, self.max_size)
386
+
387
+ messages = [
388
+ {
389
+ "role": "system",
390
+ "content": system_prompt
391
+ },
392
+ {
393
+ "role": "user",
394
+ "content": [
395
+ {
396
+ "type": "text",
397
+ "text": curr_prompt
398
+ },
399
+ {
400
+ "type": "image",
401
+ "image": image_for_message
402
+ }
403
+ ]
404
+ }
405
+ ]
406
+
407
+ text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
408
+ inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
409
+
410
+ output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
411
+ generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
412
+ output_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
413
+
414
+ input_height = inputs['image_grid_thw'][0][1]*14
415
+ input_width = inputs['image_grid_thw'][0][2]*14
416
+
417
+ response_org = output_text[0]
418
+ input_size = (input_width.item(), input_height.item())
419
+
420
+ del text, inputs, output_ids, generated_ids, output_text
421
+ torch.cuda.empty_cache()
422
+
423
+ # NOTE: _bbox_refine is only defined in the commented-out legacy code further below,
+ # so calling predict(..., bbox_refine=True) currently raises AttributeError.
+ if bbox_refine:
424
+ response = self._bbox_refine(response_org, messages, image_type, image_for_message)
425
+ else:
426
+ response = response_org
427
+
428
+ results = dict()
429
+ bbox_with_label = self._get_bounding_boxes(response, input_size, size)
430
+ results = {'bbox_with_label': bbox_with_label, 'response': response, 'input_size': input_size}
431
+
432
+ return results
433
+
434
+ def plot_bounding_boxes(self, image, response, input_size, output_path='./temp.png'):
435
+ img = image.copy()
436
+ width, height = img.size
437
+ input_width, input_height = input_size
438
+
439
+ draw = ImageDraw.Draw(img)
440
+ colors = list(ImageColor.colormap.keys())
441
+ font = ImageFont.load_default()
442
+
443
+ bounding_boxes_str = self._parse_json(response)
444
+ if not bounding_boxes_str:
445
+ print("No bounding boxes found in the response.")
446
+ img.save(output_path)
447
+ return
448
+
449
+ try:
450
+ # Use robust, standard JSON parsing
451
+ json_output = json.loads(bounding_boxes_str)
452
+ except json.JSONDecodeError:
453
+ print(f"Warning: Could not decode JSON for plotting: {bounding_boxes_str}")
454
+ img.save(output_path)
455
+ return
456
+
457
+ if isinstance(json_output, list) and len(json_output) > 0:
458
+ for i, bounding_box in enumerate(json_output):
459
+ try:
460
+ color = colors[i % len(colors)]
461
+ coords = bounding_box["bbox_2d"]
462
+ abs_x1 = int(coords[0] / input_width * width)
463
+ abs_y1 = int(coords[1] / input_height * height)
464
+ abs_x2 = int(coords[2] / input_width * width)
465
+ abs_y2 = int(coords[3] / input_height * height)
466
+
467
+ if abs_x1 > abs_x2: abs_x1, abs_x2 = abs_x2, abs_x1
468
+ if abs_y1 > abs_y2: abs_y1, abs_y2 = abs_y2, abs_y1
469
+
470
+ draw.rectangle(((abs_x1, abs_y1), (abs_x2, abs_y2)), outline=color, width=4)
471
+
472
+ if "label" in bounding_box:
473
+ draw.text((abs_x1 + 8, abs_y1 + 6), str(bounding_box["label"]), fill=color, font=font)
474
+ except (KeyError, IndexError, TypeError) as e:
475
+ print(f"Error processing bounding box for plotting {bounding_box}: {e}")
476
+
477
+ img.save(output_path)
478
+ print(f"Image with bounding boxes saved to: {output_path}")
479
+
480
+ def predict_bounding_boxes(self, image, response, input_size):
481
+ # This method is for extracting coordinates, so it also benefits from robust parsing
482
+ width, height = image.size
483
+ input_width, input_height = input_size
484
+
485
+ bounding_boxes_str = self._parse_json(response)
486
+ if not bounding_boxes_str:
487
+ return None
488
+
489
+ try:
490
+ # Use robust, standard JSON parsing
491
+ json_output = json.loads(bounding_boxes_str)
492
+ except json.JSONDecodeError:
493
+ return None
494
+
495
+ if isinstance(json_output, list) and len(json_output) > 0:
496
+ try:
497
+ bounding_box = json_output[0] # Assuming we only want the first one
498
+ coords = bounding_box["bbox_2d"]
499
+ abs_x1 = int(coords[0] / input_width * width)
500
+ abs_y1 = int(coords[1] / input_height * height)
501
+ abs_x2 = int(coords[2] / input_width * width)
502
+ abs_y2 = int(coords[3] / input_height * height)
503
+ if abs_x1 > abs_x2: abs_x1, abs_x2 = abs_x2, abs_x1
504
+ if abs_y1 > abs_y2: abs_y1, abs_y2 = abs_y2, abs_y1
505
+ return [abs_x1, abs_y1, abs_x2, abs_y2]
506
+ except (KeyError, IndexError, TypeError):
507
+ return None
508
+ return None
509
+
510
+ # ===================================================================
511
+ # NEW BATCHED INFERENCE FUNCTIONS
512
+ # ===================================================================
513
+ def chat_batch(self, images, text_prompts, system_prompt, prompt_type):
514
+ """Processes a batch of images and prompts for chat-style interaction."""
515
+ if not images:
516
+ return []
517
+
518
+ # 1. Prepare all inputs for the batch
519
+ prompts = [self.prompt_manager.construct_prompt(tp, prompt_type) for tp in text_prompts]
520
+ images_for_message = [self.resize_image(img, self.max_size) for img in images]
521
+
522
+ print(f"Batch size: {len(images)}")
523
+ print(f"Prompts: {prompts}")
524
+
525
+ # 2. Create message structures for the entire batch
526
+ batch_messages = []
527
+ for i in range(len(images)):
528
+ batch_messages.append([
529
+ {"role": "system", "content": system_prompt},
530
+ {"role": "user", "content": [{"type": "text", "text": prompts[i]}, {"type": "image"}]}
531
+ ])
532
+
533
+ # 3. Use the processor on the entire batch at once
534
+ text_for_processor = [self.processor.apply_chat_template(m, tokenize=False, add_generation_prompt=True) for m in batch_messages]
535
+ inputs = self.processor(
536
+ text=text_for_processor,
537
+ images=images_for_message,
538
+ padding=True,
539
+ return_tensors="pt"
540
+ ).to(self.model.device)
541
+
542
+ # 4. Generate outputs for the entire batch
543
+ output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
544
+ generated_ids = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
545
+ responses = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
546
+
547
+ # 5. Format results
548
+ input_height = inputs['image_grid_thw'][0][1] * 14
549
+ input_width = inputs['image_grid_thw'][0][2] * 14
550
+ input_size = (input_width.item(), input_height.item())
551
+
552
+ batch_results = []
553
+ for response in responses:
554
+ batch_results.append({'response': response, 'input_size': input_size})
555
+
556
+ del inputs, output_ids, generated_ids
557
+ # torch.cuda.empty_cache()
558
+
559
+ return batch_results
560
+
561
+ def predict_batch(self, images, text_prompts, system_prompt, prompt_type='object'):
562
+ """Processes a batch of images and prompts to predict bounding boxes."""
563
+ if not images:
564
+ return []
565
+
566
+ # 1. Prepare all inputs for the batch
567
+ prompts = [self.prompt_manager.construct_prompt(tp, prompt_type) for tp in text_prompts]
568
+ images_for_message = [self.resize_image(img, self.max_size) for img in images]
569
+ original_sizes = [img.size for img in images]
570
+
571
+ # 2. Create message structures for the entire batch
572
+ batch_messages = []
573
+ for i in range(len(images)):
574
+ batch_messages.append([
575
+ {"role": "system", "content": system_prompt},
576
+ {"role": "user", "content": [{"type": "text", "text": prompts[i]}, {"type": "image"}]}
577
+ ])
578
+
579
+ # 3. Use the processor on the entire batch at once
580
+ text_for_processor = [self.processor.apply_chat_template(m, tokenize=False, add_generation_prompt=True) for m in batch_messages]
581
+ inputs = self.processor(
582
+ text=text_for_processor,
583
+ images=images_for_message,
584
+ padding=True,
585
+ return_tensors="pt"
586
+ ).to(self.model.device)
587
+
588
+ # 4. Generate outputs for the entire batch
589
+ output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
590
+ generated_ids = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
591
+ responses = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
592
+
593
+ # 5. Format results
+ # Note: input_size is taken from the first item's image_grid_thw, so the pixel rescaling
+ # below is only exact when every image in the batch is processed to the same resolution.
594
+ input_height = inputs['image_grid_thw'][0][1] * 14
595
+ input_width = inputs['image_grid_thw'][0][2] * 14
596
+ input_size = (input_width.item(), input_height.item())
597
+
598
+ batch_results = []
599
+ for i, response in enumerate(responses):
600
+ bbox_with_label = self._get_bounding_boxes(response, input_size, original_sizes[i])
601
+ batch_results.append({
602
+ 'bbox_with_label': bbox_with_label,
603
+ 'response': response,
604
+ 'input_size': input_size
605
+ })
606
+
607
+ del inputs, output_ids, generated_ids
608
+ # torch.cuda.empty_cache()
609
+
610
+ return batch_results
611
+
612
+
613
+
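
A minimal sketch of driving the batched API above, assuming a CUDA device, a couple of local images, and a system prompt whose placeholders have already been filled; the file paths and prompt strings are illustrative, not repository defaults.

```python
from PIL import Image
from qwen_utils import QwenVLDetector   # the class defined above

detector = QwenVLDetector(device="cuda:0")
system_prompt = open("prompts/sys_bbox.txt").read()   # assumed to be filled beforehand
images = [Image.open(p).convert("RGB") for p in ("img0.png", "img1.png")]
prompts = ["left front wheel of the car", "right ear of the dog"]

results = detector.predict_batch(images, prompts, system_prompt, prompt_type="object")
for r in results:
    print(r["bbox_with_label"], r["input_size"])
```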
614
+ # class PromptManager:
615
+ # """
616
+ # Builds a prompt that forces the LLM to return
617
+ # a *pure* JSON list of bounding-box dictionaries:
618
+
619
+ # [
620
+ # {"bbox_2d": [x1, y1, x2, y2], "label": "<your-label>"},
621
+ # ...
622
+ # ]
623
+ # """
624
+
625
+ # @staticmethod
626
+ # def construct_prompt(text_prompt: str, prompt_type: str = "object") -> str:
627
+ # """
628
+ # Parameters
629
+ # ----------
630
+ # target_desc : str
631
+ # Human-readable name of what you want boxed, e.g. "rear wheel of the motorbike".
632
+ # prompt_type : str
633
+ # Supported: "object" (build full bounding-box prompt)
634
+ # "self-handled" (return target_desc unchanged)
635
+ # """
636
+ # if prompt_type.lower() == "object":
637
+ # # ░░░ Prompt template ░░░
638
+ # prompt = f"""{text_prompt}
639
+ # Required format (copy this structure exactly):
640
+ # ```json
641
+ # [
642
+ # {{"bbox_2d": [x1, y1, x2, y2], "label": "<your-label>"}},
643
+ # ]
644
+ # ```
645
+ # """
646
+ # # Strip leading spaces so the first char is '[' once the model starts its answer
647
+ # return prompt
648
+
649
+ # elif prompt_type.lower() == "self-handled":
650
+ # return text_prompt
651
+ # else:
652
+ # raise NotImplementedError("prompt_type must be 'object' or 'self-handled'")
653
+
654
+
655
+ # import os
656
+ # from os import path
657
+ # from PIL import Image, ImageDraw, ImageFont
658
+ # from PIL import ImageColor
659
+ # import json
660
+ # import requests
661
+ # from io import BytesIO
662
+ # from urllib.parse import urlparse
663
+ # import pathlib
664
+
665
+ # import ast
666
+ # import torch
667
+ # from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
668
+ # from qwen_vl_utils import process_vision_info
669
+
670
+ # # from .prompt_manager import PromptManager
671
+
672
+
673
+ # def load_image(source, image_path=None):
674
+ # """
675
+ # Load an image from a URL or local file path and return a PIL Image object.
676
+ # Optionally save the loaded image to a specified path.
677
+
678
+ # Args:
679
+ # source (str): URL or local file path to the image
680
+ # image_path (str, optional): Path where to save the loaded image
681
+
682
+ # Returns:
683
+ # PIL.Image: The loaded image or None if loading fails
684
+ # """
685
+ # try:
686
+ # # Check if the source is a URL or local path
687
+ # parsed = urlparse(source)
688
+ # is_url = bool(parsed.scheme and parsed.netloc)
689
+
690
+ # if is_url:
691
+ # # Handle URL
692
+ # response = requests.get(source, stream=True)
693
+ # response.raise_for_status()
694
+ # img = Image.open(BytesIO(response.content))
695
+ # else:
696
+ # # Handle local file path
697
+ # if path.exists(source):
698
+ # img = Image.open(source)
699
+ # else:
700
+ # print(f"Error: Local file not found at {source}")
701
+ # return None
702
+
703
+ # # Save the image if image_path is provided
704
+ # if image_path is not None:
705
+ # # Make sure the directory exists
706
+ # directory = path.dirname(image_path)
707
+ # if directory and not path.exists(directory):
708
+ # pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
709
+
710
+ # # Save the image in the appropriate format based on file extension
711
+ # img.save(image_path)
712
+ # print(f"Image saved to {image_path}")
713
+
714
+ # return img
715
+
716
+ # except Exception as e:
717
+ # print(f"Error loading image: {e}")
718
+ # return None
719
+
720
+
721
+ # class QwenVLDetector():
722
+ # # Qwen 2.5 VL
723
+
724
+ # def __init__(self, model_dir=None, torch_dtype=torch.bfloat16, device="cuda:0", model_name="Qwen/Qwen2.5-VL-7B-Instruct", flash_attn=False):
725
+ # if model_dir is not None:
726
+ # # Load model and processor from local directory
727
+ # self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
728
+ # model_dir,
729
+ # torch_dtype=torch_dtype,
730
+ # device_map=device, # Explicitly use first GPU
731
+ # local_files_only=True # Ensures it only looks for local files
732
+ # )
733
+ # self.processor = AutoProcessor.from_pretrained(model_dir)
734
+ # else:
735
+ # if flash_attn:
736
+ # self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_name, torch_dtype=torch_dtype, device_map=device, attn_implementation="flash_attention_2")
737
+ # else:
738
+ # self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_name, torch_dtype=torch_dtype, device_map=device)
739
+ # self.processor = AutoProcessor.from_pretrained(model_name)
740
+
741
+ # # self.system_prompt = system_prompt
742
+ # self.prompt_manager = PromptManager()
743
+ # self._init_parameters()
744
+
745
+ # def _init_parameters(self):
746
+ # self.max_size = 1024
747
+ # self.max_new_tokens = 1024
748
+ # self.min_pixels = 512 * 512
749
+ # self.max_pixels = self.max_size * self.max_size
750
+
751
+ # def resize_image(self, img, max_size):
752
+ # # Get original dimensions
753
+ # width, height = img.size
754
+ # # If image already satisfies the size limit, return original
755
+ # if max(width, height) <= max_size:
756
+ # return img
757
+
758
+ # # Calculate scaling factor
759
+ # if width >= height:
760
+ # # Width is the long edge
761
+ # scaling_factor = max_size / width
762
+ # new_width = max_size
763
+ # new_height = int(height * scaling_factor)
764
+ # else:
765
+ # # Height is the long edge
766
+ # scaling_factor = max_size / height
767
+ # new_height = max_size
768
+ # new_width = int(width * scaling_factor)
769
+
770
+ # # Resize image
771
+ # resized_img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
772
+ # return resized_img
773
+
774
+ # # @title Parsing JSON output
775
+ # def _parse_json(self, json_output):
776
+ # # Parsing out the markdown fencing
777
+ # lines = json_output.splitlines()
778
+ # for i, line in enumerate(lines):
779
+ # if line == "```json":
780
+ # json_output = "\n".join(lines[i+1:]) # Remove everything before "```json"
781
+ # json_output = json_output.split("```")[0] # Remove everything after the closing "```"
782
+ # return json_output
783
+ # return None # Bounding box not found
784
+
785
+ # def calculate_iou(self, box1, box2):
786
+ # """
787
+ # Calculate the Intersection over Union (IoU) between two bounding boxes.
788
+
789
+ # Args:
790
+ # box1 (list): First bounding box in format [x1, y1, x2, y2]
791
+ # where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner
792
+ # box2 (list): Second bounding box in same format
793
+
794
+ # Returns:
795
+ # float: IoU value between 0 and 1
796
+ # """
797
+ # # Extract coordinates
798
+ # x1_1, y1_1, x2_1, y2_1 = box1
799
+ # x1_2, y1_2, x2_2, y2_2 = box2
800
+
801
+ # # Calculate area of each bounding box
802
+ # area1 = (x2_1 - x1_1) * (y2_1 - y1_1)
803
+ # area2 = (x2_2 - x1_2) * (y2_2 - y1_2)
804
+
805
+ # # Calculate coordinates of intersection
806
+ # x1_i = max(x1_1, x1_2)
807
+ # y1_i = max(y1_1, y1_2)
808
+ # x2_i = min(x2_1, x2_2)
809
+ # y2_i = min(y2_1, y2_2)
810
+
811
+ # # Check if there is an intersection
812
+ # if x2_i <= x1_i or y2_i <= y1_i:
813
+ # return 0.0
814
+
815
+ # # Calculate intersection area
816
+ # intersection_area = (x2_i - x1_i) * (y2_i - y1_i)
817
+
818
+ # # Calculate union area (sum of areas - intersection)
819
+ # union_area = area1 + area2 - intersection_area
820
+
821
+ # # Calculate IoU
822
+ # iou = intersection_area / union_area
823
+
824
+ # return iou
825
+
826
+ # def _check_bbox(self, bbox_with_label, bbox_coor, size, threshold=0.7):
827
+ # x1, y1, x2, y2 = bbox_coor
828
+ # width, height = size
829
+ # if (x2 - x1) * (y2 - y1) / width / height >= threshold:
830
+ # return False
831
+
832
+ # for idx, bbox_label in bbox_with_label.items():
833
+ # iou = self.calculate_iou(bbox_label['bbox'], bbox_coor)
834
+ # if iou >= threshold:
835
+ # return False
836
+
837
+ # return True
838
+
839
+ # def _get_bounding_boxes(self, response, input_size, size):
840
+ # # Parsing out the markdown fencing
841
+ # bounding_boxes = self._parse_json(response)
842
+ # if bounding_boxes is None:
843
+ # return dict()
844
+
845
+ # input_width, input_height = input_size
846
+ # width, height = size
847
+
848
+ # try:
849
+ # json_output = ast.literal_eval(bounding_boxes)
850
+ # except Exception as e:
851
+ # end_idx = bounding_boxes.rfind('"}') + len('"}')
852
+ # truncated_text = bounding_boxes[:end_idx] + "]"
853
+ # json_output = ast.literal_eval(truncated_text)
854
+
855
+ # # Iterate over the bounding boxes
856
+ # bbox_with_label = dict()
857
+
858
+ # if len(json_output) > 0:
859
+ # for i, bounding_box in enumerate(json_output):
860
+ # try:
861
+
862
+ # # Convert normalized coordinates to absolute coordinates
863
+ # abs_x1 = int(bounding_box["bbox_2d"][0] / input_width * width)
864
+ # abs_y1 = int(bounding_box["bbox_2d"][1] / input_height * height)
865
+ # abs_x2 = int(bounding_box["bbox_2d"][2] / input_width * width)
866
+ # abs_y2 = int(bounding_box["bbox_2d"][3] / input_height * height)
867
+
868
+ # if abs_x1 > abs_x2:
869
+ # abs_x1, abs_x2 = abs_x2, abs_x1
870
+
871
+ # if abs_y1 > abs_y2:
872
+ # abs_y1, abs_y2 = abs_y2, abs_y1
873
+
874
+ # if self._check_bbox(bbox_with_label, [abs_x1, abs_y1, abs_x2, abs_y2], size):
875
+ # bbox_with_label[i] = {'bbox': [abs_x1, abs_y1, abs_x2, abs_y2], 'label': bounding_box.get('label')}
876
+
877
+ # except Exception as e:
878
+ # print(f"Error {str(e)} for bounding box {bounding_box}")
879
+
880
+ # return bbox_with_label
881
+
882
+ # def _bbox_refine(self, response_org, messages, image_type, image_for_message):
883
+ # bbox_text = self._parse_json(response_org)
884
+ # if bbox_text is None:
885
+ # return response_org
886
+
887
+ # prompt_type = 'bbox_refine'
888
+ # text_prompt = self.prompt_manager.construct_prompt(bbox_text, prompt_type)
889
+ # messages[1]['content'][0]['text'] = text_prompt
890
+
891
+ # if image_type == 'str':
892
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
893
+ # image_inputs, video_inputs = process_vision_info(messages)
894
+ # inputs = self.processor(text=[text],
895
+ # images=image_inputs,
896
+ # videos=video_inputs,
897
+ # padding=True,
898
+ # return_tensors="pt"
899
+ # ).to(self.model.device)
900
+ # elif image_type == 'pil':
901
+ # # Assume it's already a PIL Image
902
+ # messages[1]['content'][1]['image'] = image_for_message
903
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
904
+ # inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
905
+ # else:
906
+ # raise NotImplementedError
907
+
908
+ # output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
909
+ # generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
910
+ # output_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
911
+ # response = output_text[0]
912
+
913
+ # del text, inputs, output_ids, generated_ids, output_text
914
+ # torch.cuda.empty_cache()
915
+
916
+ # return response
917
+
918
+ # def chat(self, image, text_prompt, system_prompt, prompt_type):
919
+ # self.text_prompt = self.prompt_manager.construct_prompt(text_prompt, prompt_type)
920
+
921
+ # # Handle the image input: either load from URL or use the PIL Image directly
922
+ # if isinstance(image, str):
923
+ # # It's a URL or path, load the image
924
+ # image_type = 'str'
925
+ # image_pil = load_image(image)
926
+ # size = image_pil.size
927
+ # messages = [
928
+ # {
929
+ # "role": "system",
930
+ # "content": system_prompt
931
+ # },
932
+ # {
933
+ # "role": "user",
934
+ # "content": [
935
+ # {
936
+ # "type": "text",
937
+ # "text": self.text_prompt
938
+ # },
939
+ # {
940
+ # "type": "image",
941
+ # "image": image,
942
+ # "min_pixels": self.min_pixels,
943
+ # "max_pixels": self.max_pixels
944
+ # }
945
+ # ]
946
+ # }
947
+ # ]
948
+
949
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
950
+ # image_inputs, video_inputs = process_vision_info(messages)
951
+ # inputs = self.processor(text=[text],
952
+ # images=image_inputs,
953
+ # videos=video_inputs,
954
+ # padding=True,
955
+ # return_tensors="pt"
956
+ # ).to(self.model.device)
957
+
958
+ # else:
959
+ # image_type = 'pil'
960
+ # # Assume it's already a PIL Image
961
+ # image_pil = image
962
+ # # For the message content, we need to use the image directly
963
+
964
+ # size = image_pil.size
965
+ # image_for_message = self.resize_image(image_pil, self.max_size)
966
+
967
+ # messages = [
968
+ # {
969
+ # "role": "system",
970
+ # "content": system_prompt
971
+ # },
972
+ # {
973
+ # "role": "user",
974
+ # "content": [
975
+ # {
976
+ # "type": "text",
977
+ # "text": self.text_prompt
978
+ # },
979
+ # {
980
+ # "type": "image",
981
+ # "image": image_for_message
982
+ # }
983
+ # ]
984
+ # }
985
+ # ]
986
+
987
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
988
+ # inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
989
+
990
+ # output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
991
+ # generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
992
+ # output_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
993
+ # response = output_text[0]
994
+
995
+ # input_height = inputs['image_grid_thw'][0][1] * 14
996
+ # input_width = inputs['image_grid_thw'][0][2] * 14
997
+ # input_size = (input_width.item(), input_height.item())
998
+
999
+ # del text, inputs, output_ids, generated_ids, output_text
1000
+ # torch.cuda.empty_cache()
1001
+
1002
+ # return {'response': response, 'input_size': input_size}
1003
+
1004
+ # def predict(self, image, text_prompt, system_prompt, prompt_type='object', bbox_refine=False):
1005
+ # self.text_prompt = self.prompt_manager.construct_prompt(text_prompt, prompt_type)
1006
+
1007
+ # # Handle the image input: either load from URL or use the PIL Image directly
1008
+ # if isinstance(image, str):
1009
+ # # It's a URL or path, load the image
1010
+ # image_type = 'str'
1011
+ # image_pil = load_image(image)
1012
+ # size = image_pil.size
1013
+ # image_for_message = image
1014
+ # messages = [
1015
+ # {
1016
+ # "role": "system",
1017
+ # "content": system_prompt
1018
+ # },
1019
+ # {
1020
+ # "role": "user",
1021
+ # "content": [
1022
+ # {
1023
+ # "type": "text",
1024
+ # "text": self.text_prompt
1025
+ # },
1026
+ # {
1027
+ # "type": "image",
1028
+ # "image": image,
1029
+ # "min_pixels": self.min_pixels,
1030
+ # "max_pixels": self.max_pixels
1031
+ # }
1032
+ # ]
1033
+ # }
1034
+ # ]
1035
+
1036
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
1037
+ # image_inputs, video_inputs = process_vision_info(messages)
1038
+ # inputs = self.processor(text=[text],
1039
+ # images=image_inputs,
1040
+ # videos=video_inputs,
1041
+ # padding=True,
1042
+ # return_tensors="pt"
1043
+ # ).to(self.model.device)
1044
+
1045
+ # else:
1046
+ # image_type = 'pil'
1047
+ # # Assume it's already a PIL Image
1048
+ # image_pil = image
1049
+ # # For the message content, we need to use the image directly
1050
+
1051
+ # size = image_pil.size
1052
+ # image_for_message = self.resize_image(image_pil, self.max_size)
1053
+
1054
+ # messages = [
1055
+ # {
1056
+ # "role": "system",
1057
+ # "content": system_prompt
1058
+ # },
1059
+ # {
1060
+ # "role": "user",
1061
+ # "content": [
1062
+ # {
1063
+ # "type": "text",
1064
+ # "text": self.text_prompt
1065
+ # },
1066
+ # {
1067
+ # "type": "image",
1068
+ # "image": image_for_message
1069
+ # }
1070
+ # ]
1071
+ # }
1072
+ # ]
1073
+
1074
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
1075
+ # inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
1076
+
1077
+ # output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
1078
+ # generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
1079
+ # output_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
1080
+
1081
+ # input_height = inputs['image_grid_thw'][0][1]*14
1082
+ # input_width = inputs['image_grid_thw'][0][2]*14
1083
+
1084
+ # response_org = output_text[0]
1085
+ # input_size = (input_width.item(), input_height.item())
1086
+
1087
+ # del text, inputs, output_ids, generated_ids, output_text
1088
+ # torch.cuda.empty_cache()
1089
+
1090
+ # if bbox_refine:
1091
+ # response = self._bbox_refine(response_org, messages, image_type, image_for_message)
1092
+ # else:
1093
+ # response = response_org
1094
+
1095
+ # results = dict()
1096
+ # bbox_with_label = self._get_bounding_boxes(response, input_size, size)
1097
+ # results = {'bbox_with_label': bbox_with_label, 'response': response, 'input_size': input_size}
1098
+
1099
+ # return results
1100
+
1101
+ # def plot_bounding_boxes(self, image, response, input_size, output_path='./temp.png'):
1102
+
1103
+ # # Load the image
1104
+ # img = image.copy()
1105
+ # width, height = img.size
1106
+ # input_width, input_height = input_size
1107
+
1108
+ # # Create a drawing object
1109
+ # draw = ImageDraw.Draw(img)
1110
+
1111
+ # additional_colors = [colorname for (colorname, colorcode) in ImageColor.colormap.items()]
1112
+
1113
+ # # Define a list of colors
1114
+ # colors = [
1115
+ # 'red',
1116
+ # 'green',
1117
+ # 'blue',
1118
+ # 'yellow',
1119
+ # 'orange',
1120
+ # 'pink',
1121
+ # 'purple',
1122
+ # 'brown',
1123
+ # 'gray',
1124
+ # 'beige',
1125
+ # 'turquoise',
1126
+ # 'cyan',
1127
+ # 'magenta',
1128
+ # 'lime',
1129
+ # 'navy',
1130
+ # 'maroon',
1131
+ # 'teal',
1132
+ # 'olive',
1133
+ # 'coral',
1134
+ # 'lavender',
1135
+ # 'violet',
1136
+ # 'gold',
1137
+ # 'silver',
1138
+ # ] + additional_colors
1139
+
1140
+ # # Parsing out the markdown fencing
1141
+ # bounding_boxes = self._parse_json(response)
1142
+
1143
+ # # font = ImageFont.truetype("NotoSansCJK-Regular.ttc", size=14)
1144
+ # font = ImageFont.load_default()
1145
+
1146
+ # # print(f"Bounding boxes: {bounding_boxes}")
1147
+
1148
+ # try:
1149
+ # # print(f"Bounding boxes: {bounding_boxes}")
1150
+ # if bounding_boxes is None:
1151
+ # print("No bounding boxes found in the response.")
1152
+ # return
1153
+ # json_output = ast.literal_eval(bounding_boxes)
1154
+ # except Exception as e:
1155
+ # end_idx = bounding_boxes.rfind('"}') + len('"}')
1156
+ # truncated_text = bounding_boxes[:end_idx] + "]"
1157
+ # json_output = ast.literal_eval(truncated_text)
1158
+
1159
+ # print(f"Parsed JSON output: {json_output}")
1160
+
1161
+ # if len(json_output) > 0:
1162
+
1163
+ # # Iterate over the bounding boxes
1164
+ # for i, bounding_box in enumerate(json_output):
1165
+ # try:
1166
+ # # Select a color from the list
1167
+ # color = colors[i % len(colors)]
1168
+ # # Convert normalized coordinates to absolute coordinates
1169
+ # abs_x1 = int(bounding_box["bbox_2d"][0] / input_width * width)
1170
+ # abs_y1 = int(bounding_box["bbox_2d"][1] / input_height * height)
1171
+ # abs_x2 = int(bounding_box["bbox_2d"][2] / input_width * width)
1172
+ # abs_y2 = int(bounding_box["bbox_2d"][3] / input_height * height)
1173
+
1174
+ # if abs_x1 > abs_x2:
1175
+ # abs_x1, abs_x2 = abs_x2, abs_x1
1176
+
1177
+ # if abs_y1 > abs_y2:
1178
+ # abs_y1, abs_y2 = abs_y2, abs_y1
1179
+
1180
+ # # Draw the bounding box
1181
+ # draw.rectangle(
1182
+ # ((abs_x1, abs_y1), (abs_x2, abs_y2)), outline=color, width=4
1183
+ # )
1184
+
1185
+ # # Draw the text
1186
+ # if "label" in bounding_box:
1187
+ # draw.text((abs_x1 + 8, abs_y1 + 6), bounding_box["label"], fill=color, font=font)
1188
+ # except Exception as e:
1189
+ # print(f"Error {str(e)} for bounding box {bounding_box}")
1190
+
1191
+ # img.save(output_path)
1192
+ # print(f"Image with bounding boxes saved to: {output_path}")
1193
+
1194
+
1195
+ # def predict_bounding_boxes(self, image, response, input_size):
1196
+
1197
+ # # Load the image
1198
+ # img = image.copy()
1199
+ # width, height = img.size
1200
+ # input_width, input_height = input_size
1201
+
1202
+ # # Create a drawing object
1203
+ # # draw = ImageDraw.Draw(img)
1204
+
1205
+ # additional_colors = [colorname for (colorname, colorcode) in ImageColor.colormap.items()]
1206
+
1207
+ # # # Define a list of colors
1208
+ # # colors = [
1209
+ # # 'red',
1210
+ # # 'green',
1211
+ # # 'blue',
1212
+ # # 'yellow',
1213
+ # # 'orange',
1214
+ # # 'pink',
1215
+ # # 'purple',
1216
+ # # 'brown',
1217
+ # # 'gray',
1218
+ # # 'beige',
1219
+ # # 'turquoise',
1220
+ # # 'cyan',
1221
+ # # 'magenta',
1222
+ # # 'lime',
1223
+ # # 'navy',
1224
+ # # 'maroon',
1225
+ # # 'teal',
1226
+ # # 'olive',
1227
+ # # 'coral',
1228
+ # # 'lavender',
1229
+ # # 'violet',
1230
+ # # 'gold',
1231
+ # # 'silver',
1232
+ # # ] + additional_colors
1233
+
1234
+ # # Parsing out the markdown fencing
1235
+ # bounding_boxes = self._parse_json(response)
1236
+
1237
+ # # # font = ImageFont.truetype("NotoSansCJK-Regular.ttc", size=14)
1238
+ # # font = ImageFont.load_default()
1239
+
1240
+ # # print(f"Bounding boxes: {bounding_boxes}")
1241
+
1242
+ # try:
1243
+ # # print(f"Bounding boxes: {bounding_boxes}")
1244
+ # if bounding_boxes is None:
1245
+ # print("No bounding boxes found in the response.")
1246
+ # return
1247
+ # json_output = ast.literal_eval(bounding_boxes)
1248
+ # except Exception as e:
1249
+ # end_idx = bounding_boxes.rfind('"}') + len('"}')
1250
+ # truncated_text = bounding_boxes[:end_idx] + "]"
1251
+ # json_output = ast.literal_eval(truncated_text)
1252
+
1253
+ # print(f"Parsed JSON output: {json_output}")
1254
+
1255
+ # if len(json_output) > 0:
1256
+
1257
+ # # Iterate over the bounding boxes
1258
+ # for i, bounding_box in enumerate(json_output):
1259
+ # try:
1260
+ # # # Select a color from the list
1261
+ # # color = colors[i % len(colors)]
1262
+ # # Convert normalized coordinates to absolute coordinates
1263
+ # abs_x1 = int(bounding_box["bbox_2d"][0] / input_width * width)
1264
+ # abs_y1 = int(bounding_box["bbox_2d"][1] / input_height * height)
1265
+ # abs_x2 = int(bounding_box["bbox_2d"][2] / input_width * width)
1266
+ # abs_y2 = int(bounding_box["bbox_2d"][3] / input_height * height)
1267
+
1268
+ # if abs_x1 > abs_x2:
1269
+ # abs_x1, abs_x2 = abs_x2, abs_x1
1270
+
1271
+ # if abs_y1 > abs_y2:
1272
+ # abs_y1, abs_y2 = abs_y2, abs_y1
1273
+
1274
+ # # # Draw the bounding box
1275
+ # # draw.rectangle(
1276
+ # # ((abs_x1, abs_y1), (abs_x2, abs_y2)), outline=color, width=4
1277
+ # # )
1278
+
1279
+ # # # Draw the text
1280
+ # # if "label" in bounding_box:
1281
+ # # draw.text((abs_x1 + 8, abs_y1 + 6), bounding_box["label"], fill=color, font=font)
1282
+
1283
+ # return [abs_x1, abs_y1, abs_x2, abs_y2]
1284
+ # except Exception as e:
1285
+ # print(f"Error {str(e)} for bounding box {bounding_box}")
1286
+
1287
+ # # img.save(output_path)
1288
+ # # print(f"Image with bounding boxes saved to: {output_path}")
1289
+
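
For the single-image path, a minimal end-to-end sketch of the detector defined in this file; the image path, device string, and simplified system prompt are assumptions for illustration only.

```python
from PIL import Image
from qwen_utils import QwenVLDetector

detector = QwenVLDetector(device="cuda:0")    # loads Qwen/Qwen2.5-VL-7B-Instruct by default
image = Image.open("example.jpg").convert("RGB")

# In practice the system prompt comes from prompts/sys_bbox.txt with its placeholders filled.
system_prompt = "You are a helpful assistant."
result = detector.predict(image, "left front wheel of the car", system_prompt,
                          prompt_type="object")
print(result["response"], result["input_size"])
detector.plot_bounding_boxes(image, result["response"], result["input_size"],
                             output_path="./pred_bbox.png")
```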
Code/qwen_inference/utils.py ADDED
@@ -0,0 +1,829 @@
1
+
2
+ import torch
3
+ import torch.nn.functional as F
4
+ from torch import Tensor
5
+ import numpy as np
6
+
7
+ def resize_and_pad(image_tensor, output_size):
8
+ """
9
+ Resizes an image tensor to a square shape by scaling and padding.
10
+
11
+ Args:
12
+ image_tensor (torch.Tensor): Input image tensor of shape (H, W).
13
+ output_size (int): The desired square output size.
14
+
15
+ Returns:
16
+ torch.Tensor: The resized and padded image tensor of shape (output_size, output_size).
17
+ """
18
+ original_h, original_w = image_tensor.shape
19
+
20
+ # 1. Calculate the scale factor to fit the longest side to output_size
21
+ scale = output_size / max(original_h, original_w)
22
+ new_h, new_w = int(original_h * scale), int(original_w * scale)
23
+
24
+ # Add batch and channel dimensions for F.interpolate
25
+ image_tensor = image_tensor.unsqueeze(0).unsqueeze(0)
26
+
27
+ # 2. Resize the image, preserving aspect ratio
28
+ resized_image = F.interpolate(image_tensor, size=(new_h, new_w), mode='bilinear', align_corners=False)
29
+
30
+ # 3. Calculate padding for the shorter side
31
+ pad_h = output_size - new_h
32
+ pad_w = output_size - new_w
33
+
34
+ # Padding format is (left, right, top, bottom)
35
+ padding = (pad_w // 2, pad_w - (pad_w // 2), pad_h // 2, pad_h - (pad_h // 2))
36
+
37
+ # 4. Pad the image with a constant value (0 for black)
38
+ padded_image = F.pad(resized_image, padding, "constant", 0)
39
+
40
+ return padded_image.squeeze()
41
+
42
+
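
A quick illustration of `resize_and_pad` (a sketch, assuming the helper above is in scope): a 300×200 map is scaled so its long side becomes 224, then zero-padded to a square.

```python
import torch

feat = torch.rand(300, 200)        # (H, W) tensor, as resize_and_pad expects
out = resize_and_pad(feat, 224)    # long side 300 -> 224, short side 200 -> 149, then padded
assert out.shape == (224, 224)
```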
43
+ def resize(img, target_res=224, resize=True, to_pil=True, edge=False):
44
+ original_width, original_height = img.size
45
+ original_channels = len(img.getbands())
46
+ if not edge:
47
+ canvas = np.zeros([target_res, target_res, 3], dtype=np.uint8)
48
+ if original_channels == 1:
49
+ canvas = np.zeros([target_res, target_res], dtype=np.uint8)
50
+ if original_height <= original_width:
51
+ if resize:
52
+ img = img.resize((target_res, int(np.around(target_res * original_height / original_width))), Image.Resampling.LANCZOS)
53
+ width, height = img.size
54
+ img = np.asarray(img)
55
+ canvas[(width - height) // 2: (width + height) // 2] = img
56
+ else:
57
+ if resize:
58
+ img = img.resize((int(np.around(target_res * original_width / original_height)), target_res), Image.Resampling.LANCZOS)
59
+ width, height = img.size
60
+ img = np.asarray(img)
61
+ canvas[:, (height - width) // 2: (height + width) // 2] = img
62
+ else:
63
+ if original_height <= original_width:
64
+ if resize:
65
+ img = img.resize((target_res, int(np.around(target_res * original_height / original_width))), Image.Resampling.LANCZOS)
66
+ width, height = img.size
67
+ img = np.asarray(img)
68
+ top_pad = (target_res - height) // 2
69
+ bottom_pad = target_res - height - top_pad
70
+ img = np.pad(img, pad_width=[(top_pad, bottom_pad), (0, 0), (0, 0)], mode='edge')
71
+ else:
72
+ if resize:
73
+ img = img.resize((int(np.around(target_res * original_width / original_height)), target_res), Image.Resampling.LANCZOS)
74
+ width, height = img.size
75
+ img = np.asarray(img)
76
+ left_pad = (target_res - width) // 2
77
+ right_pad = target_res - width - left_pad
78
+ img = np.pad(img, pad_width=[(0, 0), (left_pad, right_pad), (0, 0)], mode='edge')
79
+ canvas = img
80
+ if to_pil:
81
+ canvas = Image.fromarray(canvas)
82
+ return canvas
83
+
84
+ def scaled_shifted_sigmoid(
85
+ x: Tensor,
86
+ a: float = 1.0, # vertical scale
87
+ b: float = 1.0, # slope (steepness)
88
+ c: float = 0.0, # horizontal shift (bias)
89
+ d: float = 0.0, # vertical shift (baseline)
90
+ ) -> Tensor:
91
+ """
92
+ Compute a scaled-and-shifted sigmoid: y = a * sigmoid(b * x + c) + d.
93
+
94
+ Parameters
95
+ ----------
96
+ x : torch.Tensor
97
+ Input tensor.
98
+ a : float, default 1.0
99
+ Output scale (amplitude).
100
+ b : float, default 1.0
101
+ Input scale (controls slope).
102
+ c : float, default 0.0
103
+ Input shift (horizontal translation).
104
+ d : float, default 0.0
105
+ Output shift (vertical translation).
106
+
107
+ Returns
108
+ -------
109
+ torch.Tensor
110
+ Tensor with the same shape as `x` after applying the transformation.
111
+ """
112
+ return a * torch.sigmoid(b * x + c) + d
113
+
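
A tiny numeric check of `scaled_shifted_sigmoid` (a sketch with arbitrary parameter values): with a=2, b=1, c=0, d=-1 the transform squashes inputs into (-1, 1), since 2*sigmoid(x) - 1 = tanh(x/2).

```python
import torch

x = torch.tensor([-2.0, 0.0, 2.0])
y = scaled_shifted_sigmoid(x, a=2.0, b=1.0, c=0.0, d=-1.0)
# sigmoid(0) = 0.5 so y[1] == 0; sigmoid(+-2) ~= 0.881 / 0.119, so y ~= [-0.762, 0.000, 0.762]
```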
114
+
115
+ ############
116
+ # for 2D to 3D correspondence with cropping
117
+
118
+ from scipy.ndimage import distance_transform_edt as edt
119
+ from scipy.ndimage import gaussian_filter
120
+ # from skimage import img_as_ubyte
121
+ from PIL import Image
122
+ from pathlib import Path
123
+ import numpy as np
124
+ from typing import Tuple
125
+
126
+ # ✨ New helper to find the object's bounding box from transparency
127
+ def get_bbox_from_alpha(image_path: Path) -> Tuple[int, int, int, int]:
128
+ """Calculates a bounding box from the alpha channel of a PNG."""
129
+ with Image.open(image_path).convert("RGBA") as img:
130
+ alpha = np.array(img)[:, :, 3]
131
+ non_transparent_pixels = np.argwhere(alpha > 0)
132
+ y_min, x_min = non_transparent_pixels.min(axis=0)
133
+ y_max, x_max = non_transparent_pixels.max(axis=0)
134
+ return x_min, y_min, x_max, y_max
135
+
136
+ # ... (rest of your imports and functions)
137
+
138
+
139
+
140
+ #####################
141
+ # dataset utils loading functions
142
+ #####################
143
+ import os
144
+ import json
145
+ import numpy as np
146
+ import pandas as pd
147
+ import torch
148
+ from glob import glob
149
+ # from scipy.io import loadmat as read_mat
150
+ import scipy.io as sio
151
+
152
+
153
+ def read_mat(path, obj_name):
154
+ r"""Reads specified objects from Matlab data file, (.mat)"""
155
+ mat_contents = sio.loadmat(path)
156
+ mat_obj = mat_contents[obj_name]
157
+
158
+ return mat_obj
159
+
160
+ def process_kps_pascal(kps):
161
+ # Step 1: Reshape the array to (20, 2) by adding nan values
162
+ num_pad_rows = 20 - kps.shape[0]
163
+ if num_pad_rows > 0:
164
+ pad_values = np.full((num_pad_rows, 2), np.nan)
165
+ kps = np.vstack((kps, pad_values))
166
+
167
+ # Step 2: Reshape the array to (20, 3)
168
+ # Add an extra column: set to 1 if the row does not contain nan, 0 otherwise
169
+ last_col = np.isnan(kps).any(axis=1)
170
+ last_col = np.where(last_col, 0, 1)
171
+ kps = np.column_stack((kps, last_col))
172
+
173
+ # Step 3: Replace rows with nan values to all 0's
174
+ mask = np.isnan(kps).any(axis=1)
175
+ kps[mask] = 0
176
+
177
+ return torch.tensor(kps).float()
178
+
179
+ def preprocess_kps_pad(kps, img_width, img_height, size):
180
+ # Once an image has been pre-processed via border (or zero) padding,
181
+ # the location of key points needs to be updated. This function applies
182
+ # that pre-processing to the key points so they are correctly located
183
+ # in the border-padded (or zero-padded) image.
184
+ kps = kps.clone()
185
+ scale = size / max(img_width, img_height)
186
+ kps[:, [0, 1]] *= scale
187
+ if img_height < img_width:
188
+ new_h = int(np.around(size * img_height / img_width))
189
+ offset_y = int((size - new_h) / 2)
190
+ offset_x = 0
191
+ kps[:, 1] += offset_y
192
+ elif img_width < img_height:
193
+ new_w = int(np.around(size * img_width / img_height))
194
+ offset_x = int((size - new_w) / 2)
195
+ offset_y = 0
196
+ kps[:, 0] += offset_x
197
+ else:
198
+ offset_x = 0
199
+ offset_y = 0
200
+ kps *= kps[:, 2:3].clone() # zero-out any non-visible key points
201
+ return kps, offset_x, offset_y, scale
202
+
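A worked example of the keypoint re-mapping above (illustrative numbers): a 400x300 landscape image resized into a 256-square gets scale = 256 / 400 = 0.64 and a vertical offset of (256 - 192) // 2 = 32, so a visible keypoint at (100, 150) lands at (64, 128).

    import torch
    kps = torch.tensor([[100.0, 150.0, 1.0]])  # (x, y, visibility)
    out, off_x, off_y, scale = preprocess_kps_pad(kps, 400, 300, 256)
    # scale == 0.64, off_x == 0, off_y == 32, out[0] == tensor([64., 128., 1.])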
203
+
204
+ def load_pascal_data(path="data/PF-dataset-PASCAL", size=256, category='cat', split='test', subsample=None):
205
+
206
+ def get_points(point_coords_list, idx):
207
+ X = np.fromstring(point_coords_list.iloc[idx, 0], sep=";")
208
+ Y = np.fromstring(point_coords_list.iloc[idx, 1], sep=";")
209
+ Xpad = -np.ones(20)
210
+ Xpad[: len(X)] = X
211
+ Ypad = -np.ones(20)
212
+ Ypad[: len(X)] = Y
213
+ Zmask = np.zeros(20)
214
+ Zmask[: len(X)] = 1
215
+ point_coords = np.concatenate(
216
+ (Xpad.reshape(1, 20), Ypad.reshape(1, 20), Zmask.reshape(1,20)), axis=0
217
+ )
218
+ # make arrays float tensor for subsequent processing
219
+ point_coords = torch.Tensor(point_coords.astype(np.float32))
220
+ return point_coords
221
+
222
+ np.random.seed(42)
223
+ files = []
224
+ kps = []
225
+ test_data = pd.read_csv(f'{path}/{split}_pairs_pf_pascal.csv')
226
+ cls = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
227
+ 'bus', 'car', 'cat', 'chair', 'cow',
228
+ 'diningtable', 'dog', 'horse', 'motorbike', 'person',
229
+ 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
230
+ cls_ids = test_data.iloc[:,2].values.astype("int") - 1
231
+ cat_id = cls.index(category)
232
+ subset_id = np.where(cls_ids == cat_id)[0]
233
+ # logger.info(f'Number of Pairs for {category} = {len(subset_id)}')
234
+ subset_pairs = test_data.iloc[subset_id,:]
235
+ src_img_names = np.array(subset_pairs.iloc[:,0])
236
+ trg_img_names = np.array(subset_pairs.iloc[:,1])
237
+ # print(src_img_names.shape, trg_img_names.shape)
238
+ if not split.startswith('train'):
239
+ point_A_coords = subset_pairs.iloc[:,3:5]
240
+ point_B_coords = subset_pairs.iloc[:,5:]
241
+ # print(point_A_coords.shape, point_B_coords.shape)
242
+ for i in range(len(src_img_names)):
243
+ src_fn= f'{path}/../{src_img_names[i]}'
244
+ trg_fn= f'{path}/../{trg_img_names[i]}'
245
+ src_size=Image.open(src_fn).size
246
+ trg_size=Image.open(trg_fn).size
247
+
248
+ if not split.startswith('train'):
249
+ point_coords_src = get_points(point_A_coords, i).transpose(1,0)
250
+ point_coords_trg = get_points(point_B_coords, i).transpose(1,0)
251
+ else:
252
+ src_anns = os.path.join(path, 'Annotations', category,
253
+ os.path.basename(src_fn))[:-4] + '.mat'
254
+ trg_anns = os.path.join(path, 'Annotations', category,
255
+ os.path.basename(trg_fn))[:-4] + '.mat'
256
+ point_coords_src = process_kps_pascal(read_mat(src_anns, 'kps'))
257
+ point_coords_trg = process_kps_pascal(read_mat(trg_anns, 'kps'))
258
+
259
+ # print(src_size)
260
+ source_kps, src_x, src_y, src_scale = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
261
+ target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
262
+ kps.append(source_kps)
263
+ kps.append(target_kps)
264
+ files.append(src_fn)
265
+ files.append(trg_fn)
266
+
267
+ kps = torch.stack(kps)
268
+ used_kps, = torch.where(kps[:, :, 2].any(dim=0))
269
+ kps = kps[:, used_kps, :]
270
+ # logger.info(f'Final number of used key points: {kps.size(1)}')
271
+ return files, kps, None, used_kps
272
+
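A typical call to the loader above (the path follows the default; category and split are examples):

    files, kps, _, used_kps = load_pascal_data(
        path="data/PF-dataset-PASCAL", size=256, category="cat", split="test")
    # files alternates source/target image paths; kps has shape (len(files), n_used_kps, 3)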
273
+
274
+ def load_spair_data(path="data/SPair-71k", size=256, category='cat', split='test', subsample=None):
275
+ np.random.seed(42)
276
+ pairs = sorted(glob(f'{path}/PairAnnotation/{split}/*:{category}.json'))
277
+ if subsample is not None and subsample > 0:
278
+ pairs = [pairs[ix] for ix in np.random.choice(len(pairs), subsample)]
279
+ files = []
280
+ thresholds = []
281
+ kps = []
282
+ category_anno = list(glob(f'{path}/ImageAnnotation/{category}/*.json'))[0]
283
+ with open(category_anno) as f:
284
+ num_kps = len(json.load(f)['kps'])
285
+ for pair in pairs:
286
+ source_kps = torch.zeros(num_kps, 3)
287
+ target_kps = torch.zeros(num_kps, 3)
288
+ with open(pair) as f:
289
+ data = json.load(f)
290
+ assert category == data["category"]
291
+ source_fn = f'{path}/JPEGImages/{category}/{data["src_imname"]}'
292
+ target_fn = f'{path}/JPEGImages/{category}/{data["trg_imname"]}'
293
+ source_json_name = source_fn.replace('JPEGImages','ImageAnnotation').replace('jpg','json')
294
+ target_json_name = target_fn.replace('JPEGImages','ImageAnnotation').replace('jpg','json')
295
+ source_bbox = np.asarray(data["src_bndbox"]) # (x1, y1, x2, y2)
296
+ target_bbox = np.asarray(data["trg_bndbox"])
297
+ with open(source_json_name) as f:
298
+ file = json.load(f)
299
+ kpts_src = file['kps']
300
+ with open(target_json_name) as f:
301
+ file = json.load(f)
302
+ kpts_trg = file['kps']
303
+
304
+ source_size = data["src_imsize"][:2] # (W, H)
305
+ target_size = data["trg_imsize"][:2] # (W, H)
306
+
307
+ for i in range(30):
308
+ point = kpts_src[str(i)]
309
+ if point is None:
310
+ source_kps[i, :3] = 0
311
+ else:
312
+ source_kps[i, :2] = torch.Tensor(point).float() # set x and y
313
+ source_kps[i, 2] = 1
314
+ source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps, source_size[0], source_size[1], size)
315
+
316
+ for i in range(30):
317
+ point = kpts_trg[str(i)]
318
+ if point is None:
319
+ target_kps[i, :3] = 0
320
+ else:
321
+ target_kps[i, :2] = torch.Tensor(point).float()
322
+ target_kps[i, 2] = 1
323
+ # target_raw_kps = torch.cat([torch.tensor(data["trg_kps"], dtype=torch.float), torch.ones(kp_ixs.size(0), 1)], 1)
324
+ # target_kps = blank_kps.scatter(dim=0, index=kp_ixs, src=target_raw_kps)
325
+ target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps, target_size[0], target_size[1], size)
326
+ if split == 'test' or split == 'val':
327
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)
328
+ elif split == 'trn':
329
+ thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0])*src_scale)
330
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)
331
+
332
+ kps.append(source_kps)
333
+ kps.append(target_kps)
334
+ files.append(source_fn)
335
+ files.append(target_fn)
336
+ kps = torch.stack(kps)
337
+ used_kps, = torch.where(kps[:, :, 2].any(dim=0))
338
+ kps = kps[:, used_kps, :]
339
+
340
+ return files, kps, thresholds, used_kps
341
+
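And a matching call for SPair-71k (subsample is optional; here it randomly keeps 20 pairs):

    files, kps, thresholds, used_kps = load_spair_data(
        path="data/SPair-71k", size=256, category="aeroplane", split="test", subsample=20)
    # for the test split, thresholds holds one PCK normaliser per pair
    # (the longer side of the target bounding box, rescaled to the padded image)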
342
+
343
+ def load_specific_pascal_pair(
344
+ source_image_id: str,
345
+ target_image_id: str,
346
+ path: str = "data/PF-dataset-PASCAL",
347
+ size: int = 256,
348
+ split: str = 'test'
349
+ ):
350
+ """
351
+ Loads and processes a specific pair of source and target images from the PASCAL dataset.
352
+
353
+ Args:
354
+ source_image_id: The identifier of the source image (e.g., '2011_001407').
355
+ target_image_id: The identifier of the target image (e.g., '2010_004184').
356
+ path: The base path to the PF-PASCAL dataset directory.
357
+ size: The target size for preprocessing images.
358
+ split: The dataset split to use ('test', 'train', etc.).
359
+
360
+ Returns:
361
+ A tuple containing:
362
+ - files (list): A list with the full paths to the source and target images.
363
+ - kps (torch.Tensor): A tensor of processed keypoints for the image pair.
364
+ - None: A placeholder to match the original function's return format.
365
+ - used_kps_indices (torch.Tensor): A tensor of indices for keypoints present in either image.
366
+ """
367
+
368
+ def get_points_from_strings(x_str: str, y_str: str) -> torch.Tensor:
369
+ """Parses coordinate strings, pads them, and returns a tensor."""
370
+ X = np.fromstring(x_str, sep=";")
371
+ Y = np.fromstring(y_str, sep=";")
372
+
373
+ # Pad arrays to a fixed size of 20 (as in the original function)
374
+ Xpad = -np.ones(20)
375
+ Xpad[:len(X)] = X
376
+ Ypad = -np.ones(20)
377
+ Ypad[:len(Y)] = Y
378
+
379
+ # Create a mask for valid keypoints
380
+ Zmask = np.zeros(20)
381
+ Zmask[:len(X)] = 1
382
+
383
+ point_coords = np.stack((Xpad, Ypad, Zmask), axis=0)
384
+ return torch.from_numpy(point_coords.astype(np.float32))
385
+
386
+ # Construct the path to the CSV file and load it
387
+ csv_path = os.path.join(path, f'{split}_pairs_pf_pascal.csv')
388
+ try:
389
+ pairs_df = pd.read_csv(csv_path)
390
+ except FileNotFoundError:
391
+ print(f"Error: CSV file not found at '{csv_path}'")
392
+ return None, None, None, None
393
+
394
+ # Find the specific row matching the source and target image IDs
395
+ pair_row = pairs_df[
396
+ pairs_df['source_image'].str.contains(source_image_id) &
397
+ pairs_df['target_image'].str.contains(target_image_id)
398
+ ]
399
+
400
+ if pair_row.empty:
401
+ print(f"Error: Pair for source '{source_image_id}' and target '{target_image_id}' not found.")
402
+ return None, None, None, None
403
+
404
+ # Select the first match
405
+ pair_data = pair_row.iloc[0]
406
+
407
+ # Get full image paths and dimensions
408
+ src_fn = os.path.join(path, '..', pair_data['source_image'])
409
+ trg_fn = os.path.join(path, '..', pair_data['target_image'])
410
+
411
+ try:
412
+ src_size = Image.open(src_fn).size
413
+ trg_size = Image.open(trg_fn).size
414
+ except FileNotFoundError as e:
415
+ print(f"Error: Image file not found: {e.filename}")
416
+ return None, None, None, None
417
+
418
+ # Process keypoints based on the split type
419
+ if not split.startswith('train'):
420
+ point_coords_src = get_points_from_strings(pair_data['XA'], pair_data['YA']).T
421
+ point_coords_trg = get_points_from_strings(pair_data['XB'], pair_data['YB']).T
422
+ else:
423
+ # This logic for the 'train' split is preserved from the original function
424
+ cls_list = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
425
+ 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
426
+ 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
427
+ category = cls_list[pair_data['class'] - 1]
428
+
429
+ src_anns_path = os.path.join(path, 'Annotations', category, os.path.basename(src_fn).replace('.jpg', '.mat'))
430
+ trg_anns_path = os.path.join(path, 'Annotations', category, os.path.basename(trg_fn).replace('.jpg', '.mat'))
431
+
432
+ point_coords_src = process_kps_pascal(read_mat(src_anns_path, 'kps'))
433
+ point_coords_trg = process_kps_pascal(read_mat(trg_anns_path, 'kps'))
434
+
435
+ # Preprocess keypoints (e.g., padding and scaling)
436
+ source_kps, _, _, _ = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
437
+ target_kps, _, _, _ = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
438
+
439
+ # Stack keypoints and find the indices of keypoints present in at least one image
440
+ kps = torch.stack([source_kps, target_kps])
441
+ used_kps_indices, = torch.where(kps[:, :, 2].any(dim=0))
442
+
443
+ # Filter the keypoints tensor to include only the used keypoints
444
+ kps_final = kps[:, used_kps_indices, :]
445
+
446
+ return [src_fn, trg_fn], kps_final, None, used_kps_indices
447
+
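Example call using the image IDs quoted in the docstring above:

    files, kps, _, used_idx = load_specific_pascal_pair(
        source_image_id="2011_001407", target_image_id="2010_004184",
        path="data/PF-dataset-PASCAL", size=256, split="test")
    # kps[0] are the source keypoints, kps[1] the target keypoints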
448
+ import matplotlib.pyplot as plt
449
+ def load_img_and_kps(idx, files, kps, img_size=224, edge=False, load_masked=False):
450
+ if load_masked:
451
+ img_rgba = Image.open(files[idx].replace('JPEGImages', 'JPEGImages_bgd_rmv').replace('.jpg', '_bgd_rmv.png')).convert('RGBA')
452
+
453
+ # img_rgba = Image.open(path_image).convert("RGBA")
454
+
455
+         # 2. composite onto a solid background (black here, but any colour works)
456
+ img = Image.new("RGB", img_rgba.size, (0, 0, 0)) # choose any colour here
457
+ img.paste(img_rgba, mask=img_rgba.split()[3]) # mask = alpha channel
458
+ plt.imsave("img2_masked_before_resize.png", np.array(img))
459
+ # print(np.array(img).shape)
460
+ else:
461
+ img = Image.open(files[idx]).convert('RGB')
462
+ img = resize(img, img_size, resize=True, to_pil=True, edge=edge)
463
+ if load_masked:
464
+ plt.imsave("img2_masked_after_resize.png", np.array(img))
465
+ img_kps = kps[idx]
466
+
467
+ return img, img_kps
468
+
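A short usage sketch, assuming `files` and `kps` come from one of the loaders above and that the `resize` helper defined earlier is in scope:

    img_src, kps_src = load_img_and_kps(0, files, kps, img_size=256)  # source image
    img_trg, kps_trg = load_img_and_kps(1, files, kps, img_size=256)  # target image
    # img_size should match the `size` passed to the keypoint loader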
469
+
470
+ import os
471
+ import json
472
+ from glob import glob
473
+ import numpy as np
474
+ import torch
475
+
476
+ # NOTE: The helper function preprocess_kps_pad(kps, width, height, size)
477
+ # is assumed to be defined elsewhere, as in your original code.
478
+
479
+ def load_specific_spair_pair(
480
+ source_image_name: str,
481
+ target_image_name: str,
482
+ category: str,
483
+ path: str = "data/SPair-71k",
484
+ size: int = 256,
485
+ split: str = 'test',
486
+ unfiltered: bool = False
487
+
488
+ ):
489
+ """
490
+ Loads and processes a specific pair of images from the SPair-71k dataset.
491
+
492
+ Args:
493
+ source_image_name (str): Filename of the source image (e.g., '2008_002719.jpg').
494
+ target_image_name (str): Filename of the target image (e.g., '2008_004100.jpg').
495
+ category (str): The object category (e.g., 'aeroplane').
496
+ path (str): The base path to the SPair-71k dataset directory.
497
+ size (int): The target size for preprocessing images.
498
+         split (str): The dataset split to use ('test', 'trn', 'val').
+         unfiltered (bool): If True, return all keypoints instead of only those visible in at least one of the two images.
499
+
500
+ Returns:
501
+ A tuple containing:
502
+ - files (list): Full paths to the source and target images.
503
+ - kps (torch.Tensor): Processed keypoints for the pair.
504
+ - thresholds (list): Bounding-box based thresholds for the pair.
505
+ - used_kps_indices (torch.Tensor): Indices of keypoints present in either image.
506
+ """
507
+
508
+ # Helper to create a keypoint tensor from the annotation dictionary
509
+ def _get_kps_tensor(kps_dict, num_kps):
510
+ kps_tensor = torch.zeros(num_kps, 3)
511
+ for i in range(num_kps):
512
+ point = kps_dict.get(str(i)) # Use .get() for safety
513
+ if point is not None:
514
+ kps_tensor[i, :2] = torch.tensor(point, dtype=torch.float)
515
+ kps_tensor[i, 2] = 1.0 # Mark as visible
516
+ return kps_tensor
517
+
518
+ # --- 1. Find the correct pair annotation file ---
519
+ pair_annotation_path = os.path.join(path, 'PairAnnotation', split)
520
+ candidate_files = glob(os.path.join(pair_annotation_path, f'*:{category}.json'))
521
+
522
+ pair_data = None
523
+ for file_path in candidate_files:
524
+ with open(file_path) as f:
525
+ data = json.load(f)
526
+ if data['src_imname'] == source_image_name and data['trg_imname'] == target_image_name:
527
+ pair_data = data
528
+ break
529
+
530
+ if pair_data is None:
531
+ print(f"Error: Pair for '{source_image_name}' and '{target_image_name}' not found.")
532
+ return None, None, None, None
533
+
534
+ # --- 2. Process the found pair ---
535
+ source_fn = os.path.join(path, 'JPEGImages', category, pair_data['src_imname'])
536
+ target_fn = os.path.join(path, 'JPEGImages', category, pair_data['trg_imname'])
537
+ files = [source_fn, target_fn]
538
+
539
+ # Get total number of keypoints for the category
540
+ try:
541
+ category_anno_path = glob(os.path.join(path, 'ImageAnnotation', category, '*.json'))[0]
542
+ with open(category_anno_path) as f:
543
+ num_kps = len(json.load(f)['kps'])
544
+ except IndexError:
545
+ print(f"Error: No image annotations found for category '{category}'.")
546
+ return None, None, None, None
547
+
548
+ # Get keypoints from individual image annotation files
549
+ source_json_path = source_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
550
+ target_json_path = target_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
551
+
552
+ with open(source_json_path) as f:
553
+ kpts_src_dict = json.load(f)['kps']
554
+ with open(target_json_path) as f:
555
+ kpts_trg_dict = json.load(f)['kps']
556
+
557
+ source_kps_raw = _get_kps_tensor(kpts_src_dict, num_kps)
558
+ target_kps_raw = _get_kps_tensor(kpts_trg_dict, num_kps)
559
+
560
+ # print(f"Source keypoints raw: {source_kps_raw.shape}, Target keypoints raw: {target_kps_raw.shape}")
561
+
562
+ # Preprocess keypoints (padding, scaling, etc.)
563
+ w_src, h_src = pair_data["src_imsize"][:2]
564
+ w_trg, h_trg = pair_data["trg_imsize"][:2]
565
+
566
+ source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps_raw, w_src, h_src, size)
567
+ target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps_raw, w_trg, h_trg, size)
568
+
569
+ # Calculate thresholds from bounding boxes
570
+ source_bbox = np.asarray(pair_data["src_bndbox"])
571
+ target_bbox = np.asarray(pair_data["trg_bndbox"])
572
+ thresholds = []
573
+ if split == 'test' or split == 'val':
574
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
575
+ elif split == 'trn':
576
+ thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0]) * src_scale)
577
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
578
+
579
+ # --- 3. Format output ---
580
+ kps = torch.stack([source_kps, target_kps])
581
+ used_kps_indices, = torch.where(kps[:, :, 2].any(dim=0))
582
+ kps_final = kps[:, used_kps_indices, :]
583
+
584
+ if unfiltered:
585
+ return files, kps, thresholds, used_kps_indices
586
+ else:
587
+ return files, kps_final, thresholds, used_kps_indices
588
+
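Example call using the file names quoted in the docstring above:

    files, kps, thresholds, used_idx = load_specific_spair_pair(
        source_image_name="2008_002719.jpg", target_image_name="2008_004100.jpg",
        category="aeroplane", path="data/SPair-71k", size=256, split="test")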
589
+
590
+
591
+
592
+ ######################################
593
+ # original loading function
594
+ ######################################
595
+
596
+ def load_spair_data(path="data/SPair-71k", size=256, category='cat', split='test', subsample=None):
597
+ np.random.seed(42)
598
+ pairs = sorted(glob(f'{path}/PairAnnotation/{split}/*:{category}.json'))
599
+ if subsample is not None and subsample > 0:
600
+ pairs = [pairs[ix] for ix in np.random.choice(len(pairs), subsample)]
601
+ files = []
602
+ thresholds = []
603
+ kps = []
604
+ category_anno = list(glob(f'{path}/ImageAnnotation/{category}/*.json'))[0]
605
+ with open(category_anno) as f:
606
+ num_kps = len(json.load(f)['kps'])
607
+ for pair in pairs:
608
+ source_kps = torch.zeros(num_kps, 3)
609
+ target_kps = torch.zeros(num_kps, 3)
610
+ with open(pair) as f:
611
+ data = json.load(f)
612
+ assert category == data["category"]
613
+ source_fn = f'{path}/JPEGImages/{category}/{data["src_imname"]}'
614
+ target_fn = f'{path}/JPEGImages/{category}/{data["trg_imname"]}'
615
+ source_json_name = source_fn.replace('JPEGImages','ImageAnnotation').replace('jpg','json')
616
+ target_json_name = target_fn.replace('JPEGImages','ImageAnnotation').replace('jpg','json')
617
+ source_bbox = np.asarray(data["src_bndbox"]) # (x1, y1, x2, y2)
618
+ target_bbox = np.asarray(data["trg_bndbox"])
619
+ with open(source_json_name) as f:
620
+ file = json.load(f)
621
+ kpts_src = file['kps']
622
+ with open(target_json_name) as f:
623
+ file = json.load(f)
624
+ kpts_trg = file['kps']
625
+
626
+ source_size = data["src_imsize"][:2] # (W, H)
627
+ target_size = data["trg_imsize"][:2] # (W, H)
628
+
629
+ for i in range(30):
630
+ point = kpts_src[str(i)]
631
+ if point is None:
632
+ source_kps[i, :3] = 0
633
+ else:
634
+ source_kps[i, :2] = torch.Tensor(point).float() # set x and y
635
+ source_kps[i, 2] = 1
636
+ source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps, source_size[0], source_size[1], size)
637
+
638
+ for i in range(30):
639
+ point = kpts_trg[str(i)]
640
+ if point is None:
641
+ target_kps[i, :3] = 0
642
+ else:
643
+ target_kps[i, :2] = torch.Tensor(point).float()
644
+ target_kps[i, 2] = 1
645
+ # target_raw_kps = torch.cat([torch.tensor(data["trg_kps"], dtype=torch.float), torch.ones(kp_ixs.size(0), 1)], 1)
646
+ # target_kps = blank_kps.scatter(dim=0, index=kp_ixs, src=target_raw_kps)
647
+ target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps, target_size[0], target_size[1], size)
648
+ if split == 'test' or split == 'val':
649
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)
650
+ elif split == 'trn':
651
+ thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0])*src_scale)
652
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)
653
+
654
+ kps.append(source_kps)
655
+ kps.append(target_kps)
656
+ files.append(source_fn)
657
+ files.append(target_fn)
658
+ kps = torch.stack(kps)
659
+ used_kps, = torch.where(kps[:, :, 2].any(dim=0))
660
+ kps = kps[:, used_kps, :]
661
+
662
+ return files, kps, thresholds, used_kps
663
+
664
+
665
+ def load_pascal_data(path="data/PF-dataset-PASCAL", size=256, category='cat', split='test', subsample=None):
666
+
667
+ def get_points(point_coords_list, idx):
668
+ X = np.fromstring(point_coords_list.iloc[idx, 0], sep=";")
669
+ Y = np.fromstring(point_coords_list.iloc[idx, 1], sep=";")
670
+ Xpad = -np.ones(20)
671
+ Xpad[: len(X)] = X
672
+ Ypad = -np.ones(20)
673
+ Ypad[: len(X)] = Y
674
+ Zmask = np.zeros(20)
675
+ Zmask[: len(X)] = 1
676
+ point_coords = np.concatenate(
677
+ (Xpad.reshape(1, 20), Ypad.reshape(1, 20), Zmask.reshape(1,20)), axis=0
678
+ )
679
+ # make arrays float tensor for subsequent processing
680
+ point_coords = torch.Tensor(point_coords.astype(np.float32))
681
+ return point_coords
682
+
683
+ np.random.seed(42)
684
+ files = []
685
+ kps = []
686
+ test_data = pd.read_csv(f'{path}/{split}_pairs_pf_pascal.csv')
687
+ cls = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
688
+ 'bus', 'car', 'cat', 'chair', 'cow',
689
+ 'diningtable', 'dog', 'horse', 'motorbike', 'person',
690
+ 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
691
+ cls_ids = test_data.iloc[:,2].values.astype("int") - 1
692
+ cat_id = cls.index(category)
693
+ subset_id = np.where(cls_ids == cat_id)[0]
694
+ # logger.info(f'Number of Pairs for {category} = {len(subset_id)}')
695
+ subset_pairs = test_data.iloc[subset_id,:]
696
+ src_img_names = np.array(subset_pairs.iloc[:,0])
697
+ trg_img_names = np.array(subset_pairs.iloc[:,1])
698
+ # print(src_img_names.shape, trg_img_names.shape)
699
+ if not split.startswith('train'):
700
+ point_A_coords = subset_pairs.iloc[:,3:5]
701
+ point_B_coords = subset_pairs.iloc[:,5:]
702
+ # print(point_A_coords.shape, point_B_coords.shape)
703
+ for i in range(len(src_img_names)):
704
+ src_fn= f'{path}/../{src_img_names[i]}'
705
+ trg_fn= f'{path}/../{trg_img_names[i]}'
706
+ src_size=Image.open(src_fn).size
707
+ trg_size=Image.open(trg_fn).size
708
+
709
+ if not split.startswith('train'):
710
+ point_coords_src = get_points(point_A_coords, i).transpose(1,0)
711
+ point_coords_trg = get_points(point_B_coords, i).transpose(1,0)
712
+ else:
713
+ src_anns = os.path.join(path, 'Annotations', category,
714
+ os.path.basename(src_fn))[:-4] + '.mat'
715
+ trg_anns = os.path.join(path, 'Annotations', category,
716
+ os.path.basename(trg_fn))[:-4] + '.mat'
717
+ point_coords_src = process_kps_pascal(read_mat(src_anns, 'kps'))
718
+ point_coords_trg = process_kps_pascal(read_mat(trg_anns, 'kps'))
719
+
720
+ # print(src_size)
721
+ source_kps, src_x, src_y, src_scale = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
722
+ target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
723
+ kps.append(source_kps)
724
+ kps.append(target_kps)
725
+ files.append(src_fn)
726
+ files.append(trg_fn)
727
+
728
+ kps = torch.stack(kps)
729
+ used_kps, = torch.where(kps[:, :, 2].any(dim=0))
730
+ kps = kps[:, used_kps, :]
731
+ # logger.info(f'Final number of used key points: {kps.size(1)}')
732
+ return files, kps, None, used_kps
733
+
734
+
735
+ def load_eval_data(args, path, category, split):
736
+ # if args.EVAL_DATASET == 'ap10k':
737
+ # files, kps, thresholds, used_kps = load_ap10k_data(path, args.ANNO_SIZE, category, split, args.TEST_SAMPLE)
738
+ print(f"Loading evaluation data for dataset: {args.EVAL_DATASET}, category: {category}, split: {split}, test sample: {args.TEST_SAMPLE}")
739
+ if args.EVAL_DATASET == 'pascal':
740
+ files, kps, thresholds, used_kps = load_pascal_data(path, args.ANNO_SIZE, category, split, args.TEST_SAMPLE)
741
+ elif args.EVAL_DATASET == 'spair':
742
+ files, kps, thresholds, used_kps = load_spair_data(path, args.ANNO_SIZE, category, split, args.TEST_SAMPLE)
743
+
744
+ return files, kps, thresholds, used_kps
745
+
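A minimal sketch of the `args` object this dispatcher expects (only the attributes read here and by the loaders; the values are examples):

    from types import SimpleNamespace
    args = SimpleNamespace(EVAL_DATASET="spair", ANNO_SIZE=840, TEST_SAMPLE=0)
    files, kps, thresholds, used_kps = load_eval_data(args, "data/SPair-71k", "cat", split="test")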
746
+
747
+ ###### plot helper
748
+ from PIL import Image, ImageDraw, ImageFont
749
+
750
+ def draw_bbox_point_grid(
751
+ image,
752
+ bbox=None,
753
+ point=None,
754
+ box_color=(0, 255, 0),
755
+ pt_color=(255, 0, 0),
756
+ width=5,
757
+ draw_grid=False,
758
+ step=50, # pixels between grid lines
759
+ grid_color=(255, 255, 255),
760
+ grid_width=1,
761
+ add_text=True,
762
+ dilation=28
763
+ ):
764
+ """Draw bbox, point, and optional grid on a PIL image.
765
+
766
+ Args
767
+ ----
768
+ image (PIL.Image): target image (modified in place if not copied).
769
+ bbox (list | tuple): [x1, y1, x2, y2] or None.
770
+ point (tuple): (x, y) or None.
771
+     box_color / pt_color (tuple): RGB colours for the bbox outline and the point marker.
772
+ width (int): line width for bbox.
773
+ draw_grid (bool): enable/disable grid.
774
+ step (int): grid spacing in pixels.
775
+ grid_color (tuple): RGB for grid.
776
+ grid_width (int): line width for grid.
777
+ """
778
+ draw = ImageDraw.Draw(image)
779
+
780
+ if dilation > 0 and bbox is not None:
781
+ # Dilation logic: expand bbox by dilation pixels
782
+ x1, y1, x2, y2 = bbox
783
+ bbox = (x1 - dilation, y1 - dilation, x2 + dilation, y2 + dilation)
784
+
785
+ # ── draw grid ───────────────────────────────────────────
786
+ if draw_grid and step > 0:
787
+ w, h = image.size
788
+ # vertical lines
789
+ for x in range(0, w, step):
790
+ draw.line([(x, 0), (x, h)], fill=grid_color, width=grid_width)
791
+ # horizontal lines
792
+ for y in range(0, h, step):
793
+ draw.line([(0, y), (w, y)], fill=grid_color, width=grid_width)
794
+
795
+ # ── draw bbox ──────────────────────────────────────────
796
+ if bbox is not None:
797
+ draw.rectangle(bbox, outline=box_color, width=width)
798
+
799
+ # ── draw point ─────────────────────────────────────────
800
+ if point is not None:
801
+ radius = 20
802
+ x, y = point
803
+ draw.ellipse(
804
+ (x - radius, y - radius, x + radius, y + radius),
805
+ fill=pt_color
806
+ )
807
+         # add a white text label centered on the point
+         if add_text:
+             text = "Ref"
+             # Try to use a nicer font; fall back to the default if it is unavailable
+             try:
+                 font = ImageFont.truetype("DejaVuSans.ttf", size=26)
+             except IOError:
+                 font = ImageFont.load_default()
+
+             # Get the text bounding box for centering
+             bbox_text = draw.textbbox((0, 0), text, font=font)
+             text_width = bbox_text[2] - bbox_text[0]
+             text_height = bbox_text[3] - bbox_text[1]
+
+             text_x = x - text_width // 2
+             text_y = y - text_height // 2
+             draw.text((text_x, text_y), text, font=font, fill=(255, 255, 255))
827
+
828
+
829
+ return image
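A usage sketch for the plotting helper (the image path and coordinates are hypothetical; note that the image is drawn on in place and also returned):

    from PIL import Image
    img = Image.open("example.jpg").convert("RGB")
    img = draw_bbox_point_grid(img, bbox=[100, 120, 300, 340], point=(200, 230),
                               draw_grid=True, step=50, dilation=0, add_text=False)
    img.save("example_annotated.jpg")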
Code/sc_dit/all_comb_dp_pck_results.csv ADDED
The diff for this file is too large to render. See raw diff
 
Code/sc_dit/dataset.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ def get_dataset_info(args, split):
4
+ if args.EVAL_DATASET == 'pascal':
5
+ # data_dir = 'data/PF-dataset-PASCAL'
6
+ data_dir = '../../Datasets/PF-dataset-PASCAL'
7
+ categories = sorted(os.listdir(os.path.join(data_dir, 'Annotations')))
8
+ # elif args.EVAL_DATASET == 'ap10k':
9
+ # data_dir = 'data/ap-10k'
10
+ # categories = []
11
+ # subfolders = os.listdir(os.path.join(data_dir, 'ImageAnnotation'))
12
+ # # Handle AP10K_EVAL test settings
13
+ # if args.AP10K_EVAL_SUBSET == 'intra-species':
14
+ # categories = [folder for subfolder in subfolders for folder in os.listdir(os.path.join(data_dir, 'ImageAnnotation', subfolder))]
15
+ # elif args.AP10K_EVAL_SUBSET == 'cross-species':
16
+ # categories = [subfolder for subfolder in subfolders if len(os.listdir(os.path.join(data_dir, 'ImageAnnotation', subfolder))) > 1]
17
+ # split += '_cross_species'
18
+ # elif args.AP10K_EVAL_SUBSET == 'cross-family':
19
+ # categories = ['all']
20
+ # split += '_cross_family'
21
+ # categories = sorted(categories)
22
+ # if split == 'val':
23
+ # # remove category "king cheetah" from categories, since it is not present in the validation set
24
+ # categories.remove('king cheetah')
25
+ elif args.EVAL_DATASET == 'spair': # SPair
26
+ # data_dir = 'data/SPair-71k'
27
+ data_dir = '../../Datasets/SPair-71k'
28
+ categories = sorted(os.listdir(os.path.join(data_dir, 'ImageAnnotation')))
29
+
30
+ return data_dir, categories, split
31
+
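A minimal sketch of how this helper is typically called (only the EVAL_DATASET attribute is read here):

    from types import SimpleNamespace
    args = SimpleNamespace(EVAL_DATASET="spair")
    data_dir, categories, split = get_dataset_info(args, split="test")
    # data_dir -> '../../Datasets/SPair-71k'; categories are the ImageAnnotation sub-folders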
32
+
33
+
34
+ # SPair-71k dataset for batch processing
35
+ from PIL import Image
36
+ from torch.utils.data import Dataset
37
+
38
+ class VLDataset(Dataset):
39
+ """A simple dataset to wrap a list of images and prompts for the DataLoader."""
40
+ def __init__(self, images, prompts):
41
+ self.images = images
42
+ self.prompts = prompts
43
+
44
+ def __len__(self):
45
+ return len(self.images)
46
+
47
+ def __getitem__(self, idx):
48
+ # The DataLoader will call this for each item
49
+ return self.images[idx], self.prompts[idx]
50
+
51
+
52
+ class VLDatasetPaired(Dataset):
53
+ """A simple dataset to wrap a list of images and prompts for the DataLoader."""
54
+ def __init__(self, source_imgs, target_imgs, prompts):
55
+ self.source_imgs = source_imgs
56
+ self.target_imgs = target_imgs
57
+ self.prompts = prompts
58
+
59
+ def __len__(self):
60
+ return len(self.source_imgs)
61
+
62
+ def __getitem__(self, idx):
63
+ # The DataLoader will call this for each item
64
+ return self.source_imgs[idx], self.target_imgs[idx], self.prompts[idx]
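A sketch of how these wrappers can feed a DataLoader; the placeholder images and prompts are illustrative, and the custom collate_fn simply regroups the pairs because PIL images cannot be stacked by the default collate:

    from PIL import Image
    from torch.utils.data import DataLoader
    images = [Image.new("RGB", (840, 840)) for _ in range(4)]          # placeholder images
    prompts = [f"Locate keypoint {i} in the image." for i in range(4)]
    loader = DataLoader(VLDataset(images, prompts), batch_size=2,
                        shuffle=False, collate_fn=lambda batch: list(zip(*batch)))
    for batch_images, batch_prompts in loader:
        pass  # feed the batch to the VLM processor here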
Code/sc_dit/eval_bbox_acc.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import os
3
+ import argparse
4
+ import pandas as pd
5
+ import ast
6
+ from tqdm import tqdm
7
+ import json
8
+
9
+ # custom imports
10
+ from dataset import get_dataset_info
11
+ from utils import load_eval_data
12
+
13
+
14
+ # def compute_bbox_accuracy(pred_bbox, gt_point, dilation=0):
15
+ # x1, y1, x2, y2 = pred_bbox
16
+ # if dilation > 0:
17
+ # # Dilate the bounding box
18
+ # x1 -= dilation
19
+ # y1 -= dilation
20
+ # x2 += dilation
21
+ # y2 += dilation
22
+ # x_gt, y_gt = gt_point
23
+
24
+ # # Check if the ground truth point is inside the predicted bounding box
25
+ # if x1 <= x_gt <= x2 and y1 <= y_gt <= y2:
26
+ # return 1.0
27
+ # else:
28
+ # return 0.0
29
+
30
+
31
+ import numpy as np
32
+ import os
33
+ import argparse
34
+ import pandas as pd
35
+ import ast
36
+ from tqdm import tqdm
37
+
38
+ # custom imports
39
+ from dataset import get_dataset_info
40
+ from utils import load_eval_data
41
+
42
+ def get_evaluation_outcome(pred_bbox, gt_point, dilation=0):
43
+ """Evaluates a single prediction and returns the outcome (TP, FP, FN, or TN)."""
44
+ is_gt_visible = not np.array_equal(gt_point, [0, 0])
45
+ is_pred_valid = pred_bbox is not None and pred_bbox != [0, 0, 0, 0]
46
+
47
+ if is_gt_visible:
48
+ if is_pred_valid:
49
+ x1, y1, x2, y2 = pred_bbox
50
+ if dilation > 0:
51
+ x1, y1, x2, y2 = x1 - dilation, y1 - dilation, x2 + dilation, y2 + dilation
52
+ x_gt, y_gt = gt_point
53
+ is_hit = (x1 <= x_gt <= x2) and (y1 <= y_gt <= y2)
54
+ return 'tp' if is_hit else 'fp'
55
+ else:
56
+ return 'fn'
57
+ else: # GT is not visible
58
+ return 'fp' if is_pred_valid else 'tn'
59
+
60
+ def calculate_metrics(tp, fp, fn, tn):
61
+ """Calculates all metrics from the confusion matrix components."""
62
+ total = tp + fp + fn + tn
63
+ accuracy = (tp + tn) / total if total > 0 else 0.0
64
+ precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
65
+ recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
66
+ f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0
67
+ return {"Accuracy": accuracy, "Precision": precision, "Recall": recall, "F1-Score": f1}
68
+
69
+ def main(args):
70
+ results_df = pd.read_csv(args.RESULT_DIR)
71
+ data_dir, categories, split = get_dataset_info(args, split='test')
72
+
73
+
74
+ # --- Prepare directory for saving wrong predictions ---
75
+ wrong_pred_dir = args.OUTPUT_WRONG_PRED
76
+ if wrong_pred_dir:
77
+ # Ensure the directory exists
78
+ os.makedirs(wrong_pred_dir, exist_ok=True)
79
+ print(f"Wrong predictions will be saved in: {wrong_pred_dir}")
80
+ # if args.OUTPUT_CSV:
81
+ # base_dir = os.path.dirname(args.OUTPUT_CSV)
82
+ # if base_dir: # Ensure base_dir is not empty if path is just a filename
83
+ # wrong_pred_dir = os.path.join(base_dir, "wrong_predictions")
84
+ # os.makedirs(wrong_pred_dir, exist_ok=True)
85
+
86
+ print("Pre-loading ground truth data into a lookup table...")
87
+ gt_data = {}
88
+ for category in categories:
89
+ gt_data[category] = {}
90
+ files, kps, _, _ = load_eval_data(args, data_dir, category, split=split)
91
+ for i, file_path in enumerate(files):
92
+ file_id = file_path.split('/')[-1].split('.')[0]
93
+ gt_data[category][file_id] = kps[i]
94
+
95
+ # Expanded stats dictionary to track source and target separately
96
+ stats = {cat: {'tp_src': 0, 'fp_src': 0, 'fn_src': 0, 'tn_src': 0,
97
+ 'tp_tgt': 0, 'fp_tgt': 0, 'fn_tgt': 0, 'tn_tgt': 0} for cat in categories}
98
+
99
+ matched_rows = 0
100
+
101
+ print("Evaluating predictions...")
102
+ for _, row in tqdm(results_df.iterrows(), total=len(results_df)):
103
+ category, src_id, tgt_id, kpt_id = row['category'], row['src_id'], row['tgt_id'], row['kpt_id']
104
+
105
+ if src_id not in gt_data.get(category, {}) or tgt_id not in gt_data.get(category, {}):
106
+ continue
107
+
108
+ matched_rows += 1
109
+
110
+ # --- Process Source Image ---
111
+ kps_src_img = gt_data[category][src_id][kpt_id]
112
+ is_src_visible = kps_src_img.numpy()[2] == 1
113
+ src_kp_gt = kps_src_img.numpy()[:2] if is_src_visible else np.array([0, 0])
114
+ src_bbox = ast.literal_eval(row['src_bbox']) if pd.notna(row['src_bbox']) else None
115
+ src_outcome = get_evaluation_outcome(src_bbox, src_kp_gt, args.DILATION)
116
+ stats[category][f"{src_outcome.lower()}_src"] += 1
117
+
118
+ # --- Process Target Image ---
119
+ kps_tgt_img = gt_data[category][tgt_id][kpt_id]
120
+ is_tgt_visible = kps_tgt_img.numpy()[2] == 1
121
+ tgt_kp_gt = kps_tgt_img.numpy()[:2] if is_tgt_visible else np.array([0, 0])
122
+ tgt_bbox = ast.literal_eval(row['tgt_bbox']) if pd.notna(row['tgt_bbox']) else None
123
+ tgt_outcome = get_evaluation_outcome(tgt_bbox, tgt_kp_gt, args.DILATION)
124
+ stats[category][f"{tgt_outcome.lower()}_tgt"] += 1
125
+
126
+ # --- NEW: Log wrong predictions if a save path is provided ---
127
+ if wrong_pred_dir:
128
+ if src_outcome in ['fp', 'fn'] or tgt_outcome in ['fp', 'fn']:
129
+ filename = f"{category}_{src_id}_{tgt_id}_kps:{kpt_id}_src:{src_outcome}_tgt:{tgt_outcome}.json"
130
+ data_to_save = {'src_name': src_id,
131
+ 'src_pred_bbox': src_bbox,
132
+ 'src_gt_point': src_kp_gt.tolist(),
133
+ 'tgt_name': tgt_id,
134
+ 'tgt_pred_bbox': tgt_bbox,
135
+ 'tgt_gt_point': tgt_kp_gt.tolist()
136
+ }
137
+ with open(os.path.join(wrong_pred_dir, filename), 'w') as f:
138
+ json.dump(data_to_save, f, indent=4)
139
+
140
+
141
+
142
+
143
+
144
+ # print(stats)
145
+
146
+ print(f"\nSuccessfully matched and evaluated {matched_rows} rows.")
147
+ if matched_rows == 0:
148
+ print("WARNING: No rows were matched. Check IDs in your CSV.")
149
+ return
150
+
151
+ # --- Step 4: Aggregate results for reporting and saving ---
152
+ reports = {'source': {}, 'target': {}}
153
+ src_totals = {'tp': 0, 'fp': 0, 'fn': 0, 'tn': 0}
154
+ tgt_totals = {'tp': 0, 'fp': 0, 'fn': 0, 'tn': 0}
155
+
156
+ # Calculate metrics for each category and accumulate totals
157
+ for category, s in stats.items():
158
+ # Correctly assign metrics to 'source' and 'target' reports
159
+ reports['source'][category] = calculate_metrics(s['tp_src'], s['fp_src'], s['fn_src'], s['tn_src'])
160
+ reports['target'][category] = calculate_metrics(s['tp_tgt'], s['fp_tgt'], s['fn_tgt'], s['tn_tgt'])
161
+
162
+ # Accumulate totals for the 'Average' column
163
+ for key in src_totals.keys():
164
+ src_totals[key] += s[f'{key}_src']
165
+ tgt_totals[key] += s[f'{key}_tgt']
166
+
167
+ # Calculate overall average metrics
168
+ reports['source']['Average'] = calculate_metrics(src_totals['tp'], src_totals['fp'], src_totals['fn'], src_totals['tn'])
169
+ reports['target']['Average'] = calculate_metrics(tgt_totals['tp'], tgt_totals['fp'], tgt_totals['fn'], tgt_totals['tn'])
170
+
171
+ # --- Step 5: Print reports and save to CSV ---
172
+ for report_type in ['source', 'target']:
173
+ df = pd.DataFrame(reports[report_type])
174
+
175
+ print(f"\n--- Evaluation Summary ({report_type.capitalize()} Images) ---")
176
+ print(df.round(4).to_string())
177
+
178
+ # Check if the user provided an output path
179
+ if args.OUTPUT_CSV:
180
+ # Create a specific filename for each report
181
+ base, ext = os.path.splitext(args.OUTPUT_CSV)
182
+ save_path = f"{base}_{report_type}{ext}"
183
+ try:
184
+ df.to_csv(save_path)
185
+ print(f"Results successfully saved to: {save_path}")
186
+ except Exception as e:
187
+ print(f"\nError saving CSV file to {save_path}: {e}")
188
+
189
+ if __name__ == "__main__":
190
+ parser = argparse.ArgumentParser(description="Evaluate bounding box accuracy")
191
+ # ... (all your arguments are the same) ...
192
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, choices=['pascal', 'spair'])
193
+ parser.add_argument('--ANNO_SIZE', type=int, default=840)
194
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0)
195
+ parser.add_argument('--RESULT_DIR', type=str, required=True)
196
+ parser.add_argument('--DILATION', type=int, default=0)
197
+ parser.add_argument('--OUTPUT_CSV', type=str, default=None, help='Optional path to save the final report. Will create _src.csv and _tgt.csv.')
198
+     parser.add_argument('--OUTPUT_WRONG_PRED', type=str, default=None, help='Optional directory in which to save JSON records of wrong predictions; it is created if it does not exist.')
199
+
200
+ args = parser.parse_args()
201
+ main(args)
202
+
Code/sc_dit/evaluate_vlm_judge.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import numpy as np
3
+ import ast
4
+ import argparse
5
+ import os
6
+ from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
7
+ from tqdm import tqdm
8
+ import json
9
+ from collections import defaultdict
10
+
11
+ # --- Utility functions & imports shared with the other evaluation scripts ---
12
+ # NOTE: this script expects 'utils.py' and 'dataset.py' to provide
13
+ # load_eval_data and get_dataset_info.
14
+
15
+ from utils import load_eval_data
16
+ from dataset import get_dataset_info
17
+
18
+ # --- Re-used function from your script ---
19
+ def get_evaluation_outcome(pred_bbox, gt_point, dilation=0):
20
+ """
21
+ Evaluates a single prediction and returns the outcome (TP, FP, FN, or TN).
22
+ """
23
+ is_gt_visible = not np.array_equal(gt_point, [0, 0])
24
+ is_pred_valid = pred_bbox is not None and pred_bbox != [0, 0, 0, 0]
25
+
26
+ if is_gt_visible:
27
+ if is_pred_valid:
28
+ x1, y1, x2, y2 = pred_bbox
29
+ if dilation > 0:
30
+ x1, y1, x2, y2 = x1 - dilation, y1 - dilation, x2 + dilation, y2 + dilation
31
+ x_gt, y_gt = gt_point
32
+ is_hit = (x1 <= x_gt <= x2) and (y1 <= y_gt <= y2)
33
+ return 'tp' if is_hit else 'fp'
34
+ else:
35
+ return 'fn'
36
+ else:
37
+ return 'fp' if is_pred_valid else 'tn'
38
+
39
+ # --- Main Analysis Function ---
40
+ def analyze_accuracy(args):
41
+ """
42
+ Analyzes the accuracy of the 'final_answer' column against programmatic evaluation.
43
+ """
44
+ # --- 1. Load and Merge Data ---
45
+ print("Loading and merging data files...")
46
+ try:
47
+ # Load the file with human-provided "yes"/"no" answers
48
+ answers_df = pd.read_csv(args.ANSWER_FILE)
49
+ # Load the original prediction results which contain the bounding boxes
50
+ results_df = pd.read_csv(args.RESULT_DIR)
51
+ except FileNotFoundError as e:
52
+ print(f"Error: {e}. Please check your file paths.")
53
+ return
54
+
55
+ # Merge the two dataframes to have bounding boxes and final_answer in one place
56
+ # The key identifies a unique prediction for a specific keypoint in a specific image pair
57
+ merge_keys = ['category', 'src_id', 'tgt_id', 'kpt_id']
58
+ merged_df = pd.merge(answers_df, results_df, on=merge_keys, how='left')
59
+
60
+ if merged_df['src_bbox'].isnull().any():
61
+ print("Warning: Some rows in the answer file could not be matched with results. They will be skipped.")
62
+ merged_df.dropna(subset=['src_bbox', 'tgt_bbox'], inplace=True)
63
+
64
+ print(f"Successfully merged data. Total rows to analyze: {len(merged_df)}")
65
+
66
+ # --- 2. Load Ground Truth Data ---
67
+ data_dir, categories, split = get_dataset_info(args, split='test')
68
+ print("Pre-loading ground truth data into a lookup table...")
69
+ gt_data = {}
70
+ for category in tqdm(categories, desc="Loading GT"):
71
+ gt_data[category] = {}
72
+ files, kps, _, _ = load_eval_data(args, data_dir, category, split=split)
73
+ for i, file_path in enumerate(files):
74
+ file_id = os.path.basename(file_path).split('.')[0]
75
+ print('loading gt file path:', file_path)
76
+ gt_data[category][file_id] = kps[i]
77
+
78
+ # --- 3. Evaluate and Compare ---
79
+ y_true_script = [] # What the script determines (Correct/Incorrect)
80
+ y_pred_answer = [] # What the 'final_answer' column says (yes/no)
81
+
82
+ # create a dictionary to hold the results for each pair
83
+ results_columns = ['src_id', 'tgt_id', 'kpt_id', 'gt_judge', 'pred_judge']
84
+ results_data = defaultdict(list)
85
+
86
+ print("Comparing 'final_answer' against script evaluation...")
87
+ for _, row in tqdm(merged_df.iterrows(), total=len(merged_df), desc="Analyzing rows"):
88
+ category = row['category']
89
+ src_id = str(row['src_id'])
90
+ tgt_id = str(row['tgt_id'])
91
+ kpt_id = int(row['kpt_id'])
92
+
93
+ # Skip if ground truth is missing for some reason
94
+ if src_id not in gt_data.get(category, {}) or tgt_id not in gt_data.get(category, {}):
95
+ continue
96
+
97
+ # # Evaluate Source Image
98
+ # kps_src_img = gt_data[category][src_id][kpt_id]
99
+ # src_kp_gt = kps_src_img.numpy()[:2] if kps_src_img.numpy()[2] == 1 else np.array([0, 0])
100
+ # src_bbox_pred = ast.literal_eval(row['src_bbox']) if pd.notna(row['src_bbox']) else None
101
+ # src_outcome = get_evaluation_outcome(src_bbox_pred, src_kp_gt, args.DILATION)
102
+
103
+ # Evaluate Target Image
104
+ kps_tgt_img = gt_data[category][tgt_id][kpt_id]
105
+ tgt_kp_gt = kps_tgt_img.numpy()[:2] if kps_tgt_img.numpy()[2] == 1 else np.array([0, 0])
106
+ tgt_bbox_pred = ast.literal_eval(row['tgt_bbox']) if pd.notna(row['tgt_bbox']) else None
107
+ tgt_outcome = get_evaluation_outcome(tgt_bbox_pred, tgt_kp_gt, args.DILATION)
108
+
109
+ # --- Determine the "ground truth" based on script logic ---
110
+ # A prediction is considered correct if BOTH source and target are 'tp' or 'tn'.
111
+ # is_script_correct = (src_outcome in ['tp', 'tn']) and (tgt_outcome in ['tp', 'tn'])
112
+ is_script_correct = (tgt_outcome in ['tp', 'tn'])
113
+ script_label = 'yes' if is_script_correct else 'no'
114
+
115
+ y_true_script.append(script_label)
116
+ y_pred_answer.append(row['final_answer'].lower()) # Use lower() for consistency
117
+
118
+ # --- Store results for this pair ---
119
+ results_data['src_id'].append(src_id)
120
+ results_data['tgt_id'].append(tgt_id)
121
+ results_data['kpt_id'].append(kpt_id)
122
+ results_data['gt_judge'].append(script_label)
123
+ results_data['pred_judge'].append(row['final_answer'].lower())
124
+
125
+
126
+ # --- 4. Report Metrics ---
127
+ if not y_true_script:
128
+ print("\nNo data was processed. Cannot generate a report. Check your file paths and content.")
129
+ return
130
+
131
+ print("\n--- Evaluation Report ---")
132
+
133
+ print(f"Total pairs evaluated: {len(y_true_script)}")
134
+ print(f"Total pair predictions: {len(y_pred_answer)}")
135
+ # print(f"Correctly identified pairs (script): {y_true_script.count('yes')
136
+
137
+ # Accuracy
138
+ accuracy = accuracy_score(y_true_script, y_pred_answer)
139
+ print(f"Overall Accuracy: {accuracy:.4f}\n")
140
+
141
+ # Confusion Matrix
142
+ labels = sorted(list(set(y_true_script) | set(y_pred_answer)))
143
+ cm = confusion_matrix(y_true_script, y_pred_answer, labels=labels)
144
+ cm_df = pd.DataFrame(cm, index=[f'Actual: {l}' for l in labels], columns=[f'Predicted: {l}' for l in labels])
145
+
146
+ print("Confusion Matrix:")
147
+ print(cm_df)
148
+ print("\n(Rows are the actual values from the script, Columns are the 'final_answer' predictions)")
149
+
150
+ # Classification Report (Precision, Recall, F1-Score)
151
+ print("\nClassification Report:")
152
+ report = classification_report(y_true_script, y_pred_answer, labels=labels, zero_division=0)
153
+ print(report)
154
+
155
+ # Save results to a DataFrame and then to a CSV
156
+ results_df = pd.DataFrame(results_data)
157
+ output_csv = os.path.join(args.OUTPUT_DIR, 'evaluation_results.csv')
158
+ os.makedirs(args.OUTPUT_DIR, exist_ok=True)
159
+ results_df.to_csv(output_csv, index=False)
160
+ print(f"\nResults saved to '{output_csv}'")
161
+
162
+ def filter_with_answers(args):
163
+ """
164
+ Filters predictions based on a 'final_answer' file and saves them to JSON.
165
+ """
166
+ # --- 1. Load and Merge Data ---
167
+ print("Loading and merging data files...")
168
+ try:
169
+ # Load the file with "yes"/"no" answers
170
+ answers_df = pd.read_csv(args.ANSWER_FILE)
171
+ # Load the original prediction results which contain the bounding boxes
172
+ results_df = pd.read_csv(args.RESULT_DIR)
173
+ except FileNotFoundError as e:
174
+ print(f"Error: {e}. Please check your file paths.")
175
+ return
176
+
177
+ # Merge dataframes to align bounding boxes with their corresponding "yes"/"no" answer
178
+ merge_keys = ['category', 'src_id', 'tgt_id', 'kpt_id']
179
+ merged_df = pd.merge(answers_df, results_df, on=merge_keys, how='left')
180
+
181
+ # Warn user if some answers don't have a matching prediction
182
+ if merged_df['src_bbox'].isnull().any():
183
+ print("Warning: Some rows in the answer file could not be matched with results. Their bboxes will be null.")
184
+
185
+ print(f"Successfully merged data. Total rows to process: {len(merged_df)}")
186
+
187
+ # --- 2. Load Ground Truth Data (for output file consistency) ---
188
+ data_dir, categories, split = get_dataset_info(args, split='test')
189
+ print("Pre-loading ground truth keypoint data...")
190
+ gt_data = {}
191
+ for category in tqdm(categories, desc="Loading GT"):
192
+ gt_data[category] = {}
193
+ files, kps, _, _ = load_eval_data(args, data_dir, category, split=split)
194
+ for i, file_path in enumerate(files):
195
+ file_id = os.path.basename(file_path).split('.')[0]
196
+ gt_data[category][file_id] = kps[i]
197
+
198
+ # --- 3. Prepare Data Structure for Final Output ---
199
+ pairs_data = defaultdict(lambda: {
200
+ 'src_bbox': [None] * 30,
201
+ 'tgt_bbox': [None] * 30,
202
+ 'src_kp_gt': [None] * 30,
203
+ 'tgt_kp_gt': [None] * 30
204
+ })
205
+
206
+ # --- 4. Process and Filter Each Row Based on 'final_answer' ---
207
+ print("Applying filtering logic based on 'final_answer' column...")
208
+ for _, row in tqdm(merged_df.iterrows(), total=len(merged_df)):
209
+ category = row['category']
210
+ src_id = str(row['src_id'])
211
+ tgt_id = str(row['tgt_id'])
212
+ kpt_id = int(row['kpt_id'])
213
+
214
+ final_answer = str(row['final_answer']).lower()
215
+
216
+ # Initialize final bboxes as None (to be discarded)
217
+ final_src_bbox = None
218
+ final_tgt_bbox = None
219
+
220
+ # If the answer is 'yes', we keep the bounding box
221
+ if final_answer == 'yes':
222
+ # if True:
223
+ # Safely evaluate the bounding box string from the CSV
224
+ src_bbox_pred = ast.literal_eval(row['src_bbox']) if pd.notna(row['src_bbox']) else [0, 0, 0, 0]
225
+ tgt_bbox_pred = ast.literal_eval(row['tgt_bbox']) if pd.notna(row['tgt_bbox']) else [0, 0, 0, 0]
226
+
227
+ # Apply dilation if specified and the box exists
228
+ if args.DILATION > 0:
229
+ if src_bbox_pred:
230
+ src_bbox_pred = [max(0, coord - args.DILATION) for coord in src_bbox_pred[:2]] + \
231
+ [coord + args.DILATION for coord in src_bbox_pred[2:]]
232
+ if tgt_bbox_pred:
233
+ tgt_bbox_pred = [max(0, coord - args.DILATION) for coord in tgt_bbox_pred[:2]] + \
234
+ [coord + args.DILATION for coord in tgt_bbox_pred[2:]]
235
+
236
+ final_src_bbox = src_bbox_pred
237
+ final_tgt_bbox = tgt_bbox_pred
238
+
239
+ # --- Store Filtered Data ---
240
+ pair_key = (src_id, tgt_id)
241
+ if 'src_name' not in pairs_data[pair_key]:
242
+ pairs_data[pair_key]['category'] = category
243
+ pairs_data[pair_key]['src_name'] = src_id
244
+ pairs_data[pair_key]['tgt_name'] = tgt_id
245
+
246
+ if 0 <= kpt_id < 30:
247
+ # Get GT keypoints for the output file
248
+ src_kp_gt, tgt_kp_gt = np.array([0,0]), np.array([0,0]) # Default
249
+ if src_id in gt_data.get(category, {}) and kpt_id < len(gt_data[category][src_id]):
250
+ kps_src_img = gt_data[category][src_id][kpt_id]
251
+ if kps_src_img.numpy()[2] == 1:
252
+ src_kp_gt = kps_src_img.numpy()[:2]
253
+
254
+ if tgt_id in gt_data.get(category, {}) and kpt_id < len(gt_data[category][tgt_id]):
255
+ kps_tgt_img = gt_data[category][tgt_id][kpt_id]
256
+ if kps_tgt_img.numpy()[2] == 1:
257
+ tgt_kp_gt = kps_tgt_img.numpy()[:2]
258
+
259
+ pairs_data[pair_key]['src_bbox'][kpt_id] = final_src_bbox
260
+ pairs_data[pair_key]['tgt_bbox'][kpt_id] = final_tgt_bbox
261
+ pairs_data[pair_key]['src_kp_gt'][kpt_id] = src_kp_gt.tolist()
262
+ pairs_data[pair_key]['tgt_kp_gt'][kpt_id] = tgt_kp_gt.tolist()
263
+
264
+ # --- 5. Save Filtered Data to JSON Files ---
265
+ output_dir = args.OUTPUT_DIR
266
+ os.makedirs(output_dir, exist_ok=True)
267
+ print(f"\nSaving {len(pairs_data)} filtered JSON files to '{output_dir}'...")
268
+
269
+ for pair_key, data in tqdm(pairs_data.items(), desc="Saving JSON"):
270
+ src_id, tgt_id = pair_key
271
+ output_filename = f"{src_id}-{tgt_id}.json"
272
+ output_filepath = os.path.join(output_dir, output_filename)
273
+ try:
274
+ with open(output_filepath, mode='w', encoding='utf-8') as jsonfile:
275
+ json.dump(data, jsonfile, indent=4)
276
+ except IOError as e:
277
+ print(f"Error writing to file '{output_filepath}': {e}")
278
+
279
+ print("Processing complete.")
280
+
281
+ if __name__ == '__main__':
282
+ parser = argparse.ArgumentParser(
283
+ description="Evaluate the accuracy of a 'final_answer' column in a CSV file."
284
+ )
285
+ # Args from your original script needed for evaluation
286
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, choices=['pascal', 'spair'], help="Dataset to use for evaluation.")
287
+ parser.add_argument('--DILATION', type=int, default=0, help="Dilation for bounding box evaluation.")
288
+
289
+ # Args for the two files we need to compare
290
+ parser.add_argument('--RESULT_DIR', type=str, required=True, help="Path to the original CSV file with prediction bounding boxes.")
291
+ parser.add_argument('--ANSWER_FILE', type=str, required=True, help="Path to the new CSV file with the 'final_answer' column.")
292
+ parser.add_argument('--OUTPUT_DIR', type=str, required=True, help="Path to the directory for the final filtered JSON files.")
293
+
294
+ # Dummy args to satisfy your utility functions if needed
295
+ parser.add_argument('--ANNO_SIZE', type=int, default=840)
296
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0)
297
+
298
+ args = parser.parse_args()
299
+ analyze_accuracy(args)
300
+ # filter_with_answers(args)
Code/sc_dit/experiment_vlm.ipynb ADDED
@@ -0,0 +1,594 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "99fe7a9b",
6
+ "metadata": {},
7
+ "source": [
8
+ "## VLM coarse correspondence evaluation"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": null,
14
+ "id": "9a91edc6",
15
+ "metadata": {},
16
+ "outputs": [],
17
+ "source": [
18
+ "!python predict_correspondence_vlm.py --SYSTEM_PROMPT_SEM './prompts/sys_semantics.txt' --SYSTEM_PROMPT_BBOX './prompts/sys_bbox.txt' --TASK_PROMPT_SEM './prompts/tsk_semantics.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 0 --EXP_NOTE 'Qwen Demo Debug'"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "markdown",
23
+ "id": "a6837f94",
24
+ "metadata": {},
25
+ "source": [
26
+ "## VLM batch inference"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "code",
31
+ "execution_count": null,
32
+ "id": "196013ca",
33
+ "metadata": {},
34
+ "outputs": [],
35
+ "source": [
36
+ "!python inference_batched.py --SYSTEM_PROMPT_SEM './prompts/sys_semantics.txt' --SYSTEM_PROMPT_BBOX './prompts/sys_bbox.txt' --TASK_PROMPT_SEM './prompts/tsk_semantics.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 1 --EXP_NOTE 'Qwen-VL-7B batch Sub 1 test'"
37
+ ]
38
+ },
39
+ {
40
+ "cell_type": "code",
41
+ "execution_count": null,
42
+ "id": "5933b997",
43
+ "metadata": {},
44
+ "outputs": [],
45
+ "source": [
46
+ "!python inference_batched.py --SYSTEM_PROMPT_SEM './prompts/sys_semantics.txt' --SYSTEM_PROMPT_BBOX './prompts/sys_bbox.txt' --TASK_PROMPT_SEM './prompts/tsk_semantics.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 10 --EXP_NOTE 'Qwen-VL-32B batch Sub 10'"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "code",
51
+ "execution_count": null,
52
+ "id": "9ee0ae2a",
53
+ "metadata": {},
54
+ "outputs": [],
55
+ "source": [
56
+ "!python inference_batched.py --SYSTEM_PROMPT_SEM './prompts/sys_semantics.txt' --SYSTEM_PROMPT_BBOX './prompts/sys_bbox.txt' --TASK_PROMPT_SEM './prompts/tsk_semantics.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 1 --EXP_NOTE 'Qwen-VL-7B batch Sub 1 DEBUG' --MODEL_NAME 'Qwen/Qwen2.5-VL-7B-Instruct' --DEBUG"
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": null,
62
+ "id": "45cd5cde",
63
+ "metadata": {},
64
+ "outputs": [],
65
+ "source": [
66
+ "!python inference_batched.py --SYSTEM_PROMPT_SEM './prompts/sys_semantics.txt' --SYSTEM_PROMPT_BBOX './prompts/sys_bbox.txt' --TASK_PROMPT_SEM './prompts/tsk_semantics.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 1 --EXP_NOTE 'Qwen-VL-32B batch Sub 1' --MODEL_NAME 'Qwen/Qwen2.5-VL-32B-Instruct'"
67
+ ]
68
+ },
69
+ {
70
+ "cell_type": "code",
71
+ "execution_count": null,
72
+ "id": "76290aaf",
73
+ "metadata": {},
74
+ "outputs": [],
75
+ "source": [
76
+ "!python inference_batched.py --SYSTEM_PROMPT_SEM './prompts/sys_semantics.txt' --SYSTEM_PROMPT_BBOX './prompts/sys_bbox.txt' --TASK_PROMPT_SEM './prompts/tsk_semantics.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 10 --EXP_NOTE 'Qwen-VL-32B batch Sub 10' --MODEL_NAME 'Qwen/Qwen2.5-VL-32B-Instruct'"
77
+ ]
78
+ },
79
+ {
80
+ "cell_type": "markdown",
81
+ "id": "c5eb4df5",
82
+ "metadata": {},
83
+ "source": [
84
+ "with confidence"
85
+ ]
86
+ },
87
+ {
88
+ "cell_type": "code",
89
+ "execution_count": null,
90
+ "id": "84647c37",
91
+ "metadata": {},
92
+ "outputs": [],
93
+ "source": [
94
+ "!python inference_batched_with_conf.py --SYSTEM_PROMPT_SEM './prompts/sys_semantics_enhanced.txt' --SYSTEM_PROMPT_BBOX './prompts/sys_bbox_with_conf.txt' --TASK_PROMPT_SEM './prompts/tsk_semantics.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 1 --EXP_NOTE 'Qwen-VL-32B batch Sub 1' --MODEL_NAME 'Qwen/Qwen2.5-VL-32B-Instruct'"
95
+ ]
96
+ },
97
+ {
98
+ "cell_type": "code",
99
+ "execution_count": null,
100
+ "id": "eb629e68",
101
+ "metadata": {},
102
+ "outputs": [],
103
+ "source": [
104
+ "!python inference_batched_with_conf.py --SYSTEM_PROMPT_SEM './prompts/sys_semantics_enhanced.txt' --SYSTEM_PROMPT_BBOX './prompts/sys_bbox_with_conf.txt' --TASK_PROMPT_SEM './prompts/tsk_semantics.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 1 --EXP_NOTE 'Qwen-VL-32B Sub 1 with conf and eh sem' --MODEL_NAME 'Qwen/Qwen2.5-VL-32B-Instruct'"
105
+ ]
106
+ },
107
+ {
108
+ "cell_type": "markdown",
109
+ "id": "b0e750be",
110
+ "metadata": {},
111
+ "source": [
112
+ "gpt batched"
113
+ ]
114
+ },
115
+ {
116
+ "cell_type": "code",
117
+ "execution_count": null,
118
+ "id": "d42d796f",
119
+ "metadata": {},
120
+ "outputs": [],
121
+ "source": [
122
+ "!python inference_gpt_batched.py --SYSTEM_PROMPT './prompts/sys_paired.txt' --TASK_PROMPT './prompts/tsk_paired.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 1 --MODEL_NAME='gpt-4.1-mini' --EXP_NOTE 'GPT 4.1 mini batch Sub 1'"
123
+ ]
124
+ },
125
+ {
126
+ "cell_type": "code",
127
+ "execution_count": null,
128
+ "id": "562e12ab",
129
+ "metadata": {},
130
+ "outputs": [],
131
+ "source": [
132
+ "!python inference_gpt_batched.py --SYSTEM_PROMPT './prompts/sys_paired.txt' --TASK_PROMPT './prompts/tsk_paired.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 10 --MODEL_NAME='gpt-4.1-mini' --EXP_NOTE 'GPT 4.1 mini batch Sub 10'"
133
+ ]
134
+ },
135
+ {
136
+ "cell_type": "markdown",
137
+ "id": "f6ea9073",
138
+ "metadata": {},
139
+ "source": [
140
+ "## VLM evaluation"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": null,
146
+ "id": "267db584",
147
+ "metadata": {},
148
+ "outputs": [],
149
+ "source": [
150
+ "!python eval_bbox_acc.py --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 1 --RESULT_DIR './results_vlm/csv/test_sample_1_qwen_vl_7b.csv' --DILATION 14 --OUTPUT_CSV './results_vlm/csv/test_sample_1_qwen_vl_7b_acc.csv'"
151
+ ]
152
+ },
153
+ {
154
+ "cell_type": "code",
155
+ "execution_count": null,
156
+ "id": "b914ee05",
157
+ "metadata": {},
158
+ "outputs": [],
159
+ "source": [
160
+ "!python eval_bbox_acc.py --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 20 --RESULT_DIR './results_vlm/csv/test_sample_20_qwen_vl_32b.csv' --DILATION 28 --OUTPUT_CSV './results_vlm/csv/test_sample_20_qwen_vl_32b_acc.csv'"
161
+ ]
162
+ },
163
+ {
164
+ "cell_type": "code",
165
+ "execution_count": null,
166
+ "id": "eb4e3aa6",
167
+ "metadata": {},
168
+ "outputs": [],
169
+ "source": [
170
+ "!python eval_bbox_acc.py --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 10 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b.csv' --DILATION 28 --OUTPUT_CSV './results_vlm/csv/test_sample_10_qwen_vl_32b_acc.csv' --OUTPUT_WRONG_PRED './results_vlm/wrong_predictions/test_sample_10_qwen_vl_32b'"
171
+ ]
172
+ },
173
+ {
174
+ "cell_type": "code",
175
+ "execution_count": null,
176
+ "id": "6a926a5e",
177
+ "metadata": {},
178
+ "outputs": [],
179
+ "source": [
180
+ "!python eval_bbox_acc.py --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 10 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b.csv' --DILATION 84 --OUTPUT_CSV './results_vlm/csv/test_sample_10_qwen_vl_32b_acc_dilation_84.csv' --OUTPUT_WRONG_PRED './results_vlm/wrong_predictions/test_sample_10_qwen_vl_32b_dilation_84'"
181
+ ]
182
+ },
183
+ {
184
+ "cell_type": "code",
185
+ "execution_count": null,
186
+ "id": "5a926c0a",
187
+ "metadata": {},
188
+ "outputs": [],
189
+ "source": [
190
+ "!python eval_bbox_acc.py --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 1 --RESULT_DIR './results_vlm/csv/test_sample_1_qwen_vl_32b.csv' --DILATION 28 --OUTPUT_CSV './results_vlm/csv/test_sample_1_qwen_vl_32b_acc.csv' --OUTPUT_WRONG_PRED './results_vlm/wrong_predictions/test_sample_1_qwen_vl_32b'"
191
+ ]
192
+ },
193
+ {
194
+ "cell_type": "code",
195
+ "execution_count": null,
196
+ "id": "9217a832",
197
+ "metadata": {},
198
+ "outputs": [],
199
+ "source": [
200
+ "!python eval_bbox_acc.py --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 10 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b_conf_improved_sem.csv' --DILATION 28 --OUTPUT_CSV './results_vlm/csv/test_sample_10_qwen_vl_32b_conf_improved_sem_acc.csv' --OUTPUT_WRONG_PRED './results_vlm/wrong_predictions/test_sample_10_qwen_vl_32b_conf_improved_sem'"
201
+ ]
202
+ },
203
+ {
204
+ "cell_type": "code",
205
+ "execution_count": null,
206
+ "id": "7dcf1b50",
207
+ "metadata": {},
208
+ "outputs": [],
209
+ "source": [
210
+ "!python eval_bbox_acc.py --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 10 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b_with_crop.csv' --DILATION 28 --OUTPUT_CSV './results_vlm/csv/test_sample_10_qwen_vl_32b_with_crop_acc.csv' --OUTPUT_WRONG_PRED './results_vlm/wrong_predictions/test_sample_10_qwen_vl_32b_with_crop'"
211
+ ]
212
+ },
213
+ {
214
+ "cell_type": "code",
215
+ "execution_count": null,
216
+ "id": "8e5978e6",
217
+ "metadata": {},
218
+ "outputs": [],
219
+ "source": [
220
+ "!python eval_bbox_acc.py --EVAL_DATASET 'spair' --ANNO_SIZE 840 --TEST_SAMPLE 10 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b_with_crop.csv' --DILATION 84 --OUTPUT_CSV './results_vlm/csv/test_sample_10_qwen_vl_32b_with_crop_acc_dilation_84.csv' --OUTPUT_WRONG_PRED './results_vlm/wrong_predictions/test_sample_10_qwen_vl_32b_with_crop_dilation_84'"
221
+ ]
222
+ },
223
+ {
224
+ "cell_type": "code",
225
+ "execution_count": null,
226
+ "id": "1b628dc5",
227
+ "metadata": {},
228
+ "outputs": [],
229
+ "source": []
230
+ },
231
+ {
232
+ "cell_type": "markdown",
233
+ "id": "c062f2cd",
234
+ "metadata": {},
235
+ "source": [
236
+ "## VLM prediction extraction"
237
+ ]
238
+ },
239
+ {
240
+ "cell_type": "code",
241
+ "execution_count": null,
242
+ "id": "0b525eee",
243
+ "metadata": {},
244
+ "outputs": [],
245
+ "source": [
246
+ "!python extract_vlm_prediction.py --EVAL_DATASET 'spair' --TEST_SAMPLE 10 --DILATION 28 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b.csv' --OUTPUT_DIR './results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b'"
247
+ ]
248
+ },
249
+ {
250
+ "cell_type": "code",
251
+ "execution_count": null,
252
+ "id": "c0f498bb",
253
+ "metadata": {},
254
+ "outputs": [],
255
+ "source": [
256
+ "!python extract_vlm_prediction.py --EVAL_DATASET 'spair' --TEST_SAMPLE 10 --DILATION 0 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b.csv' --OUTPUT_DIR './results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b_dilation_0'"
257
+ ]
258
+ },
259
+ {
260
+ "cell_type": "code",
261
+ "execution_count": null,
262
+ "id": "c73d5f93",
263
+ "metadata": {},
264
+ "outputs": [],
265
+ "source": [
266
+ "!python extract_vlm_prediction.py --EVAL_DATASET 'spair' --TEST_SAMPLE 10 --DILATION 28 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b.csv' --OUTPUT_DIR './results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b_dilation_28'"
267
+ ]
268
+ },
269
+ {
270
+ "cell_type": "markdown",
271
+ "id": "c518febb",
272
+ "metadata": {},
273
+ "source": [
274
+ "## VLM judge"
275
+ ]
276
+ },
277
+ {
278
+ "cell_type": "code",
279
+ "execution_count": null,
280
+ "id": "f6d8b0ef",
281
+ "metadata": {},
282
+ "outputs": [],
283
+ "source": [
284
+ "!python vlm_judge_bbox_pred.py --SYSTEM_PROMPT './prompts/sys_vlm_judge.txt' --TASK_PROMPT './prompts/tsk_vlm_judge.txt' --EVAL_DATASET 'spair' --BBOX_FILE './results_vlm/csv/test_sample_10_qwen_vl_32b.csv' --EXP_NOTE 'GPT-4o Judge Test 10' --MODEL_NAME 'gpt-4o'"
285
+ ]
286
+ },
287
+ {
288
+ "cell_type": "markdown",
289
+ "id": "f308c5d5",
290
+ "metadata": {},
291
+ "source": [
292
+ "batch version has some issues"
293
+ ]
294
+ },
295
+ {
296
+ "cell_type": "code",
297
+ "execution_count": null,
298
+ "id": "e48aa4df",
299
+ "metadata": {},
300
+ "outputs": [],
301
+ "source": [
302
+ "!python vlm_judge_bbox_pred_batched.py --SYSTEM_PROMPT './prompts/sys_vlm_judge.txt' --TASK_PROMPT './prompts/tsk_vlm_judge.txt' --EVAL_DATASET 'spair' --BBOX_FILE './results_vlm/csv/test_sample_10_qwen_vl_32b.csv' --EXP_NOTE 'GPT-4.1-mini Test 10 Batched Judge Debug' --MODEL_NAME 'gpt-4.1-mini'"
303
+ ]
304
+ },
305
+ {
306
+ "cell_type": "code",
307
+ "execution_count": 5,
308
+ "id": "729dc349",
309
+ "metadata": {},
310
+ "outputs": [
311
+ {
312
+ "name": "stdout",
313
+ "output_type": "stream",
314
+ "text": [
315
+ "['devkit', 'Layout', 'PairAnnotation', 'Segmentation', 'ImageAnnotation', 'features', 'JPEGImages_bgd_rmv', 'README', 'JPEGImages']\n"
316
+ ]
317
+ }
318
+ ],
319
+ "source": [
320
+ "import os\n",
321
+ "print(os.listdir('../../Datasets/SPair-71k'))"
322
+ ]
323
+ },
324
+ {
325
+ "cell_type": "code",
326
+ "execution_count": null,
327
+ "id": "393b36c4",
328
+ "metadata": {},
329
+ "outputs": [],
330
+ "source": []
331
+ },
332
+ {
333
+ "cell_type": "markdown",
334
+ "id": "7f65b9cc",
335
+ "metadata": {},
336
+ "source": [
337
+ "## VLM judge evaluation"
338
+ ]
339
+ },
340
+ {
341
+ "cell_type": "code",
342
+ "execution_count": null,
343
+ "id": "934c7b45",
344
+ "metadata": {},
345
+ "outputs": [],
346
+ "source": [
347
+ "!python evaluate_vlm_judge.py --EVAL_DATASET 'spair' --TEST_SAMPLE 10 --DILATION 28 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b.csv' --ANSWER_FILE './results_vlm/csv/vlm_judge_test_sample_10_gpt4.1_mini.csv'"
348
+ ]
349
+ },
350
+ {
351
+ "cell_type": "code",
352
+ "execution_count": null,
353
+ "id": "0ca2d41d",
354
+ "metadata": {},
355
+ "outputs": [],
356
+ "source": [
357
+ "!python evaluate_vlm_judge.py --EVAL_DATASET 'spair' --TEST_SAMPLE 10 --DILATION 28 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b.csv' --ANSWER_FILE './results_vlm/csv/vlm_judge_test_sample_10_gpt4.1_mini.csv' --OUTPUT_DIR './results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b_gpt4.1_mini_judge_dilation_28'"
358
+ ]
359
+ },
360
+ {
361
+ "cell_type": "code",
362
+ "execution_count": null,
363
+ "id": "3f45536f",
364
+ "metadata": {},
365
+ "outputs": [],
366
+ "source": [
367
+ "!python evaluate_vlm_judge.py --EVAL_DATASET 'spair' --TEST_SAMPLE 10 --DILATION 28 --RESULT_DIR './results_vlm/csv/test_sample_10_qwen_vl_32b.csv' --ANSWER_FILE './results_vlm/csv/vlm_judge_test_sample_10_gpt4.1_mini.csv' --OUTPUT_DIR './results_vlm/filtered_predictions/test_sample_10_qwen_vl_32b_gpt4.1mini_dilation_28_debug'"
368
+ ]
369
+ },
370
+ {
371
+ "cell_type": "code",
372
+ "execution_count": null,
373
+ "id": "196dfab7",
374
+ "metadata": {},
375
+ "outputs": [],
376
+ "source": []
377
+ },
378
+ {
379
+ "cell_type": "markdown",
380
+ "id": "25ba6a0c",
381
+ "metadata": {},
382
+ "source": [
383
+ "## GPT semantic extraction actor critic"
384
+ ]
385
+ },
386
+ {
387
+ "cell_type": "code",
388
+ "execution_count": null,
389
+ "id": "56e660a6",
390
+ "metadata": {},
391
+ "outputs": [],
392
+ "source": [
393
+ "!python inference_gpt_actor_critic.py --MODEL_NAME 'gpt-4.1' --TEST_SAMPLE 10 --EXP_NOTE 'GPT-4.1 Actor Critic Test 10' --ANNO_SIZE 840 --EVAL_DATASET 'spair' "
394
+ ]
395
+ },
396
+ {
397
+ "cell_type": "code",
398
+ "execution_count": null,
399
+ "id": "94c95ff3",
400
+ "metadata": {},
401
+ "outputs": [],
402
+ "source": [
403
+ "!python inference_gpt_actor_critic_ort_with_instruction.py --MODEL_NAME 'gpt-5' --TEST_SAMPLE 1 --EXP_NOTE 'GPT-5 Low Actor Critic Test 1' --ANNO_SIZE 840 --EVAL_DATASET 'spair' --ORIENTATION_PATH '../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv/' --REASONING_EFFORT 'low'"
404
+ ]
405
+ },
406
+ {
407
+ "cell_type": "code",
408
+ "execution_count": null,
409
+ "id": "4442548f",
410
+ "metadata": {},
411
+ "outputs": [],
412
+ "source": [
413
+ "!python inference_gpt_actor_critic_ort_with_instruction.py --MODEL_NAME 'gpt-5' --TEST_SAMPLE 1 --EXP_NOTE 'GPT-5 minimal Actor Critic Test 1' --ANNO_SIZE 840 --EVAL_DATASET 'spair' --ORIENTATION_PATH '../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv/' --REASONING_EFFORT 'minimal'"
414
+ ]
415
+ },
416
+ {
417
+ "cell_type": "code",
418
+ "execution_count": null,
419
+ "id": "62147027",
420
+ "metadata": {},
421
+ "outputs": [],
422
+ "source": [
423
+ "!python inference_gpt_actor_critic_ort_with_instruction.py --MODEL_NAME 'gpt-5' --TEST_SAMPLE 10 --EXP_NOTE 'GPT-5 Low Actor Critic Test 10 Ort Corrected' --ANNO_SIZE 840 --EVAL_DATASET 'spair' --ORIENTATION_PATH '../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv/' --REASONING_EFFORT 'low'"
424
+ ]
425
+ },
426
+ {
427
+ "cell_type": "code",
428
+ "execution_count": null,
429
+ "id": "adc21c50",
430
+ "metadata": {},
431
+ "outputs": [],
432
+ "source": [
433
+ "!python inference_gpt_actor_critic_ort_with_instruction.py --MODEL_NAME 'gpt-5' --TEST_SAMPLE 10 --EXP_NOTE 'GPT-5 Low Actor Critic Test 10 Ort Real Corrected' --ANNO_SIZE 840 --EVAL_DATASET 'spair' --ORIENTATION_PATH '../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv/' --REASONING_EFFORT 'low'"
434
+ ]
435
+ },
436
+ {
437
+ "cell_type": "markdown",
438
+ "id": "e542fe92",
439
+ "metadata": {},
440
+ "source": [
441
+ "## Crop Qwen inference"
442
+ ]
443
+ },
444
+ {
445
+ "cell_type": "code",
446
+ "execution_count": null,
447
+ "id": "7ce5f07c",
448
+ "metadata": {},
449
+ "outputs": [],
450
+ "source": [
451
+ "!python inference_batched_with_crop.py --SYSTEM_PROMPT_BBOX './prompts/sys_bbox_crop.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox_crop.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --MODEL_NAME 'Qwen/Qwen2.5-VL-32B-Instruct' --SEM_DIR './results_vlm/spair/GPT-4.1_Actor_Critic_Test_10/' --TEST_SAMPLE 10 --EXP_NOTE 'Qwen-VL-32B batch Sub 10 Crop'"
452
+ ]
453
+ },
454
+ {
455
+ "cell_type": "code",
456
+ "execution_count": null,
457
+ "id": "32f15b6a",
458
+ "metadata": {},
459
+ "outputs": [],
460
+ "source": [
461
+ "!python inference_batched_with_crop_ort.py --SYSTEM_PROMPT_BBOX './prompts/sys_bbox_crop_with_ort.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox_crop_with_ort.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --MODEL_NAME 'Qwen/Qwen2.5-VL-32B-Instruct' --SEM_DIR './results_vlm/spair/GPT-5 Low Actor Critic Test 10 Near Side Corrected/' --TEST_SAMPLE 1 --EXP_NOTE 'Qwen-VL-32B batch Sub 1 Crop GPT-5' --ORIENTATION_DIR '../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv/'"
462
+ ]
463
+ },
464
+ {
465
+ "cell_type": "code",
466
+ "execution_count": null,
467
+ "id": "3003cff6",
468
+ "metadata": {},
469
+ "outputs": [],
470
+ "source": [
471
+ "!python inference_batched_with_crop_ort.py --SYSTEM_PROMPT_BBOX './prompts/sys_bbox_crop_with_ort.txt' --TASK_PROMPT_BBOX './prompts/tsk_bbox_crop_with_ort.txt' --EVAL_DATASET 'spair' --ANNO_SIZE 840 --MODEL_NAME 'Qwen/Qwen2.5-VL-32B-Instruct' --SEM_DIR './results_vlm/spair/GPT-5 Low Actor Critic Test 10 Near Side Corrected/' --TEST_SAMPLE 10 --EXP_NOTE 'Qwen-VL-32B batch Sub 10 Crop GPT-5' --ORIENTATION_DIR '../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv/'"
472
+ ]
473
+ },
474
+ {
475
+ "cell_type": "markdown",
476
+ "id": "db178765",
477
+ "metadata": {},
478
+ "source": [
479
+ "# Orient-Anything (Assume we are inside the Baselines/Orient-Anything)"
480
+ ]
481
+ },
482
+ {
483
+ "cell_type": "code",
484
+ "execution_count": null,
485
+ "id": "2a0c1fae",
486
+ "metadata": {},
487
+ "outputs": [],
488
+ "source": [
489
+ "!python extract_orientation.py --EVAL_DATASET 'spair' --SAVE_PATH '../../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv/' --DEVICE 'cuda:1' --EXP_NOTE 'Orientation Extraction bgd rmv'"
490
+ ]
491
+ },
492
+ {
493
+ "cell_type": "code",
494
+ "execution_count": null,
495
+ "id": "55556f11",
496
+ "metadata": {},
497
+ "outputs": [],
498
+ "source": [
499
+ "!python extract_orientation.py --EVAL_DATASET 'spair' --SAVE_PATH '../../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv/' --DEVICE 'cuda:1' --EXP_NOTE 'Orientation Extraction org img'"
500
+ ]
501
+ },
502
+ {
503
+ "cell_type": "code",
504
+ "execution_count": null,
505
+ "id": "3737ff3a",
506
+ "metadata": {},
507
+ "outputs": [],
508
+ "source": [
509
+ "!python extract_orientation.py --EVAL_DATASET 'spair' --SAVE_PATH '../../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv/' --DEVICE 'cuda:1' --EXP_NOTE 'Orientation Extraction org img demo'"
510
+ ]
511
+ },
512
+ {
513
+ "cell_type": "markdown",
514
+ "id": "c24345b2",
515
+ "metadata": {},
516
+ "source": [
517
+ "evaluate"
518
+ ]
519
+ },
520
+ {
521
+ "cell_type": "code",
522
+ "execution_count": null,
523
+ "id": "ef956e0b",
524
+ "metadata": {},
525
+ "outputs": [],
526
+ "source": [
527
+ "!python evaluate_orientation.py --EVAL_DATASET 'spair' "
528
+ ]
529
+ },
530
+ {
531
+ "cell_type": "markdown",
532
+ "id": "f8df8228",
533
+ "metadata": {},
534
+ "source": [
535
+ "## Super resolution"
536
+ ]
537
+ },
538
+ {
539
+ "cell_type": "code",
540
+ "execution_count": null,
541
+ "id": "f17d8d71",
542
+ "metadata": {},
543
+ "outputs": [],
544
+ "source": [
545
+ "!python realesrgan_sr.py --EVAL_DATASET 'spair' --SAVE_PATH '../../../Datasets/SPair-71k/JPEGImages_4x/' --GPU_ID 2 --OUT_SCALE 4"
546
+ ]
547
+ },
548
+ {
549
+ "cell_type": "markdown",
550
+ "id": "0fc2c7b5",
551
+ "metadata": {},
552
+ "source": [
553
+ "## Orientation correction"
554
+ ]
555
+ },
556
+ {
557
+ "cell_type": "code",
558
+ "execution_count": null,
559
+ "id": "3f6c2c53",
560
+ "metadata": {},
561
+ "outputs": [],
562
+ "source": [
563
+ "!python near_side_correction.py"
564
+ ]
565
+ },
566
+ {
567
+ "cell_type": "markdown",
568
+ "id": "17c098a5",
569
+ "metadata": {},
570
+ "source": []
571
+ }
572
+ ],
573
+ "metadata": {
574
+ "kernelspec": {
575
+ "display_name": "qwen-vl-flash-attn",
576
+ "language": "python",
577
+ "name": "python3"
578
+ },
579
+ "language_info": {
580
+ "codemirror_mode": {
581
+ "name": "ipython",
582
+ "version": 3
583
+ },
584
+ "file_extension": ".py",
585
+ "mimetype": "text/x-python",
586
+ "name": "python",
587
+ "nbconvert_exporter": "python",
588
+ "pygments_lexer": "ipython3",
589
+ "version": "3.11.13"
590
+ }
591
+ },
592
+ "nbformat": 4,
593
+ "nbformat_minor": 5
594
+ }
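Note on the eval_bbox_acc.py cells above: they sweep the --DILATION flag (14, 28, 84 pixels at ANNO_SIZE 840). A minimal sketch of what that flag is assumed to control, mirroring the dilation logic in get_evaluation_outcome from extract_vlm_prediction.py further down in this commit (bbox_hit is a hypothetical helper, not part of the repo):

def bbox_hit(pred_bbox, gt_point, dilation=28):
    # A prediction counts as a hit when the ground-truth keypoint falls inside
    # the predicted box after expanding it by `dilation` pixels on every side.
    x1, y1, x2, y2 = pred_bbox
    x, y = gt_point
    return (x1 - dilation) <= x <= (x2 + dilation) and (y1 - dilation) <= y <= (y2 + dilation)

print(bbox_hit([100, 120, 180, 200], (90, 150), dilation=28))  # True: 90 is within 28 px of x1=100
print(bbox_hit([100, 120, 180, 200], (60, 150), dilation=28))  # False: 40 px outside the box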
Code/sc_dit/extract_3D_mesh.py ADDED
@@ -0,0 +1,86 @@
1
+ import sys
2
+ from pathlib import Path
3
+ import torch
4
+ import os
5
+ import glob
6
+
7
+ # print(project_root)
8
+ project_root = Path(__file__).resolve().parents[1]
9
+ print(os.listdir(project_root))
10
+ project_path = os.path.join(str(project_root), 'Baselines', 'CraftsMan3D')
11
+ print(os.listdir(project_path))
12
+
13
+ # print("Project root:", project_root)
14
+ # print("Project path:", project_path)
15
+
16
+ # # Add the project root to Python's path to allow for absolute imports
17
+ if str(project_path) not in sys.path:
18
+ sys.path.append(str(project_path))
19
+
20
+ # print("Current sys.path:", sys.path)
21
+ from craftsman import CraftsManPipeline
22
+
23
+ def main():
24
+
25
+ print(os.listdir('../Baselines/sd3.5'))
26
+
27
+ path_data = '../Baselines/sd3.5/spair_71k_test_examples'
28
+ categories = os.listdir(path_data)
29
+ print("Categories:", categories)
30
+
31
+ path_images = glob.glob(os.path.join(path_data, '*', '*_bgd_rmv.png'))
32
+ print("Number of images found:", len(path_images))
33
+
34
+ path_output = './example_data/spair'
35
+
36
+ device = "cuda:7"
37
+
38
+ # print(os.listdir(path_data))
39
+
40
+ # path_image = os.path.join(path_data, 'motorbike', '2009_004845_bgd_rmv.png')
41
+
42
+ pipeline = CraftsManPipeline.from_pretrained("../Baselines/CraftsMan3D/ckpts/craftsman-DoraVAE", device=device, torch_dtype=torch.bfloat16)
43
+
44
+ for path_image in path_images:
45
+ # print("Processing image:", path_image)
46
+ # Extract the category and file ID from the image path
47
+ category = os.path.basename(os.path.dirname(path_image))
48
+ file_id = os.path.splitext(os.path.basename(path_image))[0].replace('_bgd_rmv', '')
49
+
50
+ path_obj_output = os.path.join(path_output, category, f"{file_id}.obj")
51
+ if not os.path.exists(os.path.dirname(path_obj_output)):
52
+ os.makedirs(os.path.dirname(path_obj_output))
53
+ # print("Output path for mesh:", path_obj_output)
54
+
55
+ mesh = pipeline(path_image).meshes[0]
56
+ mesh.export(path_obj_output)
57
+ print(f"Exported mesh for {file_id} to {path_obj_output}")
58
+
59
+ # copy the image to the output directory
60
+ path_image_output = os.path.join(path_output, category, f"{file_id}_bgd_rmv.png")
61
+ if not os.path.exists(os.path.dirname(path_image_output)):
62
+ os.makedirs(os.path.dirname(path_image_output))
63
+ os.system(f"cp {path_image} {path_image_output}")
64
+ os.system(f"cp {path_image.replace('_bgd_rmv.png', '.jpg')} {path_image_output.replace('_bgd_rmv.png', '.jpg')}")
65
+
66
+ # break
67
+
68
+ # # Run the pipeline
69
+ # try:
70
+ # result = pipeline(path_image, category=category, file_id=file_id)
71
+ # meshes = result.meshes
72
+ # if meshes:
73
+ # mesh = meshes[0]
74
+ # mesh.export(f"{file_id}.obj")
75
+ # print(f"Exported mesh for {file_id} to {file_id}.obj")
76
+ # else:
77
+ # print(f"No meshes found for {file_id}")
78
+ # except Exception as e:
79
+ # print(f"Error processing {path_image}: {e}")
80
+ # mesh = pipeline(path_image).meshes[0]
81
+ # mesh.export("motorbike_1.obj")
82
+
83
+
84
+
85
+ if __name__ == "__main__":
86
+ main()
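One fragile detail in extract_3D_mesh.py above: the two os.system(f"cp ...") calls shell out and can mis-handle paths containing spaces or shell metacharacters. A hedged alternative sketch using the standard library, intended as a drop-in for those two lines and reusing the loop's path_image / path_image_output variables:

import shutil

# Shell-free equivalents of the two `cp` calls above.
shutil.copy(path_image, path_image_output)
shutil.copy(path_image.replace('_bgd_rmv.png', '.jpg'),
            path_image_output.replace('_bgd_rmv.png', '.jpg'))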
Code/sc_dit/extract_vlm_prediction.py ADDED
@@ -0,0 +1,158 @@
1
+ import csv
2
+ import json
3
+ import argparse
4
+ import os
5
+ import pandas as pd
6
+ import ast
7
+ from collections import defaultdict
8
+ from tqdm import tqdm
9
+ import numpy as np
10
+
11
+ # --- Utility Functions & Imports ---
12
+ # NOTE: This script assumes you have 'utils.py' and 'dataset.py' files
13
+ # with the necessary functions as used in your original script.
14
+ from utils import load_eval_data
15
+ from dataset import get_dataset_info
16
+
17
+ def get_evaluation_outcome(pred_bbox, gt_point, dilation=0):
18
+ """
19
+ Evaluates a single prediction and returns the outcome (TP, FP, FN, or TN).
20
+
21
+ Args:
22
+ pred_bbox (list or None): The predicted bounding box [x1, y1, x2, y2].
23
+ gt_point (np.array): The ground truth keypoint [x, y].
24
+ dilation (int): Dilation pixels to expand the predicted bounding box.
25
+
26
+ Returns:
27
+ str: 'tp' (True Positive), 'fp' (False Positive),
28
+ 'fn' (False Negative), or 'tn' (True Negative).
29
+ """
30
+ # GT is visible if the point is not [0, 0]
31
+ is_gt_visible = not np.array_equal(gt_point, [0, 0])
32
+ # Prediction is valid if it's not None and not an empty placeholder
33
+ is_pred_valid = pred_bbox is not None and pred_bbox != [0, 0, 0, 0]
34
+
35
+ if is_gt_visible:
36
+ if is_pred_valid:
37
+ # GT is visible, and we made a prediction. Check if it's a hit.
38
+ x1, y1, x2, y2 = pred_bbox
39
+ if dilation > 0:
40
+ x1, y1, x2, y2 = x1 - dilation, y1 - dilation, x2 + dilation, y2 + dilation
41
+ x_gt, y_gt = gt_point
42
+ is_hit = (x1 <= x_gt <= x2) and (y1 <= y_gt <= y2)
43
+ return 'tp' if is_hit else 'fp'
44
+ else:
45
+ # GT is visible, but we did not make a prediction.
46
+ return 'fn'
47
+ else: # GT is not visible
48
+ return 'fp' if is_pred_valid else 'tn'
49
+
50
+ def process_and_filter_data(args):
51
+ """
52
+ Main function to read, evaluate, filter, and save data.
53
+ """
54
+ # --- 1. Load Data & Ground Truth ---
55
+ results_df = pd.read_csv(args.RESULT_DIR)
56
+ data_dir, categories, split = get_dataset_info(args, split='test')
57
+
58
+ print("Pre-loading ground truth data into a lookup table...")
59
+ gt_data = {}
60
+ for category in categories:
61
+ gt_data[category] = {}
62
+ files, kps, _, _ = load_eval_data(args, data_dir, category, split=split)
63
+ for i, file_path in enumerate(files):
64
+ file_id = os.path.basename(file_path).split('.')[0]
65
+ gt_data[category][file_id] = kps[i]
66
+
67
+ # --- 2. Prepare Data Structure for Final Output ---
68
+ # This structure will hold all the data for each pair before saving.
69
+ pairs_data = defaultdict(lambda: {
70
+ 'src_bbox': [None] * 30,
71
+ 'tgt_bbox': [None] * 30,
72
+ 'src_kp_gt': [None] * 30, # New field for source keypoints
73
+ 'tgt_kp_gt': [None] * 30 # New field for target keypoints
74
+ })
75
+
76
+ # --- 3. Process, Evaluate, and Filter Each Row ---
77
+ print("Evaluating predictions and applying filtering logic...")
78
+ for _, row in tqdm(results_df.iterrows(), total=len(results_df)):
79
+ category, src_id, tgt_id = row['category'], str(row['src_id']), str(row['tgt_id'])
80
+ kpt_id = int(row['kpt_id'])
81
+
82
+ if src_id not in gt_data.get(category, {}) or tgt_id not in gt_data.get(category, {}):
83
+ continue
84
+
85
+ # --- Process Source ---
86
+ kps_src_img = gt_data[category][src_id][kpt_id]
87
+ is_src_visible = kps_src_img.numpy()[2] == 1
88
+ src_kp_gt = kps_src_img.numpy()[:2] if is_src_visible else np.array([0, 0])
89
+ src_bbox_pred = ast.literal_eval(row['src_bbox']) if pd.notna(row['src_bbox']) else None
90
+ src_outcome = get_evaluation_outcome(src_bbox_pred, src_kp_gt, args.DILATION)
91
+
92
+ # --- Process Target ---
93
+ kps_tgt_img = gt_data[category][tgt_id][kpt_id]
94
+ is_tgt_visible = kps_tgt_img.numpy()[2] == 1
95
+ tgt_kp_gt = kps_tgt_img.numpy()[:2] if is_tgt_visible else np.array([0, 0])
96
+ tgt_bbox_pred = ast.literal_eval(row['tgt_bbox']) if pd.notna(row['tgt_bbox']) else None
97
+ tgt_outcome = get_evaluation_outcome(tgt_bbox_pred, tgt_kp_gt, args.DILATION)
98
+
99
+ # --- Apply Filtering Logic to Bounding Boxes ---
100
+ if src_outcome == 'tp' and src_bbox_pred is not None:
101
+ src_bbox_pred = [max(0, coord - args.DILATION) for coord in src_bbox_pred[:2]] + \
102
+ [coord + args.DILATION for coord in src_bbox_pred[2:]]
103
+ if tgt_outcome == 'tp' and tgt_bbox_pred is not None:
104
+ tgt_bbox_pred = [max(0, coord - args.DILATION) for coord in tgt_bbox_pred[:2]] + \
105
+ [coord + args.DILATION for coord in tgt_bbox_pred[2:]]
106
+
107
+ final_src_bbox = src_bbox_pred if src_outcome == 'tp' else ([0, 0, 0, 0] if src_outcome == 'tn' else None)
108
+ final_tgt_bbox = tgt_bbox_pred if tgt_outcome == 'tp' else ([0, 0, 0, 0] if tgt_outcome == 'tn' else None)
109
+
110
+ # --- Store Filtered Data ---
111
+ pair_key = (src_id, tgt_id)
112
+ if 'src_name' not in pairs_data[pair_key]:
113
+ pairs_data[pair_key]['category'] = category # Store category
114
+ pairs_data[pair_key]['src_name'] = src_id
115
+ pairs_data[pair_key]['tgt_name'] = tgt_id
116
+
117
+ if 0 <= kpt_id < 30:
118
+ pairs_data[pair_key]['src_bbox'][kpt_id] = final_src_bbox
119
+ pairs_data[pair_key]['tgt_bbox'][kpt_id] = final_tgt_bbox
120
+ # Store the ground truth keypoints (converted to list for JSON)
121
+ pairs_data[pair_key]['src_kp_gt'][kpt_id] = src_kp_gt.tolist()
122
+ pairs_data[pair_key]['tgt_kp_gt'][kpt_id] = tgt_kp_gt.tolist()
123
+
124
+ # --- 4. Save Filtered Data to JSON Files ---
125
+ output_dir = args.OUTPUT_DIR
126
+ os.makedirs(output_dir, exist_ok=True)
127
+ print(f"\nSaving {len(pairs_data)} filtered JSON files to '{output_dir}'...")
128
+
129
+ for pair_key, data in pairs_data.items():
130
+ src_id, tgt_id = pair_key
131
+ # Use hyphen in filename as per your provided code
132
+ output_filename = f"{src_id}-{tgt_id}.json"
133
+ output_filepath = os.path.join(output_dir, output_filename)
134
+ try:
135
+ with open(output_filepath, mode='w', encoding='utf-8') as jsonfile:
136
+ json.dump(data, jsonfile, indent=4)
137
+ except IOError as e:
138
+ print(f"Error writing to file '{output_filepath}': {e}")
139
+
140
+ print("Processing complete.")
141
+
142
+
143
+ if __name__ == '__main__':
144
+ parser = argparse.ArgumentParser(
145
+ description="Evaluate, filter, and convert keypoint CSV data to multiple JSON files."
146
+ )
147
+ # Arguments from your evaluation script
148
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, choices=['pascal', 'spair'], help="Dataset to use for evaluation.")
149
+ parser.add_argument('--ANNO_SIZE', type=int, default=840, help="Annotation size.")
150
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0, help="Number of test samples to use (0 for all).")
151
+ parser.add_argument('--RESULT_DIR', type=str, required=True, help="Path to the input CSV file with predictions.")
152
+ parser.add_argument('--DILATION', type=int, default=0, help="Dilation for bounding box evaluation.")
153
+
154
+ # Updated argument for the output directory
155
+ parser.add_argument('--OUTPUT_DIR', type=str, required=True, help="Path to the directory for the final filtered JSON files.")
156
+
157
+ args = parser.parse_args()
158
+ process_and_filter_data(args)
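A short worked example of the outcome logic in get_evaluation_outcome above (values are illustrative; importing the module assumes its dependencies, utils.py and dataset.py, are importable, since they load at import time):

import numpy as np
from extract_vlm_prediction import get_evaluation_outcome

print(get_evaluation_outcome([100, 100, 200, 200], np.array([150, 150]), dilation=10))  # 'tp': visible GT inside the dilated box
print(get_evaluation_outcome([100, 100, 200, 200], np.array([400, 400]), dilation=10))  # 'fp': visible GT, box misses it
print(get_evaluation_outcome(None, np.array([150, 150]), dilation=10))                  # 'fn': visible GT, no prediction
print(get_evaluation_outcome([0, 0, 0, 0], np.array([0, 0]), dilation=10))              # 'tn': occluded GT, no valid prediction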
Code/sc_dit/gpt_utils.py ADDED
File without changes
Code/sc_dit/inference_batched.py ADDED
@@ -0,0 +1,367 @@
1
+ import os
2
+ import wandb
3
+ import matplotlib.pyplot as plt
4
+ from torch.utils.data import DataLoader
5
+ import argparse
6
+ import torch
7
+ from tqdm import tqdm
8
+ from PIL import ImageDraw, ImageFont
9
+ import numpy as np
10
+ import json
11
+
12
+ # multiprocessing for parallel job preparation
13
+ import concurrent.futures
14
+
15
+ # custom imports
16
+ from dataset import get_dataset_info, VLDataset
17
+ from utils import load_eval_data, load_img_and_kps
18
+ from qwen_utils import QwenVLDetector
19
+ # from predict_correspondence_vlm import create_image_with_one_kp
20
+
21
+ # Place the new, fast drawing function here
22
+ # def create_image_with_one_kp_pil(img, kps, kps_idx=0, circ_size=200, **kwargs):
23
+ # img_with_kp = img.copy()
24
+ # draw = ImageDraw.Draw(img_with_kp)
25
+ # cx, cy = kps[kps_idx, 0], kps[kps_idx, 1]
26
+ # radius = circ_size / 10
27
+ # bbox = [cx - radius, cy - radius, cx + radius, cy + radius]
28
+ # draw.ellipse(bbox, outline="red", width=4)
29
+ # return img_with_kp
30
+
31
+ def create_image_with_one_kp_pil(img, kps, kps_idx=0, circ_size=200, add_text=True, **kwargs):
32
+ img_with_kp = img.copy()
33
+ draw = ImageDraw.Draw(img_with_kp)
34
+ cx, cy = kps[kps_idx, 0], kps[kps_idx, 1]
35
+ radius = circ_size / 10
36
+ bbox = [cx - radius, cy - radius, cx + radius, cy + radius]
37
+ draw.ellipse(bbox, outline="red", width=4)
38
+
39
+ if add_text:
40
+ text = "Ref"
41
+ # Try to use a better font, or fall back to the default if not found
42
+ # try:
43
+ font = ImageFont.truetype("DejaVuSans.ttf", size=26)
44
+ # except IOError:
45
+ # print('test')
46
+ # font = ImageFont.load_default()
47
+
48
+ # Get text bounding box for centering
49
+ # print(font)
50
+ bbox_text = draw.textbbox((0, 0), text, font=font)
51
+ text_width = bbox_text[2] - bbox_text[0]
52
+ text_height = bbox_text[3] - bbox_text[1]
53
+
54
+ text_x = cx - text_width // 2
55
+ text_y = cy - text_height // 2
56
+ draw.text((text_x, text_y), text, font=font, fill=(255, 255, 255))  # 'text_anchor' is not an ImageDraw.text argument; the position is already centered manually above
57
+ return img_with_kp
58
+
59
+ # Helper function to process a single keypoint (needed for parallelization)
60
+ def prepare_single_job(task_args):
61
+ img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, category_prompt_sem = task_args
62
+
63
+ if img1_kps[kps_idx, 2] == 1:
64
+ # Use the fast, new function
65
+ img1_kp = create_image_with_one_kp_pil(img1, img1_kps, kps_idx=kps_idx)
66
+
67
+ return {
68
+ "img1": img1,
69
+ "img1_kp": img1_kp,
70
+ "img2": img2,
71
+ "prompt_sem": category_prompt_sem,
72
+ "metadata": { "category": category, "src_id": src_id, "tgt_id": tgt_id, "kps_idx": kps_idx }
73
+ }
74
+ return None
75
+
76
+ def run_batched_evaluation(args, model, system_prompt_sem, system_prompt_bbox, task_prompt_sem, task_prompt_bbox):
77
+ """
78
+ Runs the entire evaluation using a batched approach for maximum efficiency.
79
+ """
80
+ # --- Create save directories if provided ---
81
+ if args.SAVE_DIR:
82
+ stage2_dir = os.path.join(args.SAVE_DIR, "stage2_semantics", args.EVAL_DATASET, args.EXP_NOTE)
83
+ stage3_dir = os.path.join(args.SAVE_DIR, "stage3_bboxes", args.EVAL_DATASET, args.EXP_NOTE)
84
+ os.makedirs(stage2_dir, exist_ok=True)
85
+ os.makedirs(stage3_dir, exist_ok=True)
86
+ print(f"Intermediate results will be saved to: {args.SAVE_DIR}")
87
+
88
+
89
+ data_dir, categories, split = get_dataset_info(args, split='test')
90
+ results_table = wandb.Table(columns=["category", "src_id", "tgt_id", "kpt_id", "plot", "extracted_semantics", "src_response", "src_bbox", "src_input_size", "tgt_response", "tgt_bbox", "target_input_size"])
91
+
92
+ # --- STAGE 1: PREPARE ALL INFERENCE JOBS FIRST ---
93
+ print("--- Stage 1: Preparing all inference jobs... ---")
94
+ # inference_jobs = []
95
+ # for category in categories:
96
+ # # print(f"Preparing jobs for category: {category}")
97
+ # category_prompt_sem = task_prompt_sem.format(class_name=category)
98
+ # files, kps, _, _ = load_eval_data(args, data_dir, category, split)
99
+ # N = len(files) // 2
100
+
101
+ # # for pair_idx in range(N):
102
+ # for pair_idx in tqdm(range(N), desc=f"Processing {category} pairs"):
103
+ # img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE)
104
+ # img2, _ = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, img_size=args.ANNO_SIZE)
105
+ # src_id = files[2*pair_idx].split('/')[-1].split('.')[0]
106
+ # tgt_id = files[2*pair_idx+1].split('/')[-1].split('.')[0]
107
+
108
+ # for kps_idx in range(img1_kps.shape[0]):
109
+ # if img1_kps[kps_idx, 2] == 1:
110
+ # # CPU-bound image creation
111
+ # img1_kp = create_image_with_one_kp(img1, img1_kps, kps_idx=kps_idx, add_text=False, add_circle=True)
112
+
113
+ # job = {
114
+ # "img1_kp": img1_kp,
115
+ # "img2": img2,
116
+ # "prompt_sem": category_prompt_sem,
117
+ # "metadata": { "category": category, "src_id": src_id, "tgt_id": tgt_id, "kps_idx": kps_idx }
118
+ # }
119
+ # inference_jobs.append(job)
120
+ # print(f"Prepared {len(inference_jobs)} total jobs.")
121
+
122
+ # First, create a flat list of all tasks to be done
123
+ tasks = []
124
+ for category in categories:
125
+ # category_prompt_sem = task_prompt_sem.format(class_name=category)
126
+ files, kps, _, _ = load_eval_data(args, data_dir, category, split)
127
+ N = len(files) // 2
128
+
129
+ # for pair_idx in range(N):
130
+ for pair_idx in tqdm(range(N), desc=f"Adding {category} pairs"):
131
+ img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE)
132
+ img2, _ = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, img_size=args.ANNO_SIZE)
133
+ src_id = files[2*pair_idx].split('/')[-1].split('.')[0]
134
+ tgt_id = files[2*pair_idx+1].split('/')[-1].split('.')[0]
135
+
136
+ for kps_idx in range(img1_kps.shape[0]):
137
+ # point_x = int(img1_kps[kps_idx, 0])
138
+ # point_y = int(img1_kps[kps_idx, 1])
139
+ # get two decimal places
140
+ point_x = f"{img1_kps[kps_idx, 0]:.2f}"
141
+ point_y = f"{img1_kps[kps_idx, 1]:.2f}"
142
+ category_prompt_sem = task_prompt_sem.format(class_name=category, point_x=point_x, point_y=point_y)
143
+ tasks.append((img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, category_prompt_sem))
144
+
145
+ # Now, process the flat list of tasks in parallel
146
+ inference_jobs = []
147
+ # Use max_workers=None to use all available CPU cores
148
+ with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
149
+ # `map` will apply `prepare_single_job` to each item in `tasks` across multiple processes
150
+ # `tqdm` provides a progress bar
151
+ results = list(tqdm(executor.map(prepare_single_job, tasks), total=len(tasks), desc="Preparing Jobs"))
152
+
153
+ # Filter out None results (from invisible keypoints)
154
+ inference_jobs = [job for job in results if job is not None]
155
+
156
+ print(f"Prepared {len(inference_jobs)} total jobs.")
157
+
158
+
159
+ # --- STAGE 2: BATCHED SEMANTIC EXTRACTION (1st Model Call) ---
160
+ print("\n--- Stage 2: Running batched semantic extraction... ---")
161
+ src_images = [job["img1_kp"] for job in inference_jobs]
162
+ src_prompts = [job["prompt_sem"] for job in inference_jobs]
163
+
164
+ dataset_sem = VLDataset(src_images, src_prompts)
165
+ loader_sem = DataLoader(dataset_sem, batch_size=args.BATCH_SIZE, num_workers=args.NUM_WORKERS, collate_fn=lambda x: x, pin_memory=True)
166
+
167
+ all_src_results = []
168
+ # for batch in loader_sem:
169
+ for batch in tqdm(loader_sem, desc="Processing semantic extraction batches"):
170
+ images_pil, text_prompts = zip(*batch)
171
+ # Use the base `chat` method as it seems you want raw text back
172
+ # NOTE: You might need a batched version of `chat` if it doesn't support lists
173
+ results = model.chat_batch(list(images_pil), list(text_prompts), system_prompt_sem, 'self-handled') # Assuming model.chat is updated for batching
174
+ all_src_results.extend(results)
175
+
176
+ # Extract semantics and prepare for the next stage
177
+ for i, job in enumerate(inference_jobs):
178
+ response_text = all_src_results[i]['response']
179
+ job["metadata"]["src_input_size"] = all_src_results[i]['input_size']
180
+ job["src_response"] = response_text
181
+ job["extracted_semantics"] = response_text.split("Keypoint component:")[-1].strip()
182
+ job["src_bbox"] = model._get_bounding_boxes(response_text, all_src_results[i]['input_size'], job['img1_kp'].size)
183
+
184
+
185
+ # --- SAVE STAGE 2 RESULTS IMMEDIATELY ---
186
+ if args.SAVE_DIR:
187
+ print(f"Saving Stage 2 results to {stage2_dir}...")
188
+ for job in tqdm(inference_jobs, desc="Saving Stage 2 results"):
189
+ meta = job["metadata"]
190
+ if args.DEBUG:
191
+ filename = f"DEBUG:{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
192
+ else:
193
+ filename = f"{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
194
+ save_path = os.path.join(stage2_dir, filename)
195
+ src_bbox = list(job["src_bbox"].values())[0]['bbox'] if job["src_bbox"] and job["src_bbox"].values() else None
196
+ output_data = {
197
+ "metadata": meta,
198
+ "full_response": job["src_response"],
199
+ "extracted_semantics": job["extracted_semantics"],
200
+ "source_bbox": src_bbox,
201
+ }
202
+ with open(save_path, 'w') as f:
203
+ json.dump(output_data, f, indent=4)
204
+
205
+ # --- STAGE 3: BATCHED BOUNDING BOX PREDICTION (2nd Model Call) ---
206
+ print("\n--- Stage 3: Running batched bounding box prediction... ---")
207
+ if args.DEBUG:
208
+ # print("Debug mode enabled: Saving intermediate results to disk.")
209
+ print("Debug mode enabled: Using img1 for bounding box prediction.")
210
+ tgt_images = [job["img1"] for job in inference_jobs]
211
+ else:
212
+ tgt_images = [job["img2"] for job in inference_jobs]
213
+ tgt_prompts = [task_prompt_bbox.format(class_name=job["metadata"]["category"], extracted_semantics=job["extracted_semantics"]) for job in inference_jobs]
214
+
215
+ dataset_bbox = VLDataset(tgt_images, tgt_prompts)
216
+ loader_bbox = DataLoader(dataset_bbox, batch_size=args.BATCH_SIZE, num_workers=args.NUM_WORKERS, collate_fn=lambda x: x)
217
+
218
+ all_tgt_results = []
219
+ # for batch in loader_bbox:
220
+ for batch in tqdm(loader_bbox, desc="Processing bounding box prediction batches"):
221
+ images_pil, text_prompts = zip(*batch)
222
+ # Assuming model.predict is updated for batching
223
+ results = model.predict_batch(list(images_pil), list(text_prompts), system_prompt_bbox, 'object')
224
+ all_tgt_results.extend(results)
225
+
226
+ # --- SAVE STAGE 3 RESULTS IMMEDIATELY ---
227
+ if args.SAVE_DIR:
228
+ print(f"Saving Stage 3 results to {stage3_dir}...")
229
+ for i, job in enumerate(tqdm(inference_jobs, desc="Saving Stage 3 results")):
230
+ meta = job["metadata"]
231
+ tgt_result = all_tgt_results[i]
232
+ if args.DEBUG:
233
+ tgt_bbox = model.predict_bounding_boxes(job["img1"], tgt_result['response'], tgt_result['input_size'])
234
+ else:
235
+ tgt_bbox = model.predict_bounding_boxes(job["img2"], tgt_result['response'], tgt_result['input_size'])
236
+
237
+ filename = f"{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
238
+ save_path = os.path.join(stage3_dir, filename)
239
+ output_data = {
240
+ "metadata": meta,
241
+ "full_response": tgt_result['response'],
242
+ "target_bbox": tgt_bbox,
243
+ "target_input_size": tgt_result['input_size'],
244
+ }
245
+ with open(save_path, 'w') as f:
246
+ json.dump(output_data, f, indent=4)
247
+
248
+
249
+
250
+ # --- STAGE 4: LOGGING ---
251
+ print("\n--- Stage 4: Plotting and logging results to WandB... ---")
252
+ # for i, job in enumerate(inference_jobs):
253
+ for i, job in tqdm(enumerate(inference_jobs), total=len(inference_jobs), desc="Logging results"):
254
+ meta = job["metadata"]
255
+ src_bbox_dict = job["src_bbox"]
256
+ tgt_result = all_tgt_results[i]
257
+
258
+ src_bbox = list(src_bbox_dict.values())[0]['bbox'] if src_bbox_dict else None
259
+ tgt_bbox = model.predict_bounding_boxes(job["img2"], tgt_result['response'], tgt_result['input_size'])
260
+
261
+ # Create plot
262
+ # --- COMPLETED PLOTTING LOGIC ---
263
+ # Get the source and target images for plotting
264
+ img1_kp = job["img1_kp"]
265
+ if args.DEBUG:
266
+ img2 = job["img1"] # Use img1 for bbox prediction in debug mode
267
+ else:
268
+ img2 = job["img2"]
269
+
270
+ fig, axes = plt.subplots(1, 2, figsize=(12, 6))
271
+ axes[0].imshow(np.array(img1_kp))
272
+ axes[0].axis('off')
273
+ axes[0].set_title('Source Image with Keypoint')
274
+ axes[1].imshow(np.array(img2))
275
+ axes[1].axis('off')
276
+ axes[1].set_title('Target Image with Bounding Box')
277
+ if src_bbox:
278
+ abs_x1, abs_y1, abs_x2, abs_y2 = src_bbox
279
+ axes[0].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
280
+
281
+ if tgt_bbox:
282
+ abs_x1, abs_y1, abs_x2, abs_y2 = tgt_bbox
283
+ axes[1].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
284
+ else:
285
+ axes[1].text(img2.width / 2, img2.height / 2, "No bounding box found", color='red', fontsize=12, ha='center', va='center')
286
+
287
+ fig.tight_layout()
288
+
289
+ wandb_img = wandb.Image(fig)
290
+ plt.close(fig)
291
+
292
+
293
+ results_table.add_data(
294
+ meta["category"],
295
+ meta["src_id"],
296
+ meta["tgt_id"],
297
+ meta["kps_idx"],
298
+ wandb_img,
299
+ job["extracted_semantics"],
300
+ job["src_response"],
301
+ str(src_bbox),
302
+ job["metadata"]["src_input_size"],
303
+ tgt_result['response'],
304
+ str(tgt_bbox),
305
+ tgt_result['input_size']
306
+ )
307
+
308
+ wandb.log({"evaluation_results": results_table})
309
+
310
+
311
+ # ===================================================================
312
+ # 4. YOUR ORIGINAL `main` FUNCTION, NOW SIMPLIFIED TO CALL THE BATCHED RUNNER
313
+ # ===================================================================
314
+ def main(args):
315
+ with open(args.SYSTEM_PROMPT_SEM, 'r') as f:
316
+ system_prompt_sem = f.read()
317
+ with open(args.SYSTEM_PROMPT_BBOX, 'r') as f:
318
+ system_prompt_bbox = f.read()
319
+ with open(args.TASK_PROMPT_SEM, 'r') as f:
320
+ task_prompt_sem = f.read()
321
+ with open(args.TASK_PROMPT_BBOX, 'r') as f:
322
+ task_prompt_bbox = f.read()
323
+
324
+ # Initialize the Qwen VLM model
325
+ print("Initializing Qwen model...")
326
+ model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name=args.MODEL_NAME, device="auto", flash_attn=True)
327
+ # model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name="Qwen/Qwen2.5-VL-7B-Instruct", device="auto", flash_attn=True)
328
+
329
+ # Initialize WandB
330
+ print("Initializing WandB...")
331
+ wandb.init(
332
+ project=args.EVAL_DATASET,
333
+ entity="amazon_intern2025",
334
+ name=args.EXP_NOTE,
335
+ config=vars(args)
336
+ )
337
+
338
+ # Run the optimized evaluation
339
+ run_batched_evaluation(args, model, system_prompt_sem, system_prompt_bbox, task_prompt_sem, task_prompt_bbox)
340
+
341
+ print('Finished processing all categories and logging results.')
342
+ wandb.finish()
343
+ print('WandB run finished.')
344
+
345
+
346
+ if __name__ == "__main__":
347
+ parser = argparse.ArgumentParser(description="Predict correspondence using Qwen VLM.")
348
+ # ... (all your existing arguments) ...
349
+ parser.add_argument('--SYSTEM_PROMPT_SEM', type=str, required=True)
350
+ parser.add_argument('--SYSTEM_PROMPT_BBOX', type=str, required=True)
351
+ parser.add_argument('--TASK_PROMPT_SEM', type=str, required=True)
352
+ parser.add_argument('--TASK_PROMPT_BBOX', type=str, required=True)
353
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, choices=['pascal', 'spair'])
354
+ parser.add_argument('--ANNO_SIZE', type=int, default=840)
355
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0)
356
+ parser.add_argument('--EXP_NOTE', type=str, default='Qwen VLM demo')
357
+ parser.add_argument('--SAVE_DIR', type=str, default='./results_vlm/', help='Directory to save intermediate results.')
358
+ parser.add_argument('--DEBUG', action='store_true', help='Enable debug mode for verbose output.') # decouple the prediction
359
+ parser.add_argument('--MODEL_NAME', type=str, default='Qwen/Qwen2.5-VL-32B-Instruct', help='Model name for Qwen VLM.')
360
+
361
+ # --- ADD THESE NEW ARGUMENTS for controlling batching ---
362
+ parser.add_argument('--BATCH_SIZE', type=int, default=4, help='Batch size for GPU inference.')
363
+ parser.add_argument('--NUM_WORKERS', type=int, default=8, help='Number of CPU cores for data loading.')
364
+
365
+
366
+ args = parser.parse_args()
367
+ main(args)
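A note on the DataLoader calls in inference_batched.py above: collate_fn=lambda x: x keeps each batch as a plain Python list of (PIL image, prompt) tuples instead of trying to stack them into tensors, which is why the loops unpack with zip(*batch). A minimal standalone sketch of that pattern, where ToyPairs is a hypothetical stand-in for the repo's VLDataset:

from torch.utils.data import Dataset, DataLoader

class ToyPairs(Dataset):
    """Hypothetical stand-in for VLDataset: yields (image, prompt) tuples."""
    def __init__(self, images, prompts):
        self.items = list(zip(images, prompts))
    def __len__(self):
        return len(self.items)
    def __getitem__(self, idx):
        return self.items[idx]

loader = DataLoader(ToyPairs(["img_a", "img_b", "img_c"], ["p1", "p2", "p3"]),
                    batch_size=2, collate_fn=lambda x: x)
for batch in loader:
    images, prompts = zip(*batch)  # same unpacking as in run_batched_evaluation above
    print(list(images), list(prompts))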
Code/sc_dit/inference_batched_with_conf.py ADDED
@@ -0,0 +1,409 @@
1
+ import os
2
+ import wandb
3
+ import matplotlib.pyplot as plt
4
+ from torch.utils.data import DataLoader
5
+ import argparse
6
+ import torch
7
+ from tqdm import tqdm
8
+ from PIL import ImageDraw, ImageFont
9
+ import numpy as np
10
+ import json
11
+
12
+ # multiprocessing for parallel job preparation
13
+ import concurrent.futures
14
+
15
+ # custom imports
16
+ from dataset import get_dataset_info, VLDataset
17
+ from utils import load_eval_data, load_img_and_kps
18
+ from qwen_utils import QwenVLDetector
19
+ # from predict_correspondence_vlm import create_image_with_one_kp
20
+
21
+ # Place the new, fast drawing function here
22
+ # def create_image_with_one_kp_pil(img, kps, kps_idx=0, circ_size=200, **kwargs):
23
+ # img_with_kp = img.copy()
24
+ # draw = ImageDraw.Draw(img_with_kp)
25
+ # cx, cy = kps[kps_idx, 0], kps[kps_idx, 1]
26
+ # radius = circ_size / 10
27
+ # bbox = [cx - radius, cy - radius, cx + radius, cy + radius]
28
+ # draw.ellipse(bbox, outline="red", width=4)
29
+ # return img_with_kp
30
+
31
+ def create_image_with_one_kp_pil(img, kps, kps_idx=0, circ_size=200, add_text=True, **kwargs):
32
+ img_with_kp = img.copy()
33
+ draw = ImageDraw.Draw(img_with_kp)
34
+ cx, cy = kps[kps_idx, 0], kps[kps_idx, 1]
35
+ radius = circ_size / 10
36
+ bbox = [cx - radius, cy - radius, cx + radius, cy + radius]
37
+ draw.ellipse(bbox, outline="red", width=4)
38
+
39
+ if add_text:
40
+ text = "Ref"
41
+ # Try to use a better font, or fall back to the default if not found
42
+ # try:
43
+ font = ImageFont.truetype("DejaVuSans.ttf", size=26)
44
+ # except IOError:
45
+ # print('test')
46
+ # font = ImageFont.load_default()
47
+
48
+ # Get text bounding box for centering
49
+ # print(font)
50
+ bbox_text = draw.textbbox((0, 0), text, font=font)
51
+ text_width = bbox_text[2] - bbox_text[0]
52
+ text_height = bbox_text[3] - bbox_text[1]
53
+
54
+ text_x = cx - text_width // 2
55
+ text_y = cy - text_height // 2
56
+ draw.text((text_x, text_y), text, font=font, fill=(255, 255, 255))  # 'text_anchor' is not an ImageDraw.text argument; the position is already centered manually above
57
+ return img_with_kp
58
+
59
+ # Helper function to process a single keypoint (needed for parallelization)
60
+ def prepare_single_job(task_args):
61
+ img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, category_prompt_sem = task_args
62
+
63
+ if img1_kps[kps_idx, 2] == 1:
64
+ # Use the fast, new function
65
+ img1_kp = create_image_with_one_kp_pil(img1, img1_kps, kps_idx=kps_idx)
66
+
67
+ return {
68
+ "img1": img1,
69
+ "img1_kp": img1_kp,
70
+ "img2": img2,
71
+ "prompt_sem": category_prompt_sem,
72
+ "metadata": { "category": category, "src_id": src_id, "tgt_id": tgt_id, "kps_idx": kps_idx }
73
+ }
74
+ return None
75
+
76
+ def run_batched_evaluation(args, model, system_prompt_sem, system_prompt_bbox, task_prompt_sem, task_prompt_bbox):
77
+ """
78
+ Runs the entire evaluation using a batched approach for maximum efficiency.
79
+ """
80
+ # --- Create save directories if provided ---
81
+ if args.SAVE_DIR:
82
+ stage2_dir = os.path.join(args.SAVE_DIR, "stage2_semantics", args.EVAL_DATASET, args.EXP_NOTE)
83
+ stage3_dir = os.path.join(args.SAVE_DIR, "stage3_bboxes", args.EVAL_DATASET, args.EXP_NOTE)
84
+ os.makedirs(stage2_dir, exist_ok=True)
85
+ os.makedirs(stage3_dir, exist_ok=True)
86
+ print(f"Intermediate results will be saved to: {args.SAVE_DIR}")
87
+
88
+
89
+ data_dir, categories, split = get_dataset_info(args, split='test')
90
+ results_table = wandb.Table(columns=["category", "src_id", "tgt_id", "kpt_id", "plot", "extracted_semantics", "src_response", "src_bbox", "src_input_size", "tgt_response", "tgt_bbox", "target_input_size", "tgt_bbox_conf"])
91
+
92
+ # --- STAGE 1: PREPARE ALL INFERENCE JOBS FIRST ---
93
+ print("--- Stage 1: Preparing all inference jobs... ---")
94
+ # First, create a flat list of all tasks to be done
95
+ tasks = []
96
+ for category in categories:
97
+ # category_prompt_sem = task_prompt_sem.format(class_name=category)
98
+ files, kps, _, _ = load_eval_data(args, data_dir, category, split)
99
+ N = len(files) // 2
100
+
101
+ # for pair_idx in range(N):
102
+ for pair_idx in tqdm(range(N), desc=f"Adding {category} pairs"):
103
+ img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE)
104
+ img2, _ = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, img_size=args.ANNO_SIZE)
105
+ src_id = files[2*pair_idx].split('/')[-1].split('.')[0]
106
+ tgt_id = files[2*pair_idx+1].split('/')[-1].split('.')[0]
107
+
108
+ for kps_idx in range(img1_kps.shape[0]):
109
+ # point_x = int(img1_kps[kps_idx, 0])
110
+ # point_y = int(img1_kps[kps_idx, 1])
111
+ # get two decimal places
112
+ point_x = f"{img1_kps[kps_idx, 0]:.2f}"
113
+ point_y = f"{img1_kps[kps_idx, 1]:.2f}"
114
+ category_prompt_sem = task_prompt_sem.format(class_name=category, point_x=point_x, point_y=point_y)
115
+ tasks.append((img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, category_prompt_sem))
116
+
117
+ # Now, process the flat list of tasks in parallel
118
+ inference_jobs = []
119
+ # Use max_workers=None to use all available CPU cores
120
+ with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
121
+ # `map` will apply `prepare_single_job` to each item in `tasks` across multiple processes
122
+ # `tqdm` provides a progress bar
123
+ results = list(tqdm(executor.map(prepare_single_job, tasks), total=len(tasks), desc="Preparing Jobs"))
124
+
125
+ # Filter out None results (from invisible keypoints)
126
+ inference_jobs = [job for job in results if job is not None]
127
+
128
+ print(f"Prepared {len(inference_jobs)} total jobs.")
129
+
130
+
131
+ # --- STAGE 2: BATCHED SEMANTIC EXTRACTION (1st Model Call) ---
132
+ # print("\n--- Stage 2: Running batched semantic extraction... ---")
133
+ # src_images = [job["img1_kp"] for job in inference_jobs]
134
+ # src_prompts = [job["prompt_sem"] for job in inference_jobs]
135
+
136
+ # dataset_sem = VLDataset(src_images, src_prompts)
137
+ # loader_sem = DataLoader(dataset_sem, batch_size=args.BATCH_SIZE, num_workers=args.NUM_WORKERS, collate_fn=lambda x: x, pin_memory=True)
138
+
139
+ # all_src_results = []
140
+ # # for batch in loader_sem:
141
+ # for batch in tqdm(loader_sem, desc="Processing semantic extraction batches"):
142
+ # images_pil, text_prompts = zip(*batch)
143
+ # # Use the base `chat` method as it seems you want raw text back
144
+ # # NOTE: You might need a batched version of `chat` if it doesn't support lists
145
+ # results = model.chat_batch(list(images_pil), list(text_prompts), system_prompt_sem, 'self-handled') # Assuming model.chat is updated for batching
146
+ # all_src_results.extend(results)
147
+
148
+ # # Extract semantics and prepare for the next stage
149
+ # for i, job in enumerate(inference_jobs):
150
+ # response_text = all_src_results[i]['response']
151
+ # job["metadata"]["src_input_size"] = all_src_results[i]['input_size']
152
+ # job["src_response"] = response_text
153
+ # job["extracted_semantics"] = response_text.split("Keypoint component:")[-1].strip()
154
+ # job["src_bbox"] = model._get_bounding_boxes(response_text, all_src_results[i]['input_size'], job['img1_kp'].size)
155
+
156
+
157
+ # --- SAVE STAGE 2 RESULTS IMMEDIATELY ---
158
+ # --- STAGE 2: BATCHED SEMANTIC EXTRACTION (OR LOAD FROM CACHE) ---
159
+ # NEW: Add a command-line argument `--skip-stage2` to your parser for this
160
+ if args.SKIP_STAGE2 and os.path.exists(stage2_dir):
161
+ print(f"--- Stage 2: SKIPPED. Loading results from {stage2_dir}... ---")
162
+
163
+ # This requires a way to match jobs to files. We'll use a dictionary lookup.
164
+ # Create a mapping from a unique job ID to its index in inference_jobs
165
+ job_map = {
166
+ f"{job['metadata']['category']}_{job['metadata']['src_id']}_{job['metadata']['tgt_id']}_kps{job['metadata']['kps_idx']}": i
167
+ for i, job in enumerate(inference_jobs)
168
+ }
169
+
170
+ # Loop through the saved files
171
+ for filename in tqdm(os.listdir(stage2_dir), desc="Loading cached Stage 2 results"):
172
+ if filename.endswith(".json"):
173
+ # Reconstruct the job ID from the filename
174
+ job_id = filename.replace('.json', '')
175
+ if job_id in job_map:
176
+ job_index = job_map[job_id]
177
+
178
+ # Load the data and populate the job
179
+ with open(os.path.join(stage2_dir, filename), 'r') as f:
180
+ cached_data = json.load(f)
181
+
182
+ inference_jobs[job_index]["extracted_semantics"] = cached_data["extracted_semantics"]
183
+ # You can add other fields if needed later, e.g., src_response
184
+ inference_jobs[job_index]["src_response"] = cached_data["full_response"]
185
+
186
+ else:
187
+ # This is your original Stage 2 code block
188
+ print("\n--- Stage 2: Running batched semantic extraction... ---")
189
+ src_images = [job["img1_kp"] for job in inference_jobs]
190
+ src_prompts = [job["prompt_sem"] for job in inference_jobs]
191
+
192
+ dataset_sem = VLDataset(src_images, src_prompts)
193
+ loader_sem = DataLoader(dataset_sem, batch_size=args.BATCH_SIZE, num_workers=args.NUM_WORKERS, collate_fn=lambda x: x, pin_memory=True)
194
+
195
+ all_src_results = []
196
+ for batch in tqdm(loader_sem, desc="Processing semantic extraction batches"):
197
+ images_pil, text_prompts = zip(*batch)
198
+ results = model.chat_batch(list(images_pil), list(text_prompts), system_prompt_sem, 'self-handled')
199
+ all_src_results.extend(results)
200
+
201
+ for i, job in enumerate(inference_jobs):
202
+ response_text = all_src_results[i]['response']
203
+ job["metadata"]["src_input_size"] = all_src_results[i]['input_size']
204
+ job["src_response"] = response_text
205
+ job["extracted_semantics"] = response_text.split("Keypoint component:")[-1].strip()
206
+ job["src_bbox"] = model._get_bounding_boxes(response_text, all_src_results[i]['input_size'], job['img1_kp'].size)
207
+
208
+ # The 'SAVE STAGE 2 RESULTS' block can remain as is, it will just be skipped if you load from cache
209
+ if args.SAVE_DIR and not args.SKIP_STAGE2:
210
+ print(f"Saving Stage 2 results to {stage2_dir}...")
211
+ for job in tqdm(inference_jobs, desc="Saving Stage 2 results"):
212
+ meta = job["metadata"]
213
+ if args.DEBUG:
214
+ filename = f"DEBUG:{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
215
+ else:
216
+ filename = f"{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
217
+ save_path = os.path.join(stage2_dir, filename)
218
+ src_bbox = list(job["src_bbox"].values())[0]['bbox'] if job["src_bbox"] and job["src_bbox"].values() else None
219
+ output_data = {
220
+ "metadata": meta,
221
+ "full_response": job["src_response"],
222
+ "extracted_semantics": job["extracted_semantics"],
223
+ "source_bbox": src_bbox,
224
+ }
225
+ with open(save_path, 'w') as f:
226
+ json.dump(output_data, f, indent=4)
227
+
228
+ # --- STAGE 3: BATCHED BOUNDING BOX PREDICTION (2nd Model Call) ---
229
+ print("\n--- Stage 3: Running batched bounding box prediction... ---")
230
+ if args.DEBUG:
231
+ # print("Debug mode enabled: Saving intermediate results to disk.")
232
+ print("Debug mode enabled: Using img1 for bounding box prediction.")
233
+ tgt_images = [job["img1"] for job in inference_jobs]
234
+ else:
235
+ tgt_images = [job["img2"] for job in inference_jobs]
236
+ tgt_prompts = [task_prompt_bbox.format(class_name=job["metadata"]["category"], extracted_semantics=job["extracted_semantics"]) for job in inference_jobs]
237
+
238
+ dataset_bbox = VLDataset(tgt_images, tgt_prompts)
239
+ loader_bbox = DataLoader(dataset_bbox, batch_size=args.BATCH_SIZE, num_workers=args.NUM_WORKERS, collate_fn=lambda x: x)
240
+
241
+ all_tgt_results = []
242
+ # for batch in loader_bbox:
243
+ for batch in tqdm(loader_bbox, desc="Processing bounding box prediction batches"):
244
+ images_pil, text_prompts = zip(*batch)
245
+ # Assuming model.predict is updated for batching
246
+ results = model.predict_batch(list(images_pil), list(text_prompts), system_prompt_bbox, 'object_with_conf')
247
+ all_tgt_results.extend(results)
248
+
249
+ # --- SAVE STAGE 3 RESULTS IMMEDIATELY ---
250
+ if args.SAVE_DIR:
251
+ print(f"Saving Stage 3 results to {stage3_dir}...")
252
+ for i, job in enumerate(tqdm(inference_jobs, desc="Saving Stage 3 results")):
253
+ meta = job["metadata"]
254
+ tgt_result = all_tgt_results[i]
255
+
256
+ # --- NEW: Use the structured output directly ---
257
+ tgt_bbox_dict = tgt_result.get('bbox_with_label', {})
258
+ # Safely get the first prediction object from the dictionary
259
+ prediction = list(tgt_bbox_dict.values())[0] if tgt_bbox_dict else None
260
+
261
+ # Extract bbox and confidence
262
+ tgt_bbox = prediction.get('bbox') if prediction else None
263
+ tgt_confidence = prediction.get('confidence') if prediction else None
264
+ # --- END NEW ---
265
+
266
+ filename = f"{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
267
+ save_path = os.path.join(stage3_dir, filename)
268
+
269
+ output_data = {
270
+ "metadata": meta,
271
+ "full_response": tgt_result['response'],
272
+ "target_bbox": tgt_bbox,
273
+ "target_input_size": tgt_result['input_size'],
274
+ "target_confidence": tgt_confidence # NEW: Save confidence to the file
275
+ }
276
+ with open(save_path, 'w') as f:
277
+ json.dump(output_data, f, indent=4)
278
+
279
+
280
+
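The Stage 3 save block above and the Stage 4 logging below both assume each element returned by model.predict_batch(...) carries a 'response', an 'input_size', and a 'bbox_with_label' dictionary. The real contract lives in QwenVLDetector (qwen_utils.py, not shown in this hunk); the sketch below only illustrates the fields this file actually reads, with invented values.

# Hypothetical example of one Stage 3 result as consumed by this script.
example_tgt_result = {
    "response": "<think>...</think> {\"left ear\": [412, 388, 468, 455]}",
    "input_size": (840, 840),  # resolution the model actually saw
    "bbox_with_label": {
        "left ear": {"bbox": [412, 388, 468, 455], "confidence": 0.83},
    },
}
prediction = list(example_tgt_result["bbox_with_label"].values())[0]
tgt_bbox, tgt_conf = prediction["bbox"], prediction["confidence"]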
281
+ # --- STAGE 4: LOGGING ---
282
+ print("\n--- Stage 4: Plotting and logging results to WandB... ---")
283
+ for i, job in tqdm(enumerate(inference_jobs), total=len(inference_jobs), desc="Logging results"):
284
+ meta = job["metadata"]
285
+ src_bbox_dict = job["src_bbox"]
286
+ tgt_result = all_tgt_results[i]
287
+
288
+ # --- NEW: Use the structured output directly for both src and tgt ---
289
+ src_prediction = list(src_bbox_dict.values())[0] if src_bbox_dict else None
290
+ src_bbox = src_prediction.get('bbox') if src_prediction else None
291
+
292
+ tgt_bbox_dict = tgt_result.get('bbox_with_label', {})
293
+ tgt_prediction = list(tgt_bbox_dict.values())[0] if tgt_bbox_dict else None
294
+
295
+ tgt_bbox = tgt_prediction.get('bbox') if tgt_prediction else None
296
+ tgt_confidence = tgt_prediction.get('confidence') if tgt_prediction else None
297
+ # --- END NEW ---
298
+
299
+ # Create plot
300
+ img1_kp = job["img1_kp"]
301
+ img2 = job["img1"] if args.DEBUG else job["img2"]
302
+
303
+ fig, axes = plt.subplots(1, 2, figsize=(12, 6))
304
+ axes[0].imshow(np.array(img1_kp))
305
+ axes[0].axis('off')
306
+ axes[0].set_title('Source Image with Keypoint')
307
+ axes[1].imshow(np.array(img2))
308
+ axes[1].axis('off')
309
+ axes[1].set_title('Target Image with Bounding Box')
310
+
311
+ if src_bbox:
312
+ abs_x1, abs_y1, abs_x2, abs_y2 = src_bbox
313
+ axes[0].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
314
+
315
+ if tgt_bbox:
316
+ abs_x1, abs_y1, abs_x2, abs_y2 = tgt_bbox
317
+ axes[1].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
318
+
319
+ # RECOMMENDED: Add confidence to the title for quick debugging
320
+ axes[1].set_title(f'Target (Conf: {tgt_confidence:.2f})' if tgt_confidence is not None else 'Target (Conf: N/A)')
321
+
322
+ else:
323
+ # RECOMMENDED: Display confidence on the plot when no box is found
324
+ display_text = f"Not Found (Conf: {tgt_confidence:.2f})" if tgt_confidence is not None else "Not Found"
325
+ axes[1].text(img2.width / 2, img2.height / 2, display_text, color='red', fontsize=12, ha='center', va='center')
326
+
327
+ fig.tight_layout()
328
+ wandb_img = wandb.Image(fig)
329
+ plt.close(fig)
330
+
331
+ # The column names in your table init
332
+ # "category", "src_id", "tgt_id", "kpt_id", "plot", "extracted_semantics", "src_response", "src_bbox", "src_input_size", "tgt_response", "tgt_bbox", "target_input_size", "tgt_bbox_conf"
333
+ results_table.add_data(
334
+ meta["category"],
335
+ meta["src_id"],
336
+ meta["tgt_id"],
337
+ meta["kps_idx"],
338
+ wandb_img,
339
+ job["extracted_semantics"],
340
+ job["src_response"],
341
+ str(src_bbox),
342
+ job["metadata"]["src_input_size"],
343
+ tgt_result['response'],
344
+ str(tgt_bbox),
345
+ tgt_result['input_size'],
346
+ tgt_confidence # NEW: Add the confidence value here
347
+ )
348
+
349
+ wandb.log({"evaluation_results": results_table})
350
+
351
+
352
+ # ===================================================================
353
+ # 4. MAIN ENTRY POINT, SIMPLIFIED TO CALL THE BATCHED RUNNER
354
+ # ===================================================================
355
+ def main(args):
356
+ with open(args.SYSTEM_PROMPT_SEM, 'r') as f:
357
+ system_prompt_sem = f.read()
358
+ with open(args.SYSTEM_PROMPT_BBOX, 'r') as f:
359
+ system_prompt_bbox = f.read()
360
+ with open(args.TASK_PROMPT_SEM, 'r') as f:
361
+ task_prompt_sem = f.read()
362
+ with open(args.TASK_PROMPT_BBOX, 'r') as f:
363
+ task_prompt_bbox = f.read()
364
+
365
+ # Initialize the Qwen VLM model
366
+ print("Initializing Qwen model...")
367
+ model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name=args.MODEL_NAME, device="auto", flash_attn=True)
368
+ # model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name="Qwen/Qwen2.5-VL-7B-Instruct", device="auto", flash_attn=True)
369
+
370
+ # Initialize WandB
371
+ print("Initializing WandB...")
372
+ wandb.init(
373
+ project=args.EVAL_DATASET,
374
+ entity="amazon_intern2025",
375
+ name=args.EXP_NOTE,
376
+ config=vars(args)
377
+ )
378
+
379
+ # Run the optimized evaluation
380
+ run_batched_evaluation(args, model, system_prompt_sem, system_prompt_bbox, task_prompt_sem, task_prompt_bbox)
381
+
382
+ print('Finished processing all categories and logging results.')
383
+ wandb.finish()
384
+ print('WandB run finished.')
385
+
386
+
387
+ if __name__ == "__main__":
388
+ parser = argparse.ArgumentParser(description="Predict correspondence using Qwen VLM.")
389
+ # ... (all your existing arguments) ...
390
+ parser.add_argument('--SYSTEM_PROMPT_SEM', type=str, required=True)
391
+ parser.add_argument('--SYSTEM_PROMPT_BBOX', type=str, required=True)
392
+ parser.add_argument('--TASK_PROMPT_SEM', type=str, required=True)
393
+ parser.add_argument('--TASK_PROMPT_BBOX', type=str, required=True)
394
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, choices=['pascal', 'spair'])
395
+ parser.add_argument('--ANNO_SIZE', type=int, default=840)
396
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0)
397
+ parser.add_argument('--EXP_NOTE', type=str, default='Qwen VLM demo')
398
+ parser.add_argument('--SAVE_DIR', type=str, default='./results_vlm/', help='Directory to save intermediate results.')
399
+ parser.add_argument('--DEBUG', action='store_true', help='Enable debug mode for verbose output.') # decouple the prediction
400
+ parser.add_argument('--MODEL_NAME', type=str, default='Qwen/Qwen2.5-VL-32B-Instruct', help='Model name for Qwen VLM.')
401
+ parser.add_argument('--SKIP_STAGE2', action='store_true', help='Skip Stage 2 and load results from SAVE_DIR.')
402
+
403
+ # --- ADD THESE NEW ARGUMENTS for controlling batching ---
404
+ parser.add_argument('--BATCH_SIZE', type=int, default=4, help='Batch size for GPU inference.')
405
+ parser.add_argument('--NUM_WORKERS', type=int, default=8, help='Number of CPU cores for data loading.')
406
+
407
+
408
+ args = parser.parse_args()
409
+ main(args)
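For quick reference, the script above can also be driven programmatically with an argparse.Namespace that mirrors the parser definitions. This is only a sketch: the prompt paths are placeholders, and running it requires the model weights, WandB access, and the prompt files to exist.

# Minimal sketch; equivalent to passing the same flags on the command line.
import argparse as _argparse

demo_args = _argparse.Namespace(
    SYSTEM_PROMPT_SEM="prompts/sys_sem.txt", SYSTEM_PROMPT_BBOX="prompts/sys_bbox.txt",
    TASK_PROMPT_SEM="prompts/tsk_sem.txt", TASK_PROMPT_BBOX="prompts/tsk_bbox.txt",
    EVAL_DATASET="spair", ANNO_SIZE=840, TEST_SAMPLE=0,
    EXP_NOTE="Qwen VLM demo", SAVE_DIR="./results_vlm/", DEBUG=False,
    MODEL_NAME="Qwen/Qwen2.5-VL-32B-Instruct", SKIP_STAGE2=False,
    BATCH_SIZE=4, NUM_WORKERS=8,
)
# main(demo_args)  # uncomment to run outside the CLI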
Code/sc_dit/inference_batched_with_crop.py ADDED
@@ -0,0 +1,327 @@
1
+ import os
2
+ import wandb
3
+ import matplotlib.pyplot as plt
4
+ from torch.utils.data import DataLoader
5
+ import argparse
6
+ import torch
7
+ from tqdm import tqdm
8
+ from PIL import ImageDraw, ImageFont
9
+ import numpy as np
10
+ import json
11
+
12
+ # multiprocessing for parallel job preparation
13
+ import concurrent.futures
14
+
15
+ # custom imports
16
+ from dataset import get_dataset_info, VLDatasetPaired
17
+ from utils import load_eval_data, load_img_and_kps, square_bbox_to_multiple
18
+ from qwen_utils import QwenVLDetector
19
+ # from inference_batched import create_image_with_one_kp_pil
20
+
21
+ def create_image_with_one_kp_pil(img, query_point, circ_size=200, add_text=True, **kwargs):
22
+ img_with_kp = img.copy()
23
+ draw = ImageDraw.Draw(img_with_kp)
24
+ # cx, cy = kps[kps_idx, 0], kps[kps_idx, 1]
25
+ cx, cy = query_point[0], query_point[1]
26
+ radius = circ_size / 10
27
+ bbox = [cx - radius, cy - radius, cx + radius, cy + radius]
28
+ draw.ellipse(bbox, outline="red", width=4)
29
+
30
+ if add_text:
31
+ text = "Ref"
32
+ font = ImageFont.truetype("DejaVuSans.ttf", size=26)
33
+ bbox_text = draw.textbbox((0, 0), text, font=font)
34
+ text_width = bbox_text[2] - bbox_text[0]
35
+ text_height = bbox_text[3] - bbox_text[1]
36
+
37
+ text_x = cx - text_width // 2
38
+ text_y = cy - text_height // 2
39
+ draw.text((text_x, text_y), text, font=font, fill=(255, 255, 255)) # centering is done manually via text_x/text_y; Pillow's draw.text has no text_anchor kwarg
40
+ return img_with_kp
41
+
42
+
43
+ # Helper function to process a single keypoint (needed for parallelization)
44
+ def prepare_single_job(task_args):
45
+ img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, kp_semantics = task_args
46
+
47
+ if img1_kps[kps_idx, 2] == 1: # double check if the keypoint is visible
48
+ # Use the fast, new function
49
+ # img1_kp = create_image_with_one_kp_pil(img1, img1_kps, kps_idx=kps_idx)
50
+
51
+ img1_bbox = square_bbox_to_multiple(kp_semantics['proposed_bbox'], multiple=14)
52
+ img1_crop = img1.crop(img1_bbox)
53
+
54
+ query_point = (img1_kps[kps_idx, 0], img1_kps[kps_idx, 1])
55
+
56
+ return {
57
+ "img1": img1,
58
+ "img1_bbox": img1_bbox,
59
+ "img1_crop": img1_crop,
60
+ "img1_kp_semantics": kp_semantics,
61
+ "qeury_point": qeury_point,
62
+ "img2": img2,
63
+ "metadata": { "category": category, "src_id": src_id, "tgt_id": tgt_id, "kps_idx": kps_idx }
64
+ }
65
+ return None
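square_bbox_to_multiple is imported from utils.py, which is not part of this hunk. The sketch below shows one plausible implementation consistent with how it is used here (a square region around the proposed box whose side is rounded up to a multiple of 14); the real helper may differ.

def square_bbox_to_multiple_sketch(bbox, multiple=14):
    """Illustrative only: expand bbox to a square whose side is a multiple of `multiple`."""
    x1, y1, x2, y2 = bbox
    side = max(x2 - x1, y2 - y1)
    side = ((int(side) + multiple - 1) // multiple) * multiple  # round up to the next multiple
    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
    x_new, y_new = int(cx - side / 2), int(cy - side / 2)
    return (x_new, y_new, x_new + side, y_new + side)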
66
+
67
+
68
+ def prepare_inference_job(args):
69
+
70
+ data_dir, categories, split = get_dataset_info(args, split='test')
71
+
72
+ # --- STAGE 1: PREPARE ALL INFERENCE JOBS FIRST ---
73
+ print("--- Stage 1: Preparing all inference jobs... ---")
74
+ # First, create a flat list of all tasks to be done
75
+ tasks = []
76
+ for category in categories:
77
+ # category_prompt_sem = task_prompt_sem.format(class_name=category)
78
+ files, kps, _, used_kps = load_eval_data(args, data_dir, category, split)
79
+ N = len(files) // 2
80
+
81
+ # for pair_idx in range(N):
82
+ for pair_idx in tqdm(range(N), desc=f"Adding {category} pairs"):
83
+ img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE)
84
+ img2, _ = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, img_size=args.ANNO_SIZE)
85
+ src_id = files[2*pair_idx].split('/')[-1].split('.')[0]
86
+ tgt_id = files[2*pair_idx+1].split('/')[-1].split('.')[0]
87
+
88
+ # print('check folder', os.listdir('./results_vlm/spair/GPT-4.1_Actor_Critic_Test_10'))
89
+ # print('all files:', os.listdir(args.SEM_DIR))
90
+
91
+ file_json_path = os.path.join(args.SEM_DIR, category, f"{src_id}.json")
92
+ sem_json = json.load(open(file_json_path, 'r'))
93
+ print('keys:', sem_json.keys())
94
+
95
+ # break
96
+ assert img1_kps.shape[0] == used_kps.shape[0], "Keypoints mismatch"
97
+ n_kps = img1_kps.shape[0]
98
+
99
+ for kps_idx in range(n_kps):
100
+ # print('this is the src_id:', src_id)
101
+ # print('this is the kps_idx:', kps_idx)
102
+ # print('img1_kps:', img1_kps[kps_idx])
103
+ if img1_kps[kps_idx, 2] == 1: # check if the keypoint is visible
104
+ used_kps_id = used_kps[kps_idx].item()
105
+ # print('used_kps_id:', used_kps_id)
106
+ kp_semantics = sem_json['keypoints'][str(used_kps_id)]['actor_output']
107
+ # print('kp_semantics:', kp_semantics)
108
+ tasks.append((img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, kp_semantics))
109
+
110
+ # Now, process the flat list of tasks in parallel
111
+ inference_jobs = []
112
+ # Use up to 8 worker processes here; pass max_workers=None to use all available CPU cores
113
+ with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
114
+ # `map` will apply `prepare_single_job` to each item in `tasks` across multiple processes
115
+ # `tqdm` provides a progress bar
116
+ results = list(tqdm(executor.map(prepare_single_job, tasks), total=len(tasks), desc="Preparing Jobs"))
117
+
118
+ # Filter out None results (from invisible keypoints)
119
+ inference_jobs = [job for job in results if job is not None]
120
+
121
+ return inference_jobs
122
+
123
+
124
+ def run_batched_evaluation(args, model, system_prompt_bbox, task_prompt_bbox, inference_jobs):
125
+ """
126
+ Runs the entire evaluation using a batched approach for maximum efficiency.
127
+ """
128
+
129
+ # --- Create save directories if provided ---
130
+ if args.SAVE_DIR:
131
+ stage3_dir = os.path.join(args.SAVE_DIR, "stage3_bboxes", args.EVAL_DATASET, args.EXP_NOTE)
132
+ os.makedirs(stage3_dir, exist_ok=True)
133
+ print(f"Intermediate results will be saved to: {args.SAVE_DIR}")
134
+
135
+
136
+
137
+ print(f"Total inference jobs prepared: {len(inference_jobs)}")
138
+
139
+
140
+ # --- STAGE 3: BATCHED BOUNDING BOX PREDICTION (2nd Model Call) ---
141
+ print("\n--- Stage 3: Running batched bounding box prediction... ---")
142
+
143
+ src_crops = [job["img1_crop"] for job in inference_jobs]
144
+ tgt_images = [job["img2"] for job in inference_jobs]
145
+ tgt_prompts = [task_prompt_bbox.format(class_name=job["metadata"]["category"],
146
+ part_name=job["img1_kp_semantics"]['part_name'],
147
+ orientation=job["img1_kp_semantics"]['orientation'],
148
+ spatial_location=job["img1_kp_semantics"]['spatial_location'],
149
+ object_facing_direction=job["img1_kp_semantics"]['object_facing_direction']) for job in inference_jobs]
150
+
151
+ dataset_bbox = VLDatasetPaired(src_crops, tgt_images, tgt_prompts)
152
+ loader_bbox = DataLoader(dataset_bbox, batch_size=args.BATCH_SIZE, num_workers=args.NUM_WORKERS, collate_fn=lambda x: x)
153
+
154
+ all_tgt_results = []
155
+ # for batch in loader_bbox:
156
+ for batch in tqdm(loader_bbox, desc="Processing bounding box prediction batches"):
157
+ images1_crop_pil, images2_pil, text_prompts = zip(*batch)
158
+ # Assuming model.predict is updated for batching
159
+ results = model.predict_paired_batch(list(images1_crop_pil), list(images2_pil), list(text_prompts), system_prompt_bbox, 'self-handled')
160
+ # print('results:', results)
161
+
162
+ all_tgt_results.extend(results)
163
+
164
+ if args.DEBUG:
165
+ break
166
+
167
+ # --- SAVE STAGE 3 RESULTS IMMEDIATELY ---
168
+ if args.SAVE_DIR:
169
+ print(f"Saving Stage 3 results to {stage3_dir}...")
170
+ for i, job in enumerate(tqdm(inference_jobs, desc="Saving Stage 3 results")):
171
+ meta = job["metadata"]
172
+ tgt_result = all_tgt_results[i]
173
+
174
+ filename = f"{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
175
+ save_path = os.path.join(stage3_dir, filename)
176
+ output_data = {
177
+ "metadata": meta,
178
+ "target_bbox": tgt_result['img2_bbox'],
179
+ "visible": tgt_result['visible'],
180
+ 'label': tgt_result['label'],
181
+ "crop_size": tgt_result['crop_size'],
182
+ "img_size": tgt_result['img_size'],
183
+ "full_response": tgt_result['reasoning'],
184
+ }
185
+ with open(save_path, 'w') as f:
186
+ json.dump(output_data, f, indent=4)
187
+
188
+ if args.DEBUG:
189
+ break
190
+
191
+ return all_tgt_results
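Each element of all_tgt_results is consumed by the save loop above and by wandb_logging below through a fixed set of keys ('img2_bbox', 'visible', 'label', 'crop_size', 'img_size', 'reasoning', 'raw_response_text'). The actual structure is produced by QwenVLDetector.predict_paired_batch in qwen_utils.py; the example below is only an illustration of those fields with made-up values.

# Hypothetical example of one prediction record as read by this script.
example_result = {
    "img2_bbox": [120, 240, 188, 310],   # predicted box in target-image pixels
    "visible": True,                     # whether the queried part was judged visible
    "label": "left front wheel",
    "crop_size": (210, 210),             # size of the source crop shown to the model
    "img_size": (840, 840),              # size of the target image shown to the model
    "reasoning": "The wheel nearest the camera matches the reference crop...",
    "raw_response_text": "{...}",        # unparsed model output, logged to WandB
}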
192
+
193
+ def dict_to_string(dict_data):
194
+ """
195
+ Converts a dictionary to a string representation.
196
+ """
197
+ return ', '.join(f"{key}: {value}" for key, value in dict_data.items() if value is not None)
198
+
199
+
200
+ def wandb_logging(inference_jobs, all_tgt_results):
201
+ # initialize the wandb table
202
+ results_table = wandb.Table(columns=["category", "src_id", "tgt_id", "kpt_id", "plot", "extracted_semantics", "src_bbox", "tgt_img_size", "src_crop_size", "tgt_response", "tgt_bbox"])
203
+
204
+ print("\n--- Stage 4: Plotting and logging results to WandB... ---")
205
+ # for i, job in enumerate(inference_jobs):
206
+
207
+ for i, job in tqdm(enumerate(inference_jobs), total=len(inference_jobs), desc="Logging results"):
208
+ meta = job["metadata"]
209
+ src_bbox = job["img1_bbox"]
210
+ tgt_result = all_tgt_results[i]
211
+
212
+ # src_bbox = list(src_bbox_dict.values())[0]['bbox'] if src_bbox_dict else None
213
+ # src_bbox = src
214
+ tgt_bbox = tgt_result['img2_bbox'] if tgt_result['visible'] else None
215
+ # tgt_bbox = model.predict_bounding_boxes(job["img2"], tgt_result['response'], tgt_result['input_size'])
216
+
217
+ # Create plot
218
+ # --- COMPLETED PLOTTING LOGIC ---
219
+ # Get the source and target images for plotting
220
+
221
+ img1_kp = create_image_with_one_kp_pil(job["img1"], job["qeury_point"], circ_size=200, add_text=True)
222
+ img2 = job["img2"]
223
+
224
+ fig, axes = plt.subplots(1, 2, figsize=(12, 6))
225
+ axes[0].imshow(np.array(img1_kp))
226
+ axes[0].axis('off')
227
+ axes[0].set_title('Source Image with Keypoint')
228
+ axes[1].imshow(np.array(img2))
229
+ axes[1].axis('off')
230
+ axes[1].set_title('Target Image with Bounding Box')
231
+ if src_bbox:
232
+ abs_x1, abs_y1, abs_x2, abs_y2 = src_bbox
233
+ axes[0].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
234
+
235
+ if tgt_bbox:
236
+ abs_x1, abs_y1, abs_x2, abs_y2 = tgt_bbox
237
+ axes[1].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
238
+ else:
239
+ axes[1].text(img2.width / 2, img2.height / 2, "No bounding box found", color='red', fontsize=12, ha='center', va='center')
240
+
241
+ fig.tight_layout()
242
+
243
+ wandb_img = wandb.Image(fig)
244
+ plt.close(fig)
245
+
246
+
247
+ results_table.add_data(
248
+ meta["category"],
249
+ meta["src_id"],
250
+ meta["tgt_id"],
251
+ meta["kps_idx"],
252
+ wandb_img,
253
+ dict_to_string(job["img1_kp_semantics"]),
254
+ str(src_bbox),
255
+ str(all_tgt_results[i]['img_size']),
256
+ str(all_tgt_results[i]['crop_size']),
257
+ tgt_result['raw_response_text'],
258
+ str(tgt_bbox),
259
+ )
260
+
261
+ if args.DEBUG:
262
+ break
263
+
264
+ wandb.log({"evaluation_results": results_table})
265
+
266
+
267
+
268
+ # ===================================================================
269
+ # 4. MAIN ENTRY POINT, SIMPLIFIED TO CALL THE BATCHED RUNNER
270
+ # ===================================================================
271
+ def main(args):
272
+ with open(args.SYSTEM_PROMPT_BBOX, 'r') as f:
273
+ system_prompt_bbox = f.read()
274
+ with open(args.TASK_PROMPT_BBOX, 'r') as f:
275
+ task_prompt_bbox = f.read()
276
+
277
+ # Initialize the Qwen VLM model
278
+ print("Initializing Qwen model...")
279
+ model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name=args.MODEL_NAME, device="auto", flash_attn=True)
280
+ # model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name="Qwen/Qwen2.5-VL-7B-Instruct", device="auto", flash_attn=True)
281
+
282
+ # Initialize WandB
283
+ print("Initializing WandB...")
284
+ wandb.init(
285
+ project=args.EVAL_DATASET,
286
+ entity="amazon_intern2025",
287
+ name=args.EXP_NOTE,
288
+ config=vars(args)
289
+ )
290
+
291
+ # prepare inference jobs
292
+ inference_jobs = prepare_inference_job(args)
293
+
294
+ # Run the optimized evaluation
295
+ all_tgt_results = run_batched_evaluation(args, model, system_prompt_bbox, task_prompt_bbox, inference_jobs)
296
+
297
+ # Log results to WandB
298
+ wandb_logging(inference_jobs, all_tgt_results)
299
+
300
+
301
+ print('Finished processing all categories and logging results.')
302
+ wandb.finish()
303
+ print('WandB run finished.')
304
+
305
+
306
+ if __name__ == "__main__":
307
+ parser = argparse.ArgumentParser(description="Predict correspondence using Qwen VLM.")
308
+ # ... (all your existing arguments) ...
309
+ parser.add_argument('--SYSTEM_PROMPT_BBOX', type=str, required=True)
310
+ parser.add_argument('--TASK_PROMPT_BBOX', type=str, required=True)
311
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, choices=['pascal', 'spair'])
312
+ parser.add_argument('--SEM_DIR', type=str, required=True, help='Directory containing semantic annotations.')
313
+ parser.add_argument('--ANNO_SIZE', type=int, default=840)
314
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0)
315
+ parser.add_argument('--EXP_NOTE', type=str, default='Qwen VLM demo')
316
+ parser.add_argument('--SAVE_DIR', type=str, default='./results_vlm/', help='Directory to save intermediate results.')
317
+
318
+ parser.add_argument('--DEBUG', action='store_true', help='Enable debug mode for verbose output.') # decouple the prediction
319
+ parser.add_argument('--MODEL_NAME', type=str, default='Qwen/Qwen2.5-VL-32B-Instruct', help='Model name for Qwen VLM.')
320
+
321
+ # --- ADD THESE NEW ARGUMENTS for controlling batching ---
322
+ parser.add_argument('--BATCH_SIZE', type=int, default=4, help='Batch size for GPU inference.')
323
+ parser.add_argument('--NUM_WORKERS', type=int, default=8, help='Number of CPU cores for data loading.')
324
+
325
+
326
+ args = parser.parse_args()
327
+ main(args)
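VLDatasetPaired is imported from dataset.py and not shown in this hunk. Given the identity collate_fn (lambda x: x) and the zip(*batch) unpacking in run_batched_evaluation, it is presumably a thin Dataset over (source_crop, target_image, prompt) triples, along the lines of this sketch (a stand-in, not the actual class):

from torch.utils.data import Dataset

class VLDatasetPairedSketch(Dataset):
    """Illustrative stand-in for dataset.VLDatasetPaired."""
    def __init__(self, src_crops, tgt_images, prompts):
        assert len(src_crops) == len(tgt_images) == len(prompts)
        self.items = list(zip(src_crops, tgt_images, prompts))

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        # Returned as-is; the identity collate_fn keeps PIL images un-batched.
        return self.items[idx]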
Code/sc_dit/inference_batched_with_crop_ort.py ADDED
@@ -0,0 +1,355 @@
1
+ import os
2
+ import wandb
3
+ import matplotlib.pyplot as plt
4
+ from torch.utils.data import DataLoader
5
+ import argparse
6
+ import torch
7
+ from tqdm import tqdm
8
+ from PIL import ImageDraw, ImageFont
9
+ import numpy as np
10
+ import json
11
+
12
+ # multiprocessing for parallel job preparation
13
+ import concurrent.futures
14
+
15
+ # custom imports
16
+ from dataset import get_dataset_info, VLDatasetPaired
17
+ from utils import load_eval_data, load_img_and_kps, square_bbox_to_multiple
18
+ from qwen_utils import QwenVLDetector
19
+ # from inference_batched import create_image_with_one_kp_pil
20
+ from inference_gpt_actor_critic_ort_with_instruction import describe_orientation
21
+
22
+ def create_image_with_one_kp_pil(img, query_point, circ_size=200, add_text=True, **kwargs):
23
+ img_with_kp = img.copy()
24
+ draw = ImageDraw.Draw(img_with_kp)
25
+ # cx, cy = kps[kps_idx, 0], kps[kps_idx, 1]
26
+ cx, cy = query_point[0], query_point[1]
27
+ radius = circ_size / 10
28
+ bbox = [cx - radius, cy - radius, cx + radius, cy + radius]
29
+ draw.ellipse(bbox, outline="red", width=4)
30
+
31
+ if add_text:
32
+ text = "Ref"
33
+ font = ImageFont.truetype("DejaVuSans.ttf", size=26)
34
+ bbox_text = draw.textbbox((0, 0), text, font=font)
35
+ text_width = bbox_text[2] - bbox_text[0]
36
+ text_height = bbox_text[3] - bbox_text[1]
37
+
38
+ text_x = cx - text_width // 2
39
+ text_y = cy - text_height // 2
40
+ draw.text((text_x, text_y), text, font=font, fill=(255, 255, 255)) # centering is done manually via text_x/text_y; Pillow's draw.text has no text_anchor kwarg
41
+ return img_with_kp
42
+
43
+
44
+ # Helper function to process a single keypoint (needed for parallelization)
45
+ def prepare_single_job(task_args):
46
+ img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, kp_semantics, src_ort_hint, tgt_ort_hint = task_args
47
+
48
+ if img1_kps[kps_idx, 2] == 1: # double check if the keypoint is visible
49
+ # Use the fast, new function
50
+ # img1_kp = create_image_with_one_kp_pil(img1, img1_kps, kps_idx=kps_idx)
51
+
52
+ img1_bbox = square_bbox_to_multiple(kp_semantics['proposed_bbox'], multiple=14)
53
+ img1_crop = img1.crop(img1_bbox)
54
+
55
+ query_point = (img1_kps[kps_idx, 0], img1_kps[kps_idx, 1])
56
+
57
+ return {
58
+ "img1": img1,
59
+ "img1_bbox": img1_bbox,
60
+ "img1_crop": img1_crop,
61
+ "img1_kp_semantics": kp_semantics,
62
+ "src_ort_hint": src_ort_hint,
63
+ "tgt_ort_hint": tgt_ort_hint,
64
+ "qeury_point": qeury_point,
65
+ "img2": img2,
66
+ "metadata": { "category": category, "src_id": src_id, "tgt_id": tgt_id, "kps_idx": kps_idx }
67
+ }
68
+ return None
69
+
70
+
71
+ def prepare_inference_job(args):
72
+
73
+ data_dir, categories, split = get_dataset_info(args, split='test')
74
+
75
+ # --- STAGE 1: PREPARE ALL INFERENCE JOBS FIRST ---
76
+ print("--- Stage 1: Preparing all inference jobs... ---")
77
+ # First, create a flat list of all tasks to be done
78
+ tasks = []
79
+ for category in categories:
80
+ # category_prompt_sem = task_prompt_sem.format(class_name=category)
81
+ files, kps, _, used_kps = load_eval_data(args, data_dir, category, split)
82
+ N = len(files) // 2
83
+
84
+ # for pair_idx in range(N):
85
+ for pair_idx in tqdm(range(N), desc=f"Adding {category} pairs"):
86
+ img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE)
87
+ img2, _ = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, img_size=args.ANNO_SIZE)
88
+ src_id = files[2*pair_idx].split('/')[-1].split('.')[0]
89
+ tgt_id = files[2*pair_idx+1].split('/')[-1].split('.')[0]
90
+
91
+ # print('check folder', os.listdir('./results_vlm/spair/GPT-4.1_Actor_Critic_Test_10'))
92
+ # print('all files:', os.listdir(args.SEM_DIR))
93
+
94
+ file_json_path = os.path.join(args.SEM_DIR, category, f"{src_id}.json")
95
+ sem_json = json.load(open(file_json_path, 'r'))
96
+ print('keys:', sem_json.keys())
97
+
98
+ src_ort_hint_path = os.path.join(args.ORIENTATION_DIR, category, f"{src_id}.json")
99
+ tgt_ort_hint_path = os.path.join(args.ORIENTATION_DIR, category, f"{tgt_id}.json")
100
+
101
+ with open(src_ort_hint_path, 'r') as f:
102
+ src_orientation_json = json.load(f)
103
+
104
+ with open(tgt_ort_hint_path, 'r') as f:
105
+ tgt_orientation_json = json.load(f)
106
+
107
+ src_ort_hint = str(describe_orientation(
108
+ azimuth=src_orientation_json.get("azimuth"),
109
+ polar=src_orientation_json.get("polar"),
110
+ confidence=src_orientation_json.get("confidence")
111
+ ))
112
+ tgt_ort_hint = str(describe_orientation(
113
+ azimuth=tgt_orientation_json.get("azimuth"),
114
+ polar=tgt_orientation_json.get("polar"),
115
+ confidence=tgt_orientation_json.get("confidence")
116
+ ))
117
+
118
+ # break
119
+ assert img1_kps.shape[0] == used_kps.shape[0], "Keypoints mismatch"
120
+ n_kps = img1_kps.shape[0]
121
+
122
+ for kps_idx in range(n_kps):
123
+ # print('this is the src_id:', src_id)
124
+ # print('this is the kps_idx:', kps_idx)
125
+ # print('img1_kps:', img1_kps[kps_idx])
126
+ if img1_kps[kps_idx, 2] == 1: # check if the keypoint is visible
127
+ used_kps_id = used_kps[kps_idx].item()
128
+ # print('used_kps_id:', used_kps_id)
129
+ kp_semantics = sem_json['keypoints'][str(used_kps_id)]['actor_output']
130
+ # print('kp_semantics:', kp_semantics)
131
+ tasks.append((img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, kp_semantics, src_ort_hint, tgt_ort_hint))
132
+
133
+ # Now, process the flat list of tasks in parallel
134
+ inference_jobs = []
135
+ # Use up to 8 worker processes here; pass max_workers=None to use all available CPU cores
136
+ with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
137
+ # `map` will apply `prepare_single_job` to each item in `tasks` across multiple processes
138
+ # `tqdm` provides a progress bar
139
+ results = list(tqdm(executor.map(prepare_single_job, tasks), total=len(tasks), desc="Preparing Jobs"))
140
+
141
+ # Filter out None results (from invisible keypoints)
142
+ inference_jobs = [job for job in results if job is not None]
143
+
144
+ return inference_jobs
145
+
146
+
147
+ def run_batched_evaluation(args, model, system_prompt_bbox, task_prompt_bbox, inference_jobs):
148
+ """
149
+ Runs the entire evaluation using a batched approach for maximum efficiency.
150
+ """
151
+
152
+ # --- Create save directories if provided ---
153
+ if args.SAVE_DIR:
154
+ stage3_dir = os.path.join(args.SAVE_DIR, "stage3_bboxes", args.EVAL_DATASET, args.EXP_NOTE)
155
+ os.makedirs(stage3_dir, exist_ok=True)
156
+ print(f"Intermediate results will be saved to: {args.SAVE_DIR}")
157
+
158
+
159
+
160
+ print(f"Total inference jobs prepared: {len(inference_jobs)}")
161
+
162
+
163
+ # --- STAGE 3: BATCHED BOUNDING BOX PREDICTION (2nd Model Call) ---
164
+ print("\n--- Stage 3: Running batched bounding box prediction... ---")
165
+
166
+ src_crops = [job["img1_crop"] for job in inference_jobs]
167
+ tgt_images = [job["img2"] for job in inference_jobs]
168
+ tgt_prompts = [task_prompt_bbox.format(class_name=job["metadata"]["category"],
169
+ part_name=job["img1_kp_semantics"]['part_name'],
170
+ part_location=job["img1_kp_semantics"]['part_location'],
171
+ spatial_location=job["img1_kp_semantics"]['spatial_location'],
172
+ object_facing_direction=job["img1_kp_semantics"]['object_facing_direction'],
173
+ orientation_hint_src=job["src_ort_hint"],orientation_hint_tgt=job["tgt_ort_hint"])
174
+ for job in inference_jobs]
175
+
176
+ dataset_bbox = VLDatasetPaired(src_crops, tgt_images, tgt_prompts)
177
+ loader_bbox = DataLoader(dataset_bbox, batch_size=args.BATCH_SIZE, num_workers=args.NUM_WORKERS, collate_fn=lambda x: x)
178
+
179
+ all_tgt_results = []
180
+ # for batch in loader_bbox:
181
+ for batch in tqdm(loader_bbox, desc="Processing bounding box prediction batches"):
182
+ images1_crop_pil, images2_pil, text_prompts = zip(*batch)
183
+ # Assuming model.predict is updated for batching
184
+ results = model.predict_paired_batch(list(images1_crop_pil), list(images2_pil), list(text_prompts), system_prompt_bbox, 'self-handled')
185
+ # print('results:', results)
186
+
187
+ all_tgt_results.extend(results)
188
+
189
+ if args.DEBUG:
190
+ break
191
+
192
+ # --- SAVE STAGE 3 RESULTS IMMEDIATELY ---
193
+ if args.SAVE_DIR:
194
+ print(f"Saving Stage 3 results to {stage3_dir}...")
195
+ for i, job in enumerate(tqdm(inference_jobs, desc="Saving Stage 3 results")):
196
+ meta = job["metadata"]
197
+ tgt_result = all_tgt_results[i]
198
+
199
+ filename = f"{meta['category']}_{meta['src_id']}_{meta['tgt_id']}_kps{meta['kps_idx']}.json"
200
+ save_path = os.path.join(stage3_dir, filename)
201
+ output_data = {
202
+ "metadata": meta,
203
+ "target_bbox": tgt_result['img2_bbox'],
204
+ "visible": tgt_result['visible'],
205
+ 'label': tgt_result['label'],
206
+ "crop_size": tgt_result['crop_size'],
207
+ "img_size": tgt_result['img_size'],
208
+ "full_response": tgt_result['reasoning'],
209
+ }
210
+ with open(save_path, 'w') as f:
211
+ json.dump(output_data, f, indent=4)
212
+
213
+ if args.DEBUG:
214
+ break
215
+
216
+ return all_tgt_results
217
+
218
+ def dict_to_string(dict_data):
219
+ """
220
+ Converts a dictionary to a string representation.
221
+ """
222
+ return ', '.join(f"{key}: {value}" for key, value in dict_data.items() if value is not None)
223
+
224
+
225
+ def wandb_logging(inference_jobs, all_tgt_results):
226
+ # initialize the wandb table
227
+ results_table = wandb.Table(columns=["category", "src_id", "tgt_id", "kpt_id", "plot", "extracted_semantics", "src_bbox", "tgt_img_size", "src_crop_size", "tgt_response", "tgt_bbox", "src_ort_hint", "tgt_ort_hint"])
228
+
229
+ print("\n--- Stage 4: Plotting and logging results to WandB... ---")
230
+ # for i, job in enumerate(inference_jobs):
231
+
232
+ for i, job in tqdm(enumerate(inference_jobs), total=len(inference_jobs), desc="Logging results"):
233
+ meta = job["metadata"]
234
+ src_bbox = job["img1_bbox"]
235
+ tgt_result = all_tgt_results[i]
236
+
237
+ # src_bbox = list(src_bbox_dict.values())[0]['bbox'] if src_bbox_dict else None
238
+ # src_bbox = src
239
+ tgt_bbox = tgt_result['img2_bbox'] if tgt_result['visible'] else None
240
+ # tgt_bbox = model.predict_bounding_boxes(job["img2"], tgt_result['response'], tgt_result['input_size'])
241
+
242
+ # Create plot
243
+ # --- COMPLETED PLOTTING LOGIC ---
244
+ # Get the source and target images for plotting
245
+
246
+ img1_kp = create_image_with_one_kp_pil(job["img1"], job["qeury_point"], circ_size=200, add_text=True)
247
+ img2 = job["img2"]
248
+
249
+ fig, axes = plt.subplots(1, 2, figsize=(12, 6))
250
+ axes[0].imshow(np.array(img1_kp))
251
+ axes[0].axis('off')
252
+ axes[0].set_title('Source Image with Keypoint')
253
+ axes[1].imshow(np.array(img2))
254
+ axes[1].axis('off')
255
+ axes[1].set_title('Target Image with Bounding Box')
256
+ if src_bbox:
257
+ abs_x1, abs_y1, abs_x2, abs_y2 = src_bbox
258
+ axes[0].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
259
+
260
+ if tgt_bbox:
261
+ abs_x1, abs_y1, abs_x2, abs_y2 = tgt_bbox
262
+ axes[1].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
263
+ else:
264
+ axes[1].text(img2.width / 2, img2.height / 2, "No bounding box found", color='red', fontsize=12, ha='center', va='center')
265
+
266
+ fig.tight_layout()
267
+
268
+ wandb_img = wandb.Image(fig)
269
+ plt.close(fig)
270
+
271
+
272
+ results_table.add_data(
273
+ meta["category"],
274
+ meta["src_id"],
275
+ meta["tgt_id"],
276
+ meta["kps_idx"],
277
+ wandb_img,
278
+ dict_to_string(job["img1_kp_semantics"]),
279
+ str(src_bbox),
280
+ str(all_tgt_results[i]['img_size']),
281
+ str(all_tgt_results[i]['crop_size']),
282
+ tgt_result['raw_response_text'],
283
+ str(tgt_bbox),
284
+ job["src_ort_hint"],
285
+ job["tgt_ort_hint"]
286
+ )
287
+
288
+ if args.DEBUG:
289
+ break
290
+
291
+ wandb.log({"evaluation_results": results_table})
292
+
293
+
294
+
295
+ # ===================================================================
296
+ # 4. MAIN ENTRY POINT, SIMPLIFIED TO CALL THE BATCHED RUNNER
297
+ # ===================================================================
298
+ def main(args):
299
+ with open(args.SYSTEM_PROMPT_BBOX, 'r') as f:
300
+ system_prompt_bbox = f.read()
301
+ with open(args.TASK_PROMPT_BBOX, 'r') as f:
302
+ task_prompt_bbox = f.read()
303
+
304
+ # Initialize the Qwen VLM model
305
+ print("Initializing Qwen model...")
306
+ model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name=args.MODEL_NAME, device="auto", flash_attn=True)
307
+ # model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name="Qwen/Qwen2.5-VL-7B-Instruct", device="auto", flash_attn=True)
308
+
309
+ # Initialize WandB
310
+ print("Initializing WandB...")
311
+ wandb.init(
312
+ project=args.EVAL_DATASET,
313
+ entity="amazon_intern2025",
314
+ name=args.EXP_NOTE,
315
+ config=vars(args)
316
+ )
317
+
318
+ # prepare inference jobs
319
+ inference_jobs = prepare_inference_job(args)
320
+
321
+ # Run the optimized evaluation
322
+ all_tgt_results = run_batched_evaluation(args, model, system_prompt_bbox, task_prompt_bbox, inference_jobs)
323
+
324
+ # Log results to WandB
325
+ wandb_logging(inference_jobs, all_tgt_results)
326
+
327
+
328
+ print('Finished processing all categories and logging results.')
329
+ wandb.finish()
330
+ print('WandB run finished.')
331
+
332
+
333
+ if __name__ == "__main__":
334
+ parser = argparse.ArgumentParser(description="Predict correspondence using Qwen VLM.")
335
+ # ... (all your existing arguments) ...
336
+ parser.add_argument('--SYSTEM_PROMPT_BBOX', type=str, required=True)
337
+ parser.add_argument('--TASK_PROMPT_BBOX', type=str, required=True)
338
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, choices=['pascal', 'spair'])
339
+ parser.add_argument('--SEM_DIR', type=str, required=True, help='Directory containing semantic annotations.')
340
+ parser.add_argument('--ORIENTATION_DIR', type=str, default=None, help='Directory containing orientation annotations.')
341
+ parser.add_argument('--ANNO_SIZE', type=int, default=840)
342
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0)
343
+ parser.add_argument('--EXP_NOTE', type=str, default='Qwen VLM demo')
344
+ parser.add_argument('--SAVE_DIR', type=str, default='./results_vlm/', help='Directory to save intermediate results.')
345
+
346
+ parser.add_argument('--DEBUG', action='store_true', help='Enable debug mode for verbose output.') # decouple the prediction
347
+ parser.add_argument('--MODEL_NAME', type=str, default='Qwen/Qwen2.5-VL-32B-Instruct', help='Model name for Qwen VLM.')
348
+
349
+ # --- ADD THESE NEW ARGUMENTS for controlling batching ---
350
+ parser.add_argument('--BATCH_SIZE', type=int, default=4, help='Batch size for GPU inference.')
351
+ parser.add_argument('--NUM_WORKERS', type=int, default=8, help='Number of CPU cores for data loading.')
352
+
353
+
354
+ args = parser.parse_args()
355
+ main(args)
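describe_orientation is imported from inference_gpt_actor_critic_ort_with_instruction.py and, from the call sites above, maps Orient-Anything-style azimuth/polar angles plus a confidence into a textual orientation hint. The stand-in below is purely illustrative (the angle-to-phrase mapping is an assumption, not the project's actual convention):

def describe_orientation_sketch(azimuth=None, polar=None, confidence=None):
    """Illustrative only: turn azimuth/polar angles into a coarse textual hint."""
    if azimuth is None or (confidence is not None and confidence < 0.5):
        return "orientation unknown"
    headings = ["facing the camera", "facing left", "facing away from the camera", "facing right"]
    hint = headings[int(((azimuth + 45) % 360) // 90)]
    if polar is not None and polar > 30:
        hint += ", viewed from above"
    return hint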
Code/sc_dit/inference_gpt_actor_critic.py ADDED
@@ -0,0 +1,403 @@
1
+ import argparse
2
+ import wandb
3
+ from PIL import Image
4
+ from typing import Dict, Any, Tuple, Optional
5
+ import openai
6
+ import json
7
+ import time
8
+ from tqdm import tqdm
9
+ import numpy as np
10
+ import os
11
+ import logging
12
+
13
+ # custom imports
14
+ from utils_actor_critic import _actor_prompt, _critic_prompt, _encode_image_to_data_url, _make_overlay, _extract_json, save_actor_critic_output, check_if_done, get_existing_result
15
+ from dataset import get_dataset_info
16
+ from utils import load_eval_data, resize, load_img_and_kps
17
+ from openai_api_key import OPENAI_API_KEY
18
+
19
+
20
+ PART_VOCAB = {
21
+ "boat": ["hull", "bow", "stern", "mast", "sail", "trampoline", "rudder"],
22
+ "car": ["hood", "trunk", "roof", "wheel", "door", "bumper", "windshield"],
23
+ "person": ["head", "face", "arm", "hand", "leg", "torso", "foot"],
24
+ "dog": ["head", "snout", "ear", "leg", "tail", "torso"],
25
+ "bicycle": ["wheel", "handlebar", "pedal", "seat", "frame", "chain"],
26
+ "aeroplane": ["wing", "fuselage", "cockpit", "engine", "tail", "nose", "landing gear"],
27
+ "bus": ["wheel", "door", "window", "roof", "headlight", "rear", "side panel"],
28
+ "motorbike": ["wheel", "seat", "handlebar", "exhaust", "headlight", "fuel tank"],
29
+ "chair": ["seat", "backrest", "leg", "armrest"],
30
+ "tvmonitor": ["screen", "bezel", "stand", "corner"],
31
+ "train": ["car", "cabin", "roof", "window", "door", "headlight", "wheel"],
32
+ "bird": ["head", "beak", "wing", "tail", "foot", "eye"],
33
+ "sheep": ["head", "leg", "torso", "tail", "ear"],
34
+ "horse": ["head", "mane", "leg", "tail", "torso"],
35
+ "cow": ["head", "horn", "leg", "torso", "tail", "ear"],
36
+ "bottle": ["cap", "neck", "body", "base", "shoulder"],
37
+ "pottedplant": ["pot", "stem", "leaf", "flower"],
38
+ "cat": ["head", "ear", "paw", "tail", "torso", "eye"],
39
+ }
40
+
41
+ def _call_gpt4o_vision(
42
+ image: Image.Image,
43
+ prompt: str,
44
+ model: str = "gpt-4o-mini", # or "gpt-4o" / "gpt-4o-audio-video-preview"
45
+ max_tokens: int = 256,
46
+ timeout: int = 120,
47
+ temperature: float = 0.2,
48
+ reasoning_effort: str = "minimal",
49
+ ) -> str:
50
+ """
51
+ One-shot vision query. Returns raw assistant text.
52
+
53
+ OpenAI chat messages with images follow the documented format:
54
+ [{role:"user", content:[{type:"image_url",image_url:{url:data_url}}, {type:"text",text:prompt}]}] :contentReference[oaicite:0]{index=0}
55
+ """
56
+ client = openai.OpenAI(api_key=OPENAI_API_KEY)
57
+
58
+ data_url = _encode_image_to_data_url(image)
59
+ messages = [
60
+ {
61
+ "role": "user",
62
+ "content": [
63
+ {"type": "image_url", "image_url": {"url": data_url}},
64
+ {"type": "text", "text": prompt},
65
+ ],
66
+ }
67
+ ]
68
+
69
+ if model == 'gpt-5':
70
+ response = client.chat.completions.create(
71
+ model=model,
72
+ messages=messages,
73
+ # temperature=temperature,
74
+ reasoning_effort=reasoning_effort,
75
+ )
76
+ else:
77
+ response = client.chat.completions.create(
78
+ model=model,
79
+ messages=messages,
80
+ max_tokens=max_tokens,
81
+ temperature=temperature,
82
+ )
83
+
84
+ return response.choices[0].message.content
85
+
86
+
87
+ def _call_gpt4o_critic(
88
+ crop_img: Image.Image,
89
+ overlay_img: Image.Image,
90
+ prompt_text: str,
91
+ model: str = "gpt-4o-mini",
92
+ max_tokens: int = 256,
93
+ temperature: float = 0.2,
94
+ reasoning_effort: str = "minimal",
95
+ ) -> str:
96
+ """Send crop + overlay + text prompt to GPT-4o and return raw text."""
97
+ client = openai.OpenAI(api_key=OPENAI_API_KEY)
98
+
99
+ crop_url = _encode_image_to_data_url(crop_img)
100
+ overlay_url = _encode_image_to_data_url(overlay_img)
101
+
102
+ messages = [
103
+ {
104
+ "role": "user",
105
+ "content": [
106
+ {"type": "image_url", "image_url": {"url": crop_url}},
107
+ {"type": "image_url", "image_url": {"url": overlay_url}},
108
+ {"type": "text", "text": prompt_text},
109
+ ],
110
+ }
111
+ ]
112
+ # resp = client.chat.completions.create(
113
+ # model=model,
114
+ # messages=messages,
115
+ # max_tokens=max_tokens,
116
+ # temperature=temperature,
117
+ # )
118
+
119
+ if model == 'gpt-5':
120
+ resp = client.chat.completions.create(
121
+ model=model,
122
+ messages=messages,
123
+ reasoning_effort=reasoning_effort,
124
+ # temperature=temperature,
125
+ )
126
+ else:
127
+ resp = client.chat.completions.create(
128
+ model=model,
129
+ messages=messages,
130
+ max_tokens=max_tokens,
131
+ temperature=temperature,
132
+ )
133
+
134
+ return resp.choices[0].message.content
135
+
136
+
137
+ def actor_describe(
138
+ image: Image.Image,
139
+ point: Tuple[int, int],
140
+ class_name: str,
141
+ feedback: Optional[str] = None,
142
+ previous_json: Optional[Dict[str, Any]] = None,
143
+ model: str = "gpt-4o-mini",
144
+ max_tokens: int = 1024,
145
+ reasoning_effort: str = "minimal",
146
+ orientation_hint: str | None = None,
147
+ ) -> Optional[Dict[str, Any]]:
148
+ """
149
+ Run GPT-4o Vision as the ‘actor’.
150
+
151
+ • `feedback` …… critic’s message, or None on first pass
152
+ • `previous_json` …… actor’s last JSON (shown to model on revision)
153
+ """
154
+ x, y = point
155
+ prompt = _actor_prompt(x, y, class_name, feedback, previous_json) # ← NEW arg
156
+ if class_name.lower() in PART_VOCAB:
157
+ parts = PART_VOCAB[class_name.lower()]
158
+ part_list = ", ".join(f'"{p}"' for p in parts)
159
+ prompt += (
160
+ f"\n\nReference part vocabulary for '{class_name}':\n"
161
+ f"{part_list}\n"
162
+ "Try to choose the most specific valid part_name from this list."
163
+ )
164
+
165
+ raw = _call_gpt4o_vision(image, prompt, model=model, max_tokens=max_tokens, reasoning_effort=reasoning_effort)
166
+ return _extract_json(raw)
167
+
168
+
169
+ def critic_check(
170
+ image: Image.Image,
171
+ point: Tuple[int, int],
172
+ description_json: Dict[str, Any],
173
+ class_name: str,
174
+ model: str = "gpt-4o-mini",
175
+ orientation_hint: str | None = None,
176
+ hypothesis: str | None = None,
177
+ reasoning_effort: str = "minimal",
178
+ ) -> Dict[str, Any]:
179
+ """Run the critic with crop + overlay."""
180
+ x, y = point
181
+ bbox = description_json.get("proposed_bbox", [0, 0, 0, 0])
182
+ crop = image.crop(bbox)
183
+ overlay = _make_overlay(image, point, bbox)
184
+
185
+ prompt = _critic_prompt(
186
+ description_json, class_name, x, y,
187
+ orientation_hint=orientation_hint,
188
+ hypothesis=hypothesis,
189
+ )
190
+
191
+ if class_name.lower() in PART_VOCAB:
192
+ parts = PART_VOCAB[class_name.lower()]
193
+ prompt += f"\nReference part names for a '{class_name}': {', '.join(parts)}, choose the closest match. (e.g. front right of starboard hull should be bow)"
194
+
195
+ raw = _call_gpt4o_critic(crop, overlay, prompt, model=model, max_tokens=256, reasoning_effort=reasoning_effort)
196
+ parsed = _extract_json(raw)
197
+ if not parsed:
198
+ return {"is_consistent": False, "reason": "Critic returned invalid JSON."}
199
+ return parsed
200
+
201
+ def actor_critic_refine(
202
+ # image_path: str,
203
+ img: Image.Image,
204
+ point: Tuple[int, int],
205
+ class_name: str,
206
+ max_iters: int = 3,
207
+ model: str = "gpt-4o-mini",
208
+ reasoning_effort: str = "minimal",
209
+ ) -> Optional[Dict[str, Any]]:
210
+ """
211
+ Repeatedly run actor → critic until agreement or max_iters reached.
212
+ Returns final accepted description (or last attempt on failure).
213
+ """
214
+ # img = Image.open(image_path).convert("RGB")
215
+ # img = resize(img, args.ANNO_SIZE) # ensure consistent size
216
+ feedback = None
217
+ final_desc = None
218
+
219
+ print("\n=== Actor-Critic Self-Refine ===")
220
+ for i in range(max_iters):
221
+ print(f"\n--- Iteration {i+1}/{max_iters} ---")
222
+ # ---------- Python-side geometry check ---------- #
223
+
224
+ desc = actor_describe(
225
+ img,
226
+ point,
227
+ class_name,
228
+ feedback=feedback,
229
+ previous_json=final_desc, # ← pass last JSON back in
230
+ model=model,
231
+ reasoning_effort=reasoning_effort,
232
+ )
233
+
234
+ # Validate the actor output before using it; a missing or malformed JSON response
+ # would otherwise crash the bbox unpacking below.
+ if not desc:
+ print("Actor failed to return JSON; abort.")
+ break
+
+ x, y = point
+ proposed_bbox = desc.get("proposed_bbox", [])
+ if len(proposed_bbox) != 4:
+ feedback = "Actor did not return a valid proposed_bbox."
+ print("Geometry check failed:", feedback)
+ continue
+
+ x1, y1, x2, y2 = proposed_bbox
+ inside = (x1 < x < x2) and (y1 < y < y2)
+
+ if not inside:
+ feedback = "Keypoint not inside proposed_bbox."
+ print("Geometry check failed:", feedback)
+ continue # skip critic check if geometry is wrong
250
+ print("Actor:", json.dumps(desc, indent=2, ensure_ascii=False))
251
+ final_desc = desc
252
+
253
+ # print('this is the desc', desc)
254
+
255
+ report = critic_check(img, point, desc, class_name, model, reasoning_effort=reasoning_effort)
256
+ print("Critic:", json.dumps(report, indent=2, ensure_ascii=False))
257
+
258
+ if report.get("is_consistent"):
259
+ print("Agreement reached.")
260
+ return final_desc, report
261
+
262
+ feedback = report.get("reason", "Description inconsistent.")
263
+ print("Revising…")
264
+ # time.sleep(1) # avoid rapid-fire rate-limit hits
265
+
266
+ print("Max iterations exhausted.")
267
+ return final_desc, report
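For orientation, the fields that the logging code in main() below reads from the actor and critic outputs look roughly like this. The values are invented for illustration; the authoritative schema is defined by the prompts built in utils_actor_critic.

example_actor_desc = {
    "part_name": "bow",
    "orientation": "front",
    "spatial_location": "upper-left of the hull",
    "object_facing_direction": "facing right",
    "proposed_bbox": [132, 210, 260, 338],  # x1, y1, x2, y2 in ANNO_SIZE pixels
}
example_critic_report = {"is_consistent": True, "reason": "Bbox tightly covers the bow region."}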
268
+
269
+ def main(args):
270
+ run = wandb.init(
271
+ project=args.EVAL_DATASET,
272
+ entity="amazon_intern2025",
273
+ name=args.EXP_NOTE,
274
+ config=vars(args)
275
+ )
276
+
277
+ columns = [
278
+ "category",
279
+ "img_id",
280
+ "kpt_id",
281
+ "preview",
282
+ "part_name",
283
+ "orientation",
284
+ "spatial_location",
285
+ "object_facing_direction",
286
+ "bbox",
287
+ "actor_desc",
288
+ "critic_report",
289
+ ]
290
+
291
+ results_tb = wandb.Table(columns=columns)
292
+
293
+ data_dir, categories, split = get_dataset_info(args, split="test")
294
+ for cat in categories:
295
+ files, kps, _, used_kps = load_eval_data(args, data_dir, cat, split)
296
+ N = len(files) // 2
297
+ for pair_idx in tqdm(range(N), desc=f"Processing {cat}"):
298
+ img1, img1_kps = load_img_and_kps(
299
+ idx=2 * pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE
300
+ )
301
+
302
+ assert img1_kps.shape[0] == used_kps.shape[0], "Keypoints mismatch"
303
+ n_kps = img1_kps.shape[0]
304
+
305
+ for kps_idx in range(n_kps):
306
+ # If the query point is zero, skip it entirely
307
+ if np.all(img1_kps[kps_idx, :2].numpy() == (0,0)):
308
+ continue
309
+
310
+ # --- CHANGED: Define all identifiers upfront ---
311
+ used_kps_id = used_kps[kps_idx].item()
312
+ img_id = files[2 * pair_idx].split("/")[-1]
313
+ save_directory = os.path.join(args.SAVE_DIR, args.EVAL_DATASET, args.EXP_NOTE)
314
+ query_kp = tuple(img1_kps[kps_idx, :2].numpy())
315
+
316
+ # Initialize descriptors
317
+ actor_desc = None
318
+ critic_report = None
319
+
320
+ # --- CHANGED: Use the new getter function ---
321
+ existing_result = get_existing_result(
322
+ save_dir=save_directory, category=cat, image_id=img_id, kpt_id=used_kps_id
323
+ )
324
+
325
+ if existing_result:
326
+ print(f"Found existing result for kpt_id {used_kps_id} in {img_id}. Logging it.")
327
+ # Load data from the file
328
+ actor_desc = existing_result.get("actor_output")
329
+ critic_report = existing_result.get("critic_output")
330
+ else:
331
+ print(f"Processing new kpt_id {used_kps_id} for {img_id}.")
332
+ # This expensive part now only runs if the result doesn't exist
333
+ new_actor_desc, new_critic_report = actor_critic_refine(
334
+ img=img1,
335
+ point=query_kp,
336
+ class_name=cat.lower(),
337
+ max_iters=args.MAX_ITERS,
338
+ model=args.MODEL_NAME,
339
+ )
340
+
341
+ # Handle cases where the actor/critic might fail
342
+ if not new_actor_desc or not new_critic_report:
343
+ print(f"Actor/Critic failed for kpt {used_kps_id} on {img_id}. Skipping.")
344
+ continue
345
+
346
+ # Assign to the main variables
347
+ actor_desc = new_actor_desc
348
+ critic_report = new_critic_report
349
+
350
+ # Save the new result
351
+ save_actor_critic_output(
352
+ save_dir=save_directory,
353
+ category=cat,
354
+ image_id=img_id,
355
+ kpt_id=used_kps_id,
356
+ query_point=(int(query_kp[0]), int(query_kp[1])),
357
+ actor_desc=actor_desc,
358
+ critic_report=critic_report,
359
+ model_name=args.MODEL_NAME
360
+ )
361
+
362
+ # --- CHANGED: Unified logging for both new and existing results ---
363
+ if actor_desc and critic_report:
364
+ bbox = actor_desc.get("proposed_bbox", [])
365
+ img1_preview = wandb.Image(_make_overlay(img1, query_kp, bbox), caption=f"Keypoint {query_kp}")
366
+ results_tb.add_data(
367
+ cat,
368
+ img_id,
369
+ used_kps_id,
370
+ img1_preview,
371
+ actor_desc.get("part_name", ""),
372
+ actor_desc.get("orientation", ""),
373
+ actor_desc.get("spatial_location", ""),
374
+ actor_desc.get("object_facing_direction", ""),
375
+ actor_desc.get("proposed_bbox", []),
376
+ json.dumps(actor_desc, ensure_ascii=False),
377
+ json.dumps(critic_report, ensure_ascii=False),
378
+ )
379
+ # break
380
+ # break
381
+
382
+ # Save results to WandB
383
+ run.log({"results": results_tb})
384
+ wandb.finish()
385
+ # pass
386
+
387
+
388
+ if __name__ == "__main__":
389
+ parser = argparse.ArgumentParser(description="Run the Actor-Critic model for inference.")
390
+ parser.add_argument('--MODEL_NAME', type=str, default='gpt-4.1', help='Model name to use for inference.')
391
+ parser.add_argument('--ANNO_SIZE', type=int, default=840)
392
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0)
393
+ parser.add_argument('--EXP_NOTE', type=str, default='Qwen VLM demo')
394
+ parser.add_argument('--SAVE_DIR', type=str, default='./results_vlm/', help='Directory to save intermediate results.')
395
+ parser.add_argument('--EVAL_DATASET', type=str, default='spair', help='Dataset to evaluate on.')
396
+ parser.add_argument('--MAX_ITERS', type=int, default=5, help='Maximum iterations for actor-critic refinement.')
397
+
398
+ parser.add_argument('--BATCH_SIZE', type=int, default=16, help='Batch size for GPU inference.')
399
+ parser.add_argument('--NUM_WORKERS', type=int, default=8, help='Number of CPU cores for data loading.')
400
+ parser.add_argument('--CONCURRENCY', type=int, default=8, help='Number of concurrent requests to OpenAI API.')
401
+
402
+ args = parser.parse_args()
403
+ main(args)
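
For readers tracing the caching logic above: a minimal, hypothetical sketch of how a lookup like `get_existing_result` could be implemented. The real helper lives in `utils_actor_critic.py` (not shown here); the per-image JSON layout with a top-level `keypoints` dict is inferred from `near_side_correction.py` later in this commit, and the path convention and key format below are assumptions.

import json
import os
from typing import Any, Dict, Optional


def get_existing_result_sketch(save_dir: str, category: str, image_id: str, kpt_id: int) -> Optional[Dict[str, Any]]:
    """Hypothetical cache lookup: return stored actor/critic outputs for one keypoint, if present."""
    # Assumed layout: <save_dir>/<category>/<image stem>.json containing {"keypoints": {str(kpt_id): {...}}}
    json_path = os.path.join(save_dir, category, f"{os.path.splitext(image_id)[0]}.json")
    if not os.path.exists(json_path):
        return None
    with open(json_path, "r") as f:
        data = json.load(f)
    # JSON object keys are strings, so look the keypoint up by str(kpt_id)
    return data.get("keypoints", {}).get(str(kpt_id))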
Code/sc_dit/inference_gpt_actor_critic_ort_with_instruction.py ADDED
@@ -0,0 +1,535 @@
1
+ import argparse
2
+ import wandb
3
+ from PIL import Image
4
+ from typing import Dict, Any, Tuple, Optional
5
+ import openai
6
+ import json
7
+ import time
8
+ from tqdm import tqdm
9
+ import numpy as np
10
+ import os
11
+ import logging
12
+
13
+ # custom imports
14
+ from utils_actor_critic_with_ort import _actor_prompt, _critic_prompt, _encode_image_to_data_url, _make_overlay, _extract_json, save_actor_critic_output, check_if_done, get_existing_result, get_all_parts_from_schema
15
+ from dataset import get_dataset_info
16
+ from utils import load_eval_data, resize, load_img_and_kps
17
+ from openai_api_key import OPENAI_API_KEY
18
+ from part_dictionary import PART_SCHEMA
19
+
20
+
21
+ def _call_gpt4o_vision(
22
+ image: Image.Image,
23
+ prompt: str,
24
+ model: str = "gpt-4o-mini", # or "gpt-4o" / "gpt-4o-audio-video-preview"
25
+ max_tokens: int = 256,
26
+ timeout: int = 120,
27
+ temperature: float = 0.2,
28
+ reasoning_effort: str = "minimal",
29
+ ) -> str:
30
+ """
31
+ One-shot vision query. Returns raw assistant text.
32
+
33
+ OpenAI chat messages with images follow the documented format:
34
+ [{role:"user", content:[{type:"image_url", image_url:{url:data_url}}, {type:"text", text:prompt}]}]
35
+ """
36
+ client = openai.OpenAI(api_key=OPENAI_API_KEY)
37
+
38
+ data_url = _encode_image_to_data_url(image)
39
+ messages = [
40
+ {
41
+ "role": "user",
42
+ "content": [
43
+ {"type": "image_url", "image_url": {"url": data_url}},
44
+ {"type": "text", "text": prompt},
45
+ ],
46
+ }
47
+ ]
48
+
49
+ if model == 'gpt-5':
50
+ response = client.chat.completions.create(
51
+ model=model,
52
+ messages=messages,
53
+ # temperature=temperature,
54
+ reasoning_effort=reasoning_effort,
55
+ )
56
+ else:
57
+ response = client.chat.completions.create(
58
+ model=model,
59
+ messages=messages,
60
+ max_tokens=max_tokens,
61
+ temperature=temperature,
62
+ )
63
+
64
+ return response.choices[0].message.content
65
+
66
+
67
+ def _call_gpt4o_critic(
68
+ crop_img: Image.Image,
69
+ overlay_img: Image.Image,
70
+ prompt_text: str,
71
+ model: str = "gpt-4o-mini",
72
+ max_tokens: int = 256,
73
+ temperature: float = 0.2,
74
+ reasoning_effort: str = "minimal",
75
+ ) -> str:
76
+ """Send crop + overlay + text prompt to GPT-4o and return raw text."""
77
+ client = openai.OpenAI(api_key=OPENAI_API_KEY)
78
+
79
+ crop_url = _encode_image_to_data_url(crop_img)
80
+ overlay_url = _encode_image_to_data_url(overlay_img)
81
+
82
+ messages = [
83
+ {
84
+ "role": "user",
85
+ "content": [
86
+ {"type": "image_url", "image_url": {"url": crop_url}},
87
+ {"type": "image_url", "image_url": {"url": overlay_url}},
88
+ {"type": "text", "text": prompt_text},
89
+ ],
90
+ }
91
+ ]
92
+ # resp = client.chat.completions.create(
93
+ # model=model,
94
+ # messages=messages,
95
+ # max_tokens=max_tokens,
96
+ # temperature=temperature,
97
+ # )
98
+
99
+ if model == 'gpt-5':
100
+ resp = client.chat.completions.create(
101
+ model=model,
102
+ messages=messages,
103
+ reasoning_effort=reasoning_effort,
104
+ # temperature=temperature,
105
+ )
106
+ else:
107
+ resp = client.chat.completions.create(
108
+ model=model,
109
+ messages=messages,
110
+ max_tokens=max_tokens,
111
+ temperature=temperature,
112
+ )
113
+
114
+ return resp.choices[0].message.content
115
+
116
+
117
+ def actor_describe(
118
+ image: Image.Image,
119
+ point: Tuple[int, int],
120
+ class_name: str,
121
+ feedback: Optional[str] = None,
122
+ previous_json: Optional[Dict[str, Any]] = None,
123
+ model: str = "gpt-4o-mini",
124
+ max_tokens: int = 1024,
125
+ reasoning_effort: str = "low",
126
+ orientation_hint: str | None = None,
127
+ part_schema: Dict[str, Any] = PART_SCHEMA,
128
+ ) -> Optional[Dict[str, Any]]:
129
+ """
130
+ Run GPT-4o Vision as the ‘actor’.
131
+
132
+ • `feedback` …… critic’s message, or None on first pass
133
+ • `previous_json` …… actor’s last JSON (shown to model on revision)
134
+ """
135
+ x, y = point
136
+ prompt = _actor_prompt(
137
+ x=x,
138
+ y=y,
139
+ class_name=class_name,
140
+ feedback=feedback,
141
+ previous_json=previous_json,
142
+ orientation_hint=orientation_hint,
143
+ part_schema=part_schema,
144
+ ) # ← NEW arg
145
+ print(f"\nActor prompt:\n{prompt}\n")
146
+ # if class_name.lower() in PART_VOCAB:
147
+ # parts = PART_VOCAB[class_name.lower()]
148
+ # part_list = ", ".join(f'"{p}"' for p in parts)
149
+ # prompt += (
150
+ # f"\n\nReference part vocabulary for '{class_name}':\n"
151
+ # f"{part_list}\n"
152
+ # "Try to choose the most specific valid part_name from this list."
153
+ # )
154
+ class_specific_schema = part_schema.get(class_name.lower())
155
+ if class_specific_schema:
156
+ # Generate a flat list of ALL valid parts using the helper function
157
+ all_parts = get_all_parts_from_schema(class_specific_schema)
158
+
159
+ # Create a clean, sorted, unique list for the prompt
160
+ part_list_str = ", ".join(f'"{p}"' for p in sorted(list(set(all_parts))))
161
+
162
+ # Append the reference vocabulary to the prompt
163
+ prompt += (
164
+ f"\n\n---"
165
+ f"\n## Reference Vocabulary\n"
166
+ f"You are ENCOURAGED to choose the `part_name` from this official list for a '{class_name}':\n"
167
+ f"[{part_list_str}]"
168
+ )
169
+
170
+
171
+ raw = _call_gpt4o_vision(image, prompt, model=model, max_tokens=max_tokens, reasoning_effort=reasoning_effort)
172
+ return _extract_json(raw)
173
+
174
+
175
+ def critic_check(
176
+ image: Image.Image,
177
+ point: Tuple[int, int],
178
+ description_json: Dict[str, Any],
179
+ class_name: str,
180
+ model: str = "gpt-4o-mini",
181
+ orientation_hint: str | None = None,
182
+ reasoning_effort: str = "minimal",
183
+ part_schema: Dict[str, Any] = PART_SCHEMA,
184
+ ) -> Dict[str, Any]:
185
+ """Run the critic with crop + overlay."""
186
+ x, y = point
187
+ bbox = description_json.get("proposed_bbox", [0, 0, 0, 0])
188
+ crop = image.crop(bbox)
189
+ overlay = _make_overlay(image, point, bbox)
190
+
191
+ prompt = _critic_prompt(
192
+ description_json, class_name, x, y,
193
+ orientation_hint=orientation_hint,
194
+ part_schema=part_schema, # pass the schema for dynamic instructions
195
+ )
196
+
197
+ print(f"\nCritic prompt:\n{prompt}\n")
198
+
199
+ raw = _call_gpt4o_critic(crop, overlay, prompt, model=model, max_tokens=256, reasoning_effort=reasoning_effort)
200
+ parsed = _extract_json(raw)
201
+ if not parsed:
202
+ return {"is_consistent": False, "reason": "Critic returned invalid JSON."}
203
+ return parsed
204
+
205
+
206
+ def _score_feedback(feedback_type: str) -> int:
207
+ """Scores the quality of the critic's feedback type."""
208
+ score_map = {
209
+ "Approved": 4, # Perfect: Correct and specific.
210
+ "Go Deeper": 3, # Excellent: Correct part, just needs refinement.
211
+ "Check Other Children": 2, # Good: Wrong guess, but the anchor is correct.
212
+ "Geometric Error": 2, # Good: Correct part, but location is wrong.
213
+ "Go Back": 1, # Bad: Fundamentally wrong part.
214
+ }
215
+ return score_map.get(feedback_type, 0) # Default to 0 for any unknown type
216
+
217
+
218
+ def actor_critic_refine(
219
+ # image_path: str,
220
+ img: Image.Image,
221
+ point: Tuple[int, int],
222
+ class_name: str,
223
+ max_iters: int = 3,
224
+ model: str = "gpt-4o-mini",
225
+ orientation_hint: Optional[str] = None,
226
+ reasoning_effort: str = "low", # NEW ARG
227
+ part_schema: Dict[str, Any] = PART_SCHEMA, # pass the schema for dynamic instructions
228
+ ) -> Optional[Dict[str, Any]]:
229
+ """
230
+ Repeatedly run actor → critic until agreement or max_iters reached.
231
+ Returns final accepted description (or last attempt on failure).
232
+ """
233
+ # img = Image.open(image_path).convert("RGB")
234
+ # img = resize(img, args.ANNO_SIZE) # ensure consistent size
235
+ feedback = None
236
+
237
+ # set tracking variables
238
+ best_desc = None
239
+ prev_desc = None
240
+ best_desc_score = 0 # Start with the lowest possible score
241
+
242
+ print("I am using the new function")
243
+
244
+ print("\n=== Actor-Critic Self-Refine ===")
245
+ for i in range(max_iters):
246
+ print(f"\n--- Iteration {i+1}/{max_iters} ---")
247
+ # ---------- Python-side geometry check ---------- #
248
+
249
+ current_desc = actor_describe(
250
+ img,
251
+ point,
252
+ class_name,
253
+ feedback=feedback,
254
+ previous_json=prev_desc, # ← pass last JSON back in
255
+ model=model,
256
+ orientation_hint=orientation_hint, # NEW ARG
257
+ reasoning_effort=reasoning_effort, # NEW ARG
258
+ part_schema=part_schema, # pass the schema for dynamic instructions
259
+ )
260
+
261
+ if not current_desc:
262
+ print("Actor failed to return JSON; abort.")
263
+ break
264
+ prev_desc = current_desc # Store the last description for the next iteration
265
+
266
+ print('current output from actor:', current_desc)
267
+
268
+ x, y = point
269
+ proposed_bbox = current_desc.get("proposed_bbox", [])
270
+ x1, y1, x2, y2 = proposed_bbox
271
+ inside = (x1 < x < x2) and (y1 < y < y2)
272
+
273
+ print('proposed bbox:', proposed_bbox)
274
+ print('point:', point)
275
+
276
+ if not inside:
277
+ feedback = f"Keypoint not inside proposed_bbox."
278
+ print("Geometry check failed:", feedback)
279
+ continue # skip critic check if geometry is wrong
280
+
281
+
282
+ report = critic_check(
283
+ img,
284
+ point,
285
+ current_desc,
286
+ class_name,
287
+ model,
288
+ orientation_hint=orientation_hint,
289
+ reasoning_effort=reasoning_effort,
290
+ part_schema=part_schema,
291
+ )
292
+ print("Critic report:", json.dumps(report, indent=2, ensure_ascii=False))
293
+
294
+
295
+ # --- NEW: Score and State Management ---
296
+ feedback_type = report.get("feedback_type", "Go Back") # Default to worst case
297
+ current_score = _score_feedback(feedback_type)
298
+ print(f"Iteration Score: {current_score} (Feedback: {feedback_type})")
299
+
300
+ if current_score >= best_desc_score:
301
+ print(f"Found new best description (Score: {current_score} >= {best_desc_score}).")
302
+ best_desc = current_desc
303
+ best_desc_score = current_score
304
+
305
+ # Check for success
306
+ if feedback_type == "Approved":
307
+ print("Agreement reached.")
308
+ return best_desc, report
309
+
310
+ feedback = report.get("reason", "Description inconsistent.")
311
+ print("Revising…")
312
+ print(f"Feedback for next actor: {feedback}")
313
+ # time.sleep(1) # avoid rapid-fire rate-limit hits
314
+
315
+ print("Max iterations exhausted.")
316
+ return best_desc, report
317
+
318
+
319
+ def describe_orientation(azimuth: float, polar: float, *, confidence: float | None = None) -> Dict:
320
+ """
321
+ azimuth: yaw in degrees, 0 = toward viewer, 90 = facing left, 180 = away, 270 = facing right
322
+ polar : elevation in degrees; + up (top view), - down (bottom view); 0 ≈ side
323
+ returns a structured viewer-centric hint
324
+ """
325
+ # az = _bucket_angle(azimuth)
326
+ assert azimuth >= 0 and azimuth < 360, "Azimuth must be in the range [0, 360)"
327
+ az = azimuth
328
+ po = polar
329
+
330
+ # ---- Facing & yaw_bias ---------------------------------------------------
331
+ # Main facing from octants; diagonals become a bias on the nearest cardinal
332
+ if 337.5 <= az or az <= 22.5:
333
+ facing, yaw_bias = "toward viewer", "none"
334
+ elif 22.5 < az <= 67.5:
335
+ facing, yaw_bias = "toward viewer", "left"
336
+ elif 67.5 < az <= 112.5:
337
+ facing, yaw_bias = "facing left", "none"
338
+ elif 112.5 < az <= 157.5:
339
+ facing, yaw_bias = "away from viewer", "left"
340
+ elif 157.5 < az <= 202.5:
341
+ facing, yaw_bias = "away from viewer", "none"
342
+ elif 202.5 < az <= 247.5:
343
+ facing, yaw_bias = "away from viewer", "right"
344
+ elif 247.5 < az <= 292.5:
345
+ facing, yaw_bias = "facing right", "none"
346
+ elif 292.5 < az <= 337.5:
347
+ facing, yaw_bias = "toward viewer", "right"
348
+ else:
349
+ facing, yaw_bias = "unknown", "none"
350
+
351
+ # ---- Elevation bucket ----------------------------------------------------
352
+ if -22.5 < po <= 22.5:
353
+ elevation = "side"
354
+ elif 22.5 < po <= 60:
355
+ elevation = "oblique-top"
356
+ elif po > 60:
357
+ elevation = "top-down"
358
+ elif -60 <= po <= -22.5:
359
+ elevation = "oblique-bottom"
360
+ else: # po < -60
361
+ elevation = "bottom-up"
362
+
363
+ # ---- Near-side in object-centric terms ----------------------------------
364
+ if facing == "facing left":
365
+ near_side = "object-left"
366
+ elif facing == "facing right":
367
+ near_side = "object-right"
368
+ elif facing == "toward viewer" or facing == "away from viewer":
369
+ near_side = "object-right" if yaw_bias == "right" else ("object-left" if yaw_bias == "left" else "none")
370
+ # elif facing == "away from viewer":
371
+ # # flipped: when looking at the back, the opposite side appears nearer
372
+ # near_side = "object-left" if yaw_bias == "left" else ("object-right" if yaw_bias == "right" else "none")
373
+ else:
374
+ near_side = "none"
375
+
376
+ return {
377
+ "facing": facing, # enum (camera-relative)
378
+ "yaw_bias": yaw_bias, # enum (camera-relative)
379
+ "elevation": elevation, # enum (camera-relative)
380
+ "near_side": near_side, # enum (object-centric)
381
+ "confidence": confidence,
382
+ }
383
+
384
+
385
+ def main(args):
386
+ run = wandb.init(
387
+ project=args.EVAL_DATASET,
388
+ entity="amazon_intern2025",
389
+ name=args.EXP_NOTE,
390
+ config=vars(args)
391
+ )
392
+
393
+ columns = [
394
+ "category",
395
+ "img_id",
396
+ "kpt_id",
397
+ "orientation_hint",
398
+ "preview",
399
+ "part_name",
400
+ "part_location",
401
+ "spatial_location",
402
+ "object_facing_direction",
403
+ "bbox",
404
+ "actor_desc",
405
+ "critic_report",
406
+ ]
407
+
408
+ results_tb = wandb.Table(columns=columns)
409
+
410
+ data_dir, categories, split = get_dataset_info(args, split="test")
411
+ for cat in categories:
412
+ files, kps, _, used_kps = load_eval_data(args, data_dir, cat, split)
413
+ N = len(files) // 2
414
+ for pair_idx in tqdm(range(N), desc=f"Processing {cat}"):
415
+ img1, img1_kps = load_img_and_kps(
416
+ idx=2 * pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE
417
+ )
418
+
419
+ img_id = files[2 * pair_idx].split("/")[-1]
420
+ file_name = img_id.split(".")[0]
421
+ orient_path = os.path.join(args.ORIENTATION_PATH, cat, f"{file_name}.json")
422
+
423
+ with open(orient_path, 'r') as f:
424
+ orientation_raw_json = json.load(f)
425
+
426
+ assert img1_kps.shape[0] == used_kps.shape[0], "Keypoints mismatch"
427
+ n_kps = img1_kps.shape[0]
428
+
429
+ for kps_idx in range(n_kps):
430
+ # If the query point is zero, skip it entirely
431
+ if np.all(img1_kps[kps_idx, :2].numpy() == (0,0)):
432
+ continue
433
+
434
+ # --- CHANGED: Define all identifiers upfront ---
435
+ used_kps_id = used_kps[kps_idx].item()
436
+ save_directory = os.path.join(args.SAVE_DIR, args.EVAL_DATASET, args.EXP_NOTE)
437
+ query_kp = tuple(img1_kps[kps_idx, :2].numpy())
438
+
439
+ orientation_hint = describe_orientation(azimuth=orientation_raw_json.get('azimuth',''),
440
+ polar=orientation_raw_json.get('polar',''),
441
+ confidence=orientation_raw_json.get('confidence', ''))
442
+
443
+ # Initialize descriptors
444
+ actor_desc = None
445
+ critic_report = None
446
+
447
+ # --- CHANGED: Use the new getter function ---
448
+ existing_result = get_existing_result(
449
+ save_dir=save_directory, category=cat, image_id=img_id, kpt_id=used_kps_id
450
+ )
451
+
452
+ if existing_result:
453
+ print(f"Found existing result for kpt_id {used_kps_id} in {img_id}. Logging it.")
454
+ # Load data from the file
455
+ actor_desc = existing_result.get("actor_output")
456
+ critic_report = existing_result.get("critic_output")
457
+ else:
458
+ print(f"Processing new kpt_id {used_kps_id} for {img_id}.")
459
+ # This expensive part now only runs if the result doesn't exist
460
+ new_actor_desc, new_critic_report = actor_critic_refine(
461
+ img=img1,
462
+ point=query_kp,
463
+ class_name=cat.lower(),
464
+ max_iters=args.MAX_ITERS,
465
+ model=args.MODEL_NAME,
466
+ orientation_hint=orientation_hint,
467
+ reasoning_effort=args.REASONING_EFFORT,
468
+ )
469
+
470
+ # Handle cases where the actor/critic might fail
471
+ if not new_actor_desc or not new_critic_report:
472
+ print(f"Actor/Critic failed for kpt {used_kps_id} on {img_id}. Skipping.")
473
+ continue
474
+
475
+ # Assign to the main variables
476
+ actor_desc = new_actor_desc
477
+ critic_report = new_critic_report
478
+
479
+ # Save the new result
480
+ save_actor_critic_output(
481
+ save_dir=save_directory,
482
+ category=cat,
483
+ image_id=img_id,
484
+ kpt_id=used_kps_id,
485
+ query_point=(int(query_kp[0]), int(query_kp[1])),
486
+ actor_desc=actor_desc,
487
+ critic_report=critic_report,
488
+ model_name=args.MODEL_NAME
489
+ )
490
+
491
+ # --- CHANGED: Unified logging for both new and existing results ---
492
+ if actor_desc and critic_report:
493
+ bbox = actor_desc.get("proposed_bbox", [])
494
+ img1_preview = wandb.Image(_make_overlay(img1, query_kp, bbox), caption=f"Keypoint {query_kp}")
495
+ results_tb.add_data(
496
+ cat,
497
+ img_id,
498
+ used_kps_id,
499
+ str(orientation_hint),
500
+ img1_preview,
501
+ actor_desc.get("part_name", ""),
502
+ actor_desc.get("part_location", ""),
503
+ actor_desc.get("spatial_location", ""),
504
+ actor_desc.get("object_facing_direction", ""),
505
+ actor_desc.get("proposed_bbox", []),
506
+ json.dumps(actor_desc, ensure_ascii=False),
507
+ json.dumps(critic_report, ensure_ascii=False),
508
+ )
509
+ # break
510
+ # break
511
+
512
+ # Save results to WandB
513
+ run.log({"results": results_tb})
514
+ wandb.finish()
515
+ # pass
516
+
517
+
518
+ if __name__ == "__main__":
519
+ parser = argparse.ArgumentParser(description="Run the Actor-Critic model for inference.")
520
+ parser.add_argument('--MODEL_NAME', type=str, default='gpt-4.1', help='Model name to use for inference.')
521
+ parser.add_argument('--ANNO_SIZE', type=int, default=840)
522
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0)
523
+ parser.add_argument('--EXP_NOTE', type=str, default='Qwen VLM demo')
524
+ parser.add_argument('--SAVE_DIR', type=str, default='./results_vlm/', help='Directory to save intermediate results.')
525
+ parser.add_argument('--EVAL_DATASET', type=str, default='spair', help='Dataset to evaluate on.')
526
+ parser.add_argument('--MAX_ITERS', type=int, default=5, help='Maximum iterations for actor-critic refinement.')
527
+ parser.add_argument('--ORIENTATION_PATH', type=str, required=True, help='Path to orientation hint file.')
528
+
529
+ parser.add_argument('--BATCH_SIZE', type=int, default=16, help='Batch size for GPU inference.')
530
+ parser.add_argument('--NUM_WORKERS', type=int, default=8, help='Number of CPU cores for data loading.')
531
+ parser.add_argument('--CONCURRENCY', type=int, default=8, help='Number of concurrent requests to OpenAI API.')
532
+ parser.add_argument('--REASONING_EFFORT', type=str, default='low', choices=['minimal', 'low'], help='Reasoning effort level for the model.')
533
+
534
+ args = parser.parse_args()
535
+ main(args)
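
As a quick sanity check of the octant and elevation buckets in `describe_orientation`, a short usage sketch (assuming the module above and its dependencies are importable; the expected values follow directly from the thresholds in the function):

# Hypothetical module name taken from the file path above.
from inference_gpt_actor_critic_ort_with_instruction import describe_orientation

hint = describe_orientation(azimuth=45.0, polar=10.0)
# {'facing': 'toward viewer', 'yaw_bias': 'left', 'elevation': 'side',
#  'near_side': 'object-left', 'confidence': None}

hint = describe_orientation(azimuth=300.0, polar=-30.0)
# {'facing': 'toward viewer', 'yaw_bias': 'right', 'elevation': 'oblique-bottom',
#  'near_side': 'object-right', 'confidence': None}

print(hint)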
Code/sc_dit/inference_gpt_batched.py ADDED
@@ -0,0 +1,325 @@
1
+ import os
2
+ import wandb
3
+ import matplotlib.pyplot as plt
4
+ from torch.utils.data import DataLoader
5
+ import argparse
6
+ import torch
7
+ from tqdm import tqdm
8
+ from PIL import ImageDraw, ImageFont
9
+ import numpy as np
10
+ import json
11
+ import pathlib
12
+ from io import BytesIO
13
+ import base64
14
+ from PIL import Image
15
+
16
+ # openai imports
17
+ # from openai import OpenAI
18
+ from openai import AsyncClient
19
+ # from throttle
20
+
21
+ # mutliprocessing for parallel job preparation
22
+ import concurrent.futures
23
+
24
+ # custom imports
25
+ from dataset import get_dataset_info, VLDatasetPaired
26
+ from utils import load_eval_data, load_img_and_kps
27
+ # from qwen_utils import QwenVLDetector
28
+ from openai_utils import build_messages, CoarseSemCorrBBox, make_preview, img_to_data_url
29
+ # from predict_correspondence_vlm import create_image_with_one_kp
30
+
31
+ # Place the new, fast drawing function here
32
+ def create_image_with_one_kp_pil(img, kps, kps_idx=0, circ_size=200, add_text=True, **kwargs):
33
+ img_with_kp = img.copy()
34
+ draw = ImageDraw.Draw(img_with_kp)
35
+ cx, cy = kps[kps_idx, 0], kps[kps_idx, 1]
36
+ radius = circ_size / 10
37
+ bbox = [cx - radius, cy - radius, cx + radius, cy + radius]
38
+ draw.ellipse(bbox, outline="red", width=4)
39
+
40
+ if add_text:
41
+ text = "Ref"
42
+ # Try to use a better font, or fall back to the default if not found
43
+ # try:
44
+ font = ImageFont.truetype("DejaVuSans.ttf", size=26)
45
+ # except IOError:
46
+ # print('test')
47
+ # font = ImageFont.load_default()
48
+
49
+ # Get text bounding box for centering
50
+ # print(font)
51
+ bbox_text = draw.textbbox((0, 0), text, font=font)
52
+ text_width = bbox_text[2] - bbox_text[0]
53
+ text_height = bbox_text[3] - bbox_text[1]
54
+
55
+ text_x = cx - text_width // 2
56
+ text_y = cy - text_height // 2
57
+ draw.text((text_x, text_y), text, font=font, fill=(255, 255, 255))
58
+ return img_with_kp
59
+
60
+ # Helper function to process a single keypoint (needed for parallelization)
61
+ def prepare_single_job(task_args):
62
+ img1, img2, img1_kps, kps_idx, category, src_id, tgt_id, task_prompt = task_args
63
+
64
+ if img1_kps[kps_idx, 2] == 1:
65
+ # Use the fast, new function
66
+ img1_kp = create_image_with_one_kp_pil(img1, img1_kps, kps_idx=kps_idx)
67
+
68
+ # plt.imsave("img2_masked_loop.png", img2)
69
+
70
+ return {
71
+ "img1_kp": img1_kp,
72
+ "img2": img2,
73
+ "task_prompt": task_prompt,
74
+ "metadata": { "category": category, "src_id": src_id, "tgt_id": tgt_id, "kps_idx": kps_idx }
75
+ }
76
+ return None
77
+
78
+
79
+ def encode_image_to_base64(image: Image.Image) -> str:
80
+ """Encodes a PIL image to a base64 data URI."""
81
+ buffered = BytesIO()
82
+ image.save(buffered, format="JPEG")
83
+ img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
84
+ return f"data:image/jpeg;base64,{img_str}"
85
+
86
+ import asyncio, itertools, json
87
+ from tqdm import tqdm
88
+
89
+ def run_batched_evaluation(args, model, system_prompt, task_prompt):
90
+ """
91
+ End‑to‑end evaluation: prepare jobs ➜ async GPT calls ➜
92
+ preview + W&B logging (streaming rows, no giant table in RAM).
93
+ """
94
+
95
+ # ────────── 0. WandB ───────────────────────────────────────────────────#
96
+ run = wandb.init(
97
+ project=args.EVAL_DATASET,
98
+ entity="amazon_intern2025",
99
+ name=args.EXP_NOTE,
100
+ config=vars(args)
101
+ )
102
+ run.define_metric("step")
103
+
104
+ columns = [
105
+ "step", "category", "src_id", "tgt_id", "kpt_id",
106
+ "preview", "full_response",
107
+ "bbox_src", "bbox_tgt",
108
+ #"src_img_size", "tgt_img_size"
109
+ ]
110
+ results_tb = wandb.Table(columns=columns)
111
+
112
+ # ────────── 1. Async GPT helper ────────────────────────────────────────#
113
+ async def one_call(text_prompt, img1, img2):
114
+
115
+ # print(type(img1), type(img2))
116
+
117
+ # plt.imsave("img2_masked.png", img2)
118
+
119
+ img1_url = encode_image_to_base64(img1)
120
+ img2_url = encode_image_to_base64(img2)
121
+
122
+ msgs = build_messages(system_prompt, text_prompt, img1_url, img2_url)
123
+ resp = await model.chat.completions.create(
124
+ model=args.MODEL_NAME,
125
+ messages=msgs,
126
+ response_format={"type": "json_object"},
127
+ # parse_model=CoarseSemCorrBBox,
128
+ )
129
+ # return resp
130
+ raw_json = resp.choices[0].message.content
131
+
132
+ # Add this check!
133
+ if not raw_json:
134
+ print(f"Warning: Received an empty response for item: {text_prompt}. Skipping.")
135
+ return None # or return a default error object
136
+
137
+ return CoarseSemCorrBBox.model_validate_json(raw_json)
138
+
139
+ CONCURRENCY = min(args.CONCURRENCY, 1) # safety cap: hard-limits to one in-flight request; raise this bound to honour --CONCURRENCY
140
+
141
+
142
+ async def batch_worker(batch):
143
+ src_urls, tgt_urls, txt_prompts = zip(*batch)
144
+ tasks = [one_call(tp, s, t) for tp, s, t in zip(txt_prompts, src_urls, tgt_urls)]
145
+
146
+ # throttle
147
+ out, i = [], 0
148
+ while i < len(tasks):
149
+ chunk = tasks[i:i + CONCURRENCY]
150
+ out.extend(await asyncio.gather(*chunk))
151
+ i += CONCURRENCY
152
+ return out
153
+
154
+ async def main_loop(data_loader):
155
+ all_preds = []
156
+ for batch in tqdm(data_loader, desc="semantic correspondence"):
157
+ all_preds.extend(await batch_worker(batch))
158
+ return all_preds
159
+
160
+ # ────────── 2. Prepare jobs ────────────────────────────────────────────#
161
+ data_dir, categories, split = get_dataset_info(args, split="test")
162
+ tasks_to_prepare = []
163
+ for cat in categories:
164
+ files, kps, _, _ = load_eval_data(args, data_dir, cat, split)
165
+ N = len(files) // 2
166
+ for pair_idx in tqdm(range(N), desc=f"prep {cat}"):
167
+ img1, img1_kps = load_img_and_kps(
168
+ idx=2 * pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE
169
+ )
170
+ img2, _ = load_img_and_kps(
171
+ idx=2 * pair_idx + 1, files=files, kps=kps,
172
+ img_size=args.ANNO_SIZE, load_masked=args.LOAD_MASKED
173
+ )
174
+ src_id = pathlib.Path(files[2 * pair_idx]).stem
175
+ tgt_id = pathlib.Path(files[2 * pair_idx + 1]).stem
176
+
177
+ # for k_idx, (x, y) in enumerate(img1_kps):
178
+ # print('img1_kps.shape', img1_kps.shape)
179
+ for k_idx in range(img1_kps.shape[0]):
180
+ kp_prompt = task_prompt.format(class_name=cat, point_x=img1_kps[k_idx, 0], point_y=img1_kps[k_idx, 1])
181
+ tasks_to_prepare.append(
182
+ (img1, img2, img1_kps, k_idx, cat, src_id, tgt_id, kp_prompt)
183
+ )
184
+
185
+ # parallel preprocessing (e.g. resizing / base64 upload)
186
+ with concurrent.futures.ProcessPoolExecutor(max_workers=os.cpu_count()) as ex:
187
+ prepared = list(
188
+ tqdm(
189
+ ex.map(prepare_single_job, tasks_to_prepare),
190
+ total=len(tasks_to_prepare),
191
+ desc="prepare jobs",
192
+ )
193
+ )
194
+ inference_jobs = [j for j in prepared if j is not None]
195
+ print(f"Prepared {len(inference_jobs)} visible‑keypoint jobs.")
196
+
197
+ # ────────── 3. Build DataLoader & run GPT ──────────────────────────────#
198
+ src_urls = [j["img1_kp"] for j in inference_jobs]
199
+ tgt_urls = [j["img2"] for j in inference_jobs]
200
+ prompts = [j["task_prompt"] for j in inference_jobs]
201
+
202
+ dataset_sem = VLDatasetPaired(src_urls, tgt_urls, prompts)
203
+ loader_sem = DataLoader(
204
+ dataset_sem,
205
+ batch_size=args.BATCH_SIZE,
206
+ num_workers=args.NUM_WORKERS,
207
+ collate_fn=lambda x: x,
208
+ pin_memory=True,
209
+ )
210
+
211
+ preds = asyncio.run(main_loop(loader_sem)) # list[CoarseSemCorrBBox]
212
+
213
+ # keep raw preds alongside run
214
+ # pred_file = pathlib.Path(run.dir) / "bboxes.jsonl"
215
+ # pred_file.write_text("\n".join(json.dumps(p.model_dump()) for p in preds))
216
+ # run.log_artifact(
217
+ # wandb.Artifact(name=f"preds_{run.id}", type="predictions").add_file(pred_file)
218
+ # )
219
+
220
+ out_dir = pathlib.Path(args.SAVE_DIR).expanduser()
221
+ out_dir.mkdir(parents=True, exist_ok=True)
222
+
223
+ pred_file = out_dir / f"{args.EXP_NOTE}_bboxes_{run.id}.jsonl"
224
+ with pred_file.open("w") as f:
225
+ for p in preds:
226
+ f.write(json.dumps(p.model_dump()) + "\n")
227
+
228
+ # ────────── 4. Streaming W&B logging ──────────────────────────────────#
229
+ for step, (job, pred) in tqdm(
230
+ enumerate(zip(inference_jobs, preds), 1), total=len(inference_jobs),
231
+ desc="log wandb"
232
+ ):
233
+ # unpack
234
+ meta = job["metadata"]
235
+ # semantics = job["extracted_semantics"]
236
+ full_response = pred.full_response
237
+ src_bbox = pred.bbox_src
238
+ tgt_bbox = pred.bbox_tgt #if pred.bbox_tgt else "none"
239
+
240
+ fig = make_preview(
241
+ np.array(job["img1_kp"]),
242
+ np.array(job["img2"]),
243
+ src_bbox,
244
+ tgt_bbox,
245
+ )
246
+
247
+ wandb_img = wandb.Image(fig)
248
+ plt.close(fig)
249
+
250
+ results_tb.add_data(
251
+ step,
252
+ meta["category"],
253
+ meta["src_id"],
254
+ meta["tgt_id"],
255
+ meta["kps_idx"],
256
+ wandb_img,
257
+ full_response,
258
+ src_bbox,
259
+ tgt_bbox,
260
+ # meta["src_input_size"],
261
+ # meta["tgt_input_size"],
262
+ )
263
+
264
+ if step % 100 == 0:
265
+ wandb.log({"evaluation_results": results_tb}, step=step)
266
+
267
+ # final flush
268
+ wandb.log({"evaluation_results": results_tb}, step=len(inference_jobs))
269
+ run.finish()
270
+
271
+
272
+ # ===================================================================
273
+ # 4. YOUR ORIGINAL `main` FUNCTION, NOW SIMPLIFIED TO CALL THE BATCHED RUNNER
274
+ # ===================================================================
275
+ def main(args):
276
+ with open(args.SYSTEM_PROMPT, 'r') as f:
277
+ system_prompt = f.read()
278
+ with open(args.TASK_PROMPT, 'r') as f:
279
+ task_prompt = f.read()
280
+
281
+ task_prompt = task_prompt.replace("{", "{{").replace("}", "}}")
282
+
283
+ # Initialize the OpenAI VLM model
284
+ print("Initializing OpenAI model...")
285
+ # model = OpenAI()
286
+ model = AsyncClient()
287
+
288
+ # Initialize WandB
289
+ # print("Initializing WandB...")
290
+ # wandb.init(
291
+ # project=args.EVAL_DATASET,
292
+ # entity="amazon_intern2025",
293
+ # name=args.EXP_NOTE,
294
+ # config=vars(args)
295
+ # )
296
+
297
+ # Run the optimized evaluation
298
+ run_batched_evaluation(args, model, system_prompt, task_prompt)
299
+
300
+ print('Finished processing all categories and logging results.')
301
+ wandb.finish()
302
+ print('WandB run finished.')
303
+
304
+
305
+ if __name__ == "__main__":
306
+ parser = argparse.ArgumentParser(description="Predict correspondence using an OpenAI GPT VLM.")
307
+ # ... (all your existing arguments) ...
308
+ parser.add_argument('--SYSTEM_PROMPT', type=str, required=True)
309
+ parser.add_argument('--TASK_PROMPT', type=str, required=True)
310
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, choices=['pascal', 'spair'])
311
+ parser.add_argument('--MODEL_NAME', type=str, default='gpt-4.1-mini', help='Model name to use for inference.')
312
+ parser.add_argument('--ANNO_SIZE', type=int, default=840)
313
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0)
314
+ parser.add_argument('--EXP_NOTE', type=str, default='Qwen VLM demo')
315
+ parser.add_argument('--SAVE_DIR', type=str, default='./results_vlm/', help='Directory to save intermediate results.')
316
+
317
+
318
+ # --- ADD THESE NEW ARGUMENTS for controlling batching ---
319
+ parser.add_argument('--BATCH_SIZE', type=int, default=16, help='Batch size for GPU inference.')
320
+ parser.add_argument('--NUM_WORKERS', type=int, default=8, help='Number of CPU cores for data loading.')
321
+ parser.add_argument('--CONCURRENCY', type=int, default=8, help='Number of concurrent requests to OpenAI API.')
322
+ parser.add_argument('--LOAD_MASKED', action='store_true', help='Load masked images for inference.')
323
+
324
+ args = parser.parse_args()
325
+ main(args)
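
The chunked `asyncio.gather` pattern in `batch_worker` (never more than `CONCURRENCY` requests in flight) can be exercised on its own; a self-contained sketch with dummy coroutines standing in for the OpenAI calls:

import asyncio


async def fake_call(i: int) -> int:
    # Stand-in for one_call(): sleep briefly and echo the index.
    await asyncio.sleep(0.01)
    return i


async def throttled_gather(n_jobs: int, concurrency: int):
    tasks = [fake_call(i) for i in range(n_jobs)]
    out, i = [], 0
    # Same chunking pattern as batch_worker: at most `concurrency` coroutines awaited at once.
    while i < len(tasks):
        out.extend(await asyncio.gather(*tasks[i:i + concurrency]))
        i += concurrency
    return out


if __name__ == "__main__":
    print(asyncio.run(throttled_gather(n_jobs=10, concurrency=4)))  # [0, 1, ..., 9]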
Code/sc_dit/near_side_correction.py ADDED
@@ -0,0 +1,224 @@
1
+ import json
2
+ import os
3
+ import glob
4
+ from typing import Dict, Any, Optional, Tuple
5
+
6
+ # custom imports
7
+ def previous_flipped_near_side_orientation(azimuth: float, polar: float, *, confidence: float | None = None) -> Dict:
8
+ """
9
+ azimuth: yaw in degrees, 0 = toward viewer, 90 = facing left, 180 = away, 270 = facing right
10
+ polar : elevation in degrees; + up (top view), - down (bottom view); 0 ≈ side
11
+ returns a structured viewer-centric hint
12
+ """
13
+ # az = _bucket_angle(azimuth)
14
+ assert azimuth >= 0 and azimuth < 360, "Azimuth must be in the range [0, 360)"
15
+ az = azimuth
16
+ po = polar
17
+
18
+ # ---- Facing & yaw_bias ---------------------------------------------------
19
+ # Main facing from octants; diagonals become a bias on the nearest cardinal
20
+ if 337.5 <= az or az <= 22.5:
21
+ facing, yaw_bias = "toward viewer", "none"
22
+ elif 22.5 < az <= 67.5:
23
+ facing, yaw_bias = "toward viewer", "left"
24
+ elif 67.5 < az <= 112.5:
25
+ facing, yaw_bias = "facing left", "none"
26
+ elif 112.5 < az <= 157.5:
27
+ facing, yaw_bias = "away from viewer", "left"
28
+ elif 157.5 < az <= 202.5:
29
+ facing, yaw_bias = "away from viewer", "none"
30
+ elif 202.5 < az <= 247.5:
31
+ facing, yaw_bias = "away from viewer", "right"
32
+ elif 247.5 < az <= 292.5:
33
+ facing, yaw_bias = "facing right", "none"
34
+ elif 292.5 < az <= 337.5:
35
+ facing, yaw_bias = "toward viewer", "right"
36
+ else:
37
+ facing, yaw_bias = "unknown", "none"
38
+
39
+ # ---- Elevation bucket ----------------------------------------------------
40
+ if -22.5 < po <= 22.5:
41
+ elevation = "side"
42
+ elif 22.5 < po <= 60:
43
+ elevation = "oblique-top"
44
+ elif po > 60:
45
+ elevation = "top-down"
46
+ elif -60 <= po <= -22.5:
47
+ elevation = "oblique-bottom"
48
+ else: # po < -60
49
+ elevation = "bottom-up"
50
+
51
+ # ---- Near-side in object-centric terms ----------------------------------
52
+ if facing == "facing left":
53
+ near_side = "object-left"
54
+ elif facing == "facing right":
55
+ near_side = "object-right"
56
+
57
+ ##### this is the flipped logic and needs to be corrected ######
58
+ elif facing == "toward viewer":
59
+ near_side = "object-left" if yaw_bias == "left" else ("object-right" if yaw_bias == "right" else "none")
60
+ elif facing == "away from viewer":
61
+ # flipped: when looking at the back, the opposite side appears nearer
62
+ near_side = "object-right" if yaw_bias == "left" else ("object-left" if yaw_bias == "right" else "none")
63
+ else:
64
+ near_side = "none"
65
+
66
+ return {
67
+ "facing": facing, # enum (camera-relative)
68
+ "yaw_bias": yaw_bias, # enum (camera-relative)
69
+ "elevation": elevation, # enum (camera-relative)
70
+ "near_side": near_side, # enum (object-centric)
71
+ "confidence": confidence,
72
+ }
73
+
74
+
75
+
76
+
77
+ def get_correct_orientation(orientation_data: Dict[str, Any]) -> Optional[str]:
78
+ """
79
+ Determines the correct 'near_side' based on facing direction and yaw bias.
80
+ This follows the "Golden Rule" often used in computer vision annotation.
81
+ """
82
+ facing = orientation_data.get("facing")
83
+ yaw_bias = orientation_data.get("yaw_bias")
84
+
85
+ if facing == "away from viewer":
86
+ if yaw_bias == "right":
87
+ return "object-right"
88
+ if yaw_bias == "left":
89
+ return "object-left"
90
+ elif facing == "toward viewer":
91
+ # Mirrored perspective
92
+ if yaw_bias == "right":
93
+ return "object-left"
94
+ if yaw_bias == "left":
95
+ return "object-right"
96
+
97
+ return None # Rule does not apply (e.g., 'side' view with no bias)
98
+
99
+ def process_directory(orientation_path: str, data_path:str, save_path: str):
100
+ """
101
+ Scans a directory for orientation files and corrects corresponding data files.
102
+ """
103
+ # categories
104
+ # categories = os.listdir(orientation_path)
105
+ categories = os.listdir(data_path)
106
+
107
+ for category in categories:
108
+ data_files = glob.glob(os.path.join(data_path, category, "*.json"))
109
+
110
+ if not data_files:
111
+ print(f"No data files (*_orientation.json) found in '{category}'.")
112
+ return
113
+
114
+ print(f"Found {len(data_files)} orientation files. Starting processing...")
115
+ print("-" * 50)
116
+
117
+ for data_file_path in data_files:
118
+ try:
119
+ # Derive the corresponding data file name
120
+ # Assumes data file is named like 'subject_name.json'
121
+ # and orientation is 'subject_name_orientation.json'
122
+ base_name = os.path.basename(data_file_path)
123
+ data_filename = base_name
124
+ orient_path = os.path.join(orientation_path, category, data_filename)
125
+
126
+ # print(os.listdir(os.path.join(data_path, category)))
127
+
128
+ os.makedirs(os.path.join(save_path, category), exist_ok=True)
129
+
130
+ if not os.path.exists(orient_path):
131
+ print(f"Warning: orient file '{data_filename}' not found for '{os.path.basename(orient_path)}'. Skipping.")
132
+ continue
133
+
134
+ # 1. Load orientation data
135
+ with open(orient_path, 'r') as f:
136
+ orientation_json = json.load(f)
137
+
138
+ print(orientation_json)
139
+ orientation_data = previous_flipped_near_side_orientation(
140
+ azimuth=orientation_json.get("azimuth"),
141
+ polar=orientation_json.get("polar"),
142
+ confidence=orientation_json.get("confidence")
143
+ )
144
+
145
+ print(orientation_data)
146
+ # break
147
+
148
+ original_near_side = orientation_data.get("near_side")
149
+ correct_near_side = get_correct_orientation(orientation_data)
150
+
151
+ # 2. Check if correction is needed
152
+ if not correct_near_side or original_near_side == correct_near_side:
153
+ print(f"No correction needed for {base_name}. Orientation is consistent.")
154
+ continue
155
+
156
+ # 3. Determine which words to swap
157
+ if "left" in original_near_side and "right" in correct_near_side:
158
+ wrong_side, correct_side = "left", "right"
159
+ elif "right" in original_near_side and "left" in correct_near_side:
160
+ wrong_side, correct_side = "right", "left"
161
+ else:
162
+ print(f"Skipping correction for {base_name}: Unclear which sides to swap.")
163
+ continue
164
+
165
+ print(f"Inconsistency found for '{base_name}'. Correcting '{wrong_side}' to '{correct_side}'.")
166
+
167
+ # 4. Load and process the main data file
168
+ with open(data_file_path, 'r') as f:
169
+ data = json.load(f)
170
+
171
+ change_count = 0
172
+ keypoints = data.get("keypoints", {})
173
+
174
+ for key, point_data in keypoints.items():
175
+ actor_output = point_data.get("actor_output", {})
176
+
177
+ # Fields to check and correct
178
+ fields_to_correct = ["part_location", "spatial_location"]
179
+
180
+ for field in fields_to_correct:
181
+ if field in actor_output and wrong_side in actor_output[field]:
182
+ original_text = actor_output[field]
183
+ # Replace both lowercase and capitalized versions
184
+ corrected_text = original_text.replace(wrong_side, correct_side)
185
+ corrected_text = corrected_text.replace(wrong_side.capitalize(), correct_side.capitalize())
186
+
187
+ if original_text != corrected_text:
188
+ actor_output[field] = corrected_text
189
+ change_count += 1
190
+
191
+ # 5. Save the corrected data
192
+ if change_count > 0:
193
+ # with open(data_path, 'w') as f:
194
+ # json.dump(data, f, indent=4)
195
+ # os.makedirs(save_path, exist_ok=True)
196
+ with open(os.path.join(save_path, category, data_filename), 'w') as f:
197
+ json.dump(data, f, indent=4)
198
+ print(f"Saved '{data_filename}' with {change_count} total corrections.")
199
+ else:
200
+ print(f"No instances of '{wrong_side}' found in '{data_filename}', though orientation was inconsistent.")
201
+
202
+ except json.JSONDecodeError:
203
+ print(f"Error: Invalid JSON in '{os.path.basename(orient_path)}' or its corresponding data file.")
204
+ except Exception as e:
205
+ print(f"An unexpected error occurred while processing '{os.path.basename(orient_path)}': {e}")
206
+ finally:
207
+ print("-" * 50)
208
+
209
+
210
+ if __name__ == "__main__":
211
+ # --- USAGE ---
212
+ # Set the path to the directory containing your JSON files.
213
+ # You can use "." for the current directory.
214
+ # target_directory = "."
215
+
216
+ orientation_path = '../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv/'
217
+ print(os.listdir(orientation_path))
218
+
219
+ data_path = './results_vlm/spair/GPT-5 Low Actor Critic Test 10'
220
+ print(os.listdir(data_path))
221
+
222
+ save_path = './results_vlm/spair/GPT-5 Low Actor Critic Test 10 Near Side Corrected'
223
+
224
+ process_directory(orientation_path=orientation_path, data_path=data_path, save_path=save_path)
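
To see the swap this script applies, a short usage sketch (assuming the file above is importable as `near_side_correction`): for a yaw of 300° the flipped helper reports the object-right side as nearer, while `get_correct_orientation` mirrors it.

from near_side_correction import (
    get_correct_orientation,
    previous_flipped_near_side_orientation,
)

old_hint = previous_flipped_near_side_orientation(azimuth=300.0, polar=0.0)
print(old_hint["near_side"])              # 'object-right' (the flipped value stored previously)
print(get_correct_orientation(old_hint))  # 'object-left'  (what process_directory rewrites it to)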
Code/sc_dit/openai_api_key.py ADDED
@@ -0,0 +1 @@
1
+ OPENAI_API_KEY = "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"
Code/sc_dit/openai_utils.py ADDED
@@ -0,0 +1,61 @@
1
+ import base64
2
+ from pydantic import BaseModel, field_validator
3
+ import matplotlib.pyplot as plt
4
+ from typing import List, Union, Any, Optional
5
+
6
+
7
+ def img_to_data_url(path: str) -> str:
8
+ mime = "image/png" if path.lower().endswith(".png") else "image/jpeg"
9
+ with open(path, "rb") as f:
10
+ b64 = base64.b64encode(f.read()).decode()
11
+ return f"data:{mime};base64,{b64}"
12
+
13
+
14
+ # use structured output
15
+ class CoarseSemCorrBBox(BaseModel):
16
+ """Container for the assistant’s JSON array + convenient attrs."""
17
+ full_response: str # entire response
18
+
19
+ bbox_src: List[int]
20
+ bbox_tgt: Optional[List[int]] = None # optional, can be "none"
21
+ @field_validator("bbox_tgt", mode="before")
22
+ @classmethod
23
+ def validate_bbox_tgt(cls, v: Any) -> Optional[List[int]]:
24
+ """
25
+ Allows the LLM to return the string 'none' for a missing bounding box.
26
+ This validator intercepts the input before standard validation.
27
+ """
28
+ # If the input is the string "none", convert it to None
29
+ if v == "none":
30
+ return None
31
+ # Otherwise, return the original value for Pydantic to validate as a List[int]
32
+ return v
33
+
34
+ def build_messages(system_prompt: str, text_prompt: str, img1_url: str, img2_url: str):
35
+ """Package vision + text into OpenAI chat format."""
36
+ return [
37
+ {"role": "system", "content": system_prompt},
38
+ {"role": "user", "content": [
39
+ {"type": "text", "text": text_prompt},
40
+ {"type": "image_url", "image_url": {"url": img1_url}},
41
+ {"type": "image_url", "image_url": {"url": img2_url}},
42
+ ]},
43
+ ]
44
+
45
+
46
+ def make_preview(src_np, tgt_np, src_bbox, tgt_bbox):
47
+ """Return a matplotlib Figure with both images + optional boxes."""
48
+ fig, axes = plt.subplots(1, 2, figsize=(6, 3))
49
+ axes[0].imshow(src_np); axes[0].axis("off"); axes[0].set_title("Source")
50
+ axes[1].imshow(tgt_np); axes[1].axis("off"); axes[1].set_title("Target")
51
+
52
+ if src_bbox:
53
+ x1, y1, x2, y2 = src_bbox
54
+ axes[0].add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
55
+ ec="lime", fc="none", lw=2))
56
+ if tgt_bbox: #and tgt_bbox != "none":
57
+ x1, y1, x2, y2 = tgt_bbox
58
+ axes[1].add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
59
+ ec="lime", fc="none", lw=2))
60
+ fig.tight_layout()
61
+ return fig
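
A brief usage sketch of the structured-output container (assuming this file is importable as `openai_utils`), showing how the `mode="before"` validator turns a literal "none" target box into `None`:

import json

from openai_utils import CoarseSemCorrBBox

raw = json.dumps({
    "full_response": "The keypoint sits on the left ear; no matching part is visible in the target.",
    "bbox_src": [120, 80, 220, 180],
    "bbox_tgt": "none",
})

pred = CoarseSemCorrBBox.model_validate_json(raw)
print(pred.bbox_src)  # [120, 80, 220, 180]
print(pred.bbox_tgt)  # None (the "none" string was intercepted by validate_bbox_tgt)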
Code/sc_dit/optimize_camera_pose_clean.py ADDED
@@ -0,0 +1,542 @@
1
+ import os
2
+ import torch
3
+ import numpy as np
4
+ import imageio
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ import matplotlib.pyplot as plt
8
+ import glob
9
+ import tqdm
10
+ from pathlib import Path
11
+ from torch.optim.optimizer import Optimizer
12
+
13
+ # from scipy.io import savemat
14
+
15
+ # Scientific and Image Processing Libraries
16
+ from scipy.ndimage import distance_transform_edt as edt
17
+ from scipy.ndimage import gaussian_filter
18
+ from skimage import img_as_ubyte
19
+ from PIL import Image
20
+
21
+ # PyTorch3D Imports
22
+ # I/O
23
+ from pytorch3d.io import load_obj
24
+
25
+ # Data Structures
26
+ from pytorch3d.structures import Meshes
27
+
28
+ # 3D Transformations
29
+ from pytorch3d.transforms import Rotate, Translate
30
+
31
+ # Rendering Components
32
+ from pytorch3d.renderer import (
33
+ FoVPerspectiveCameras,
34
+ look_at_rotation,
35
+ RasterizationSettings,
36
+ MeshRenderer,
37
+ MeshRasterizer,
38
+ BlendParams,
39
+ SoftSilhouetteShader,
40
+ HardPhongShader,
41
+ PointLights,
42
+ TexturesVertex,
43
+ )
44
+
45
+ # Third-party Libraries
46
+ from geomloss import SamplesLoss # pip install geomloss
47
+
48
+ # Local Utilities
49
+ from sc_dit.utils import resize_and_pad, scaled_shifted_sigmoid, get_bbox_from_alpha
50
+
51
+ import wandb
52
+
53
+
54
+ # Set the processing device
55
+ if torch.cuda.is_available():
56
+ device = torch.device("cuda:0")
57
+ torch.cuda.set_device(device)
58
+ else:
59
+ device = torch.device("cpu")
60
+
61
+ def wasserstein_torch(mask1, mask2, p=2, blur=0.05, backend="tensorized"):
62
+ """
63
+ Calculates the differentiable Wasserstein (Sinkhorn) distance between two masks.
64
+
65
+ Args:
66
+ mask1 (np.array): The first binary mask.
67
+ mask2 (np.array): The second binary mask.
68
+ p (int): The exponent for the cost function.
69
+ blur (float): The Sinkhorn regularization strength.
70
+ backend (str): The backend for geomloss computation.
71
+
72
+ Returns:
73
+ torch.Tensor: A scalar tensor representing the Wasserstein distance.
74
+ """
75
+ mask1_t = torch.as_tensor(mask1, dtype=torch.float32, device=device)
76
+ mask2_t = torch.as_tensor(mask2, dtype=torch.float32, device=device)
77
+
78
+ H, W = mask1_t.shape
79
+
80
+ # Create a grid of coordinates.
81
+ coords_y, coords_x = torch.meshgrid(
82
+ torch.arange(H, dtype=torch.float32, device=device),
83
+ torch.arange(W, dtype=torch.float32, device=device),
84
+ indexing="ij"
85
+ )
86
+ coords = torch.stack([coords_x, coords_y], dim=-1).view(-1, 2)
87
+
88
+ # Normalize the masks to create probability distributions.
89
+ w1 = mask1_t.flatten()
90
+ # w1 = scaled_shifted_sigmoid(w1, a=2.0, b=5.0, c=0.0, d=-1) # Apply sigmoid to ensure non-negativity
91
+ w1 = w1 / w1.sum()
92
+
93
+ w2 = mask2_t.flatten()
94
+ w2 = w2 / w2.sum()
95
+
96
+ loss_fn = SamplesLoss(loss="sinkhorn", p=p, blur=blur, backend=backend)
97
+
98
+ return loss_fn(w1, coords, w2, coords)
99
+
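
A minimal sanity check for `wasserstein_torch` on synthetic masks, assuming it is run in a session where the function and `device` above are defined and `geomloss` is installed; identical masks should give a distance near zero, and a shifted mask a clearly larger one:

import numpy as np

mask_a = np.zeros((32, 32), dtype=np.float32)
mask_a[8:16, 8:16] = 1.0    # small square near the upper-left
mask_b = np.zeros((32, 32), dtype=np.float32)
mask_b[16:24, 16:24] = 1.0  # the same square shifted toward the lower-right

print(float(wasserstein_torch(mask_a, mask_a)))  # close to 0.0
print(float(wasserstein_torch(mask_a, mask_b)))  # noticeably larger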
100
+
101
+ class CameraPoseEstimator(nn.Module):
102
+ """
103
+ A model to optimize camera pose by minimizing the Wasserstein distance
104
+ between a rendered silhouette and a reference image.
105
+ """
106
+ def __init__(self,
107
+ meshes,
108
+ renderer,
109
+ image_ref,
110
+ x_init=3.0,
111
+ y_init=6.9,
112
+ z_init=2.5,
113
+ wass_res=64):
114
+ """
115
+ Initializes the camera pose estimator.
116
+
117
+ Args:
118
+ meshes (Meshes): The 3D mesh to be rendered.
119
+ renderer (MeshRenderer): The PyTorch3D renderer.
120
+ image_ref (torch.Tensor): The reference image tensor (H, W).
121
+ x_init (float): Initial x-coordinate of the camera.
122
+ y_init (float): Initial y-coordinate of the camera.
123
+ z_init (float): Initial z-coordinate of the camera.
124
+ wass_res (int): Resolution for downsampling images for Wasserstein loss.
125
+ """
126
+ super().__init__()
127
+ self.meshes = meshes
128
+ self.device = meshes.device
129
+ self.renderer = renderer
130
+ self.register_buffer("image_ref", image_ref)
131
+ self.wass_res = wass_res
132
+
133
+ # Register camera position as a learnable parameter.
134
+ self.camera_position = nn.Parameter(
135
+ torch.tensor([x_init, y_init, z_init], dtype=torch.float32, device=self.device)
136
+ )
137
+
138
+ def forward(self):
139
+ """
140
+ Performs a forward pass: renders the mesh and computes the loss.
141
+ """
142
+ # Get camera rotation and translation from its position.
143
+ R = look_at_rotation(self.camera_position[None, :], device=self.device)
144
+ T = -torch.bmm(R.transpose(1, 2), self.camera_position[None, :, None])[:, :, 0]
145
+
146
+ # Render the image.
147
+ image = self.renderer(meshes_world=self.meshes.clone(), R=R, T=T)
148
+
149
+ # Downsample the rendered and reference images for loss calculation.
150
+ rendered_alpha = image[..., 3].unsqueeze(0)
151
+ image_init_downsampled = F.interpolate(
152
+ rendered_alpha,
153
+ size=(self.wass_res, self.wass_res),
154
+ mode='bilinear',
155
+ align_corners=False
156
+ ).squeeze()
157
+
158
+ model_image_ref_downsampled = F.interpolate(
159
+ self.image_ref.unsqueeze(0).unsqueeze(0),
160
+ size=(self.wass_res, self.wass_res),
161
+ mode='bilinear',
162
+ align_corners=False
163
+ ).squeeze()
164
+
165
+ # Compute the Wasserstein loss.
166
+ loss = wasserstein_torch(
167
+ image_init_downsampled.float(),
168
+ model_image_ref_downsampled.float(),
169
+ p=2, blur=0.05, backend="tensorized"
170
+ )
171
+
172
+ return loss, image
173
+
174
+
175
+ def make_target(mask, mode="gauss", sigma_px=8, normalise=True):
176
+ """
177
+ Converts a binary mask into a target representation (Gaussian or SDF).
178
+
179
+ Args:
180
+ mask (np.array): A binary mask (H, W) where foreground is 1.
181
+ mode (str): The conversion mode, either 'gauss' or 'sdf'.
182
+ sigma_px (int): Sigma for the Gaussian filter.
183
+ normalise (bool): Whether to normalize the output to a standard range.
184
+
185
+ Returns:
186
+ np.array: The processed target image as a float32 array.
187
+ """
188
+ mask = mask.astype(bool)
189
+
190
+ if mode == "gauss":
191
+ target = gaussian_filter(mask.astype(np.float32), sigma=sigma_px)
192
+ if normalise:
193
+ target = (target - target.min()) / (target.max() - target.min() + 1e-6)
194
+ elif mode == "sdf":
195
+ # Signed Distance Function (SDF)
196
+ dist_out = edt(~mask)
197
+ dist_in = edt(mask)
198
+ sdf = dist_out.astype(np.float32)
199
+ sdf[mask] = -dist_in[mask]
200
+ if normalise:
201
+ sdf /= (np.abs(sdf).max() + 1e-6)
202
+ target = sdf
203
+ else:
204
+ raise ValueError("Mode must be 'gauss' or 'sdf'")
205
+
206
+ return target.astype(np.float32)
207
+
208
+ class ZerothOrderOptimizer(Optimizer):
209
+ """
210
+ Implements zeroth-order optimization using finite difference methods
211
+ to approximate gradients.
212
+ """
213
+ def __init__(self, params, lr=0.01, n_samples=10, delta=0.1):
214
+ """
215
+ Args:
216
+ params: iterable of parameters to optimize
217
+ lr (float): learning rate
218
+ n_samples (int): number of random directions to sample for gradient estimation
219
+ delta (float): perturbation scale for finite difference
220
+ """
221
+ defaults = dict(lr=lr, n_samples=n_samples, delta=delta)
222
+ super(ZerothOrderOptimizer, self).__init__(params, defaults)
223
+ def step(self, closure=None):
224
+ """Performs a single optimization step.
225
+ Args:
226
+ closure (callable, optional): A closure that reevaluates the model
227
+ and returns the loss.
228
+ """
229
+ loss = None
230
+ if closure is not None:
231
+ loss = closure()
232
+ for group in self.param_groups:
233
+ for p in group['params']:
234
+ # NOTE: no p.grad check here; zeroth-order estimation never calls
235
+ # backward(), so .grad would always be None and every parameter skipped.
236
+ # Parameters
237
+ lr = group['lr']
238
+ n_samples = group['n_samples']
239
+ delta = group['delta']
240
+ # Initialize gradient approximation
241
+ grad_approx = torch.zeros_like(p.data)
242
+ # Store original parameter value
243
+ original_params = p.data.clone()
244
+ # For each random direction
245
+ for i in range(n_samples):
246
+ # Generate random direction (uniform on unit sphere)
247
+ u = torch.randn_like(p.data)
248
+ u = u / (u.norm() + 1e-8)
249
+ # Forward direction evaluation
250
+ p.data = original_params + delta * u
251
+ loss_pos = closure().item()
252
+ # Backward direction evaluation
253
+ p.data = original_params - delta * u
254
+ loss_neg = closure().item()
255
+ # Two-point approximation of directional derivative
256
+ grad_direction = (loss_pos - loss_neg) / (2 * delta)
257
+ # Accumulate the gradient approximation
258
+ grad_approx += grad_direction * u
259
+ # Average over samples
260
+ grad_approx /= n_samples
261
+ # Restore original parameters
262
+ p.data = original_params
263
+ # Update parameters using the approximated gradient
264
+ p.data.add_(-lr * grad_approx)
265
+ return loss
266
+
267
+
268
+
269
+ def main(file_id, category, dataset, exp_note=None):
270
+ image_render_size = 256
271
+ # wass_res = 128
272
+ wass_res = 64
273
+ max_iter = 1000
274
+ learning_rate = 0.01 #0.05
275
+ min_iter = 50
276
+ patience = 1000 # Number of iterations to wait for improvement before stopping
277
+ rel_tol = 0.01 # Relative tolerance for early stopping
278
+
279
+
280
+
281
+ # --- 1. Initialize WandB ---
282
+ if exp_note is None:
283
+ exp_name = f"{dataset}_{category}_{file_id}"
284
+ else:
285
+ exp_name = f"{dataset}_{category}_{file_id}_{exp_note}"
286
+
287
+ run = wandb.init(
288
+ project="spair",
289
+ entity="amazon_intern2025",
290
+ # name=f"{dataset}_{category}_{file_id}",
291
+ name=exp_name,
292
+ config={
293
+ "file_id": file_id,
294
+ "category": category,
295
+ "dataset": dataset,
296
+ "image_render_size": image_render_size,
297
+ "wass_res": wass_res,
298
+ "max_iter": max_iter,
299
+ "learning_rate": learning_rate,
300
+ "min_iter": min_iter,
301
+ "patience": patience,
302
+ "rel_tol": rel_tol,
303
+ })
304
+
305
+ # --- 2. Path Setup ---
306
+ mesh_path = f'./example_data/{dataset}/{category}/{file_id}.obj' # no sparc by default
307
+ image_path = f'./example_data/{dataset}/{category}/{file_id}_bgd_rmv.png'
308
+ path_output = f'./example_results/{dataset}/{category}'
309
+ os.makedirs(path_output, exist_ok=True)
310
+ filename_output = f'{file_id}.gif'
311
+ path_file_output = os.path.join(path_output, filename_output)
312
+
313
+ # --- 3. Load Mesh ---
314
+ verts, faces_idx, _ = load_obj(mesh_path)
315
+ faces = faces_idx.verts_idx
316
+
317
+ # Initialize vertex colors and create a texture object.
318
+ verts_rgb = torch.ones_like(verts)[None] # (1, V, 3)
319
+ textures = TexturesVertex(verts_features=verts_rgb.to(device))
320
+
321
+ # Create a Meshes object.
322
+ obj_mesh = Meshes(
323
+ verts=[verts.to(device)],
324
+ faces=[faces.to(device)],
325
+ textures=textures
326
+ )
327
+
328
+ # --- 4. Setup Renderers ---
329
+ cameras = FoVPerspectiveCameras(device=device)
330
+
331
+ # Silhouette Renderer (for loss computation)
332
+ blend_params_sil = BlendParams(sigma=1e-4, gamma=1e-4)
333
+ raster_settings_sil = RasterizationSettings(
334
+ image_size=image_render_size,
335
+ blur_radius=np.log(1. / 1e-4 - 1.) * blend_params_sil.sigma,
336
+ faces_per_pixel=150, #100
337
+ )
338
+ silhouette_renderer = MeshRenderer(
339
+ rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings_sil),
340
+ shader=SoftSilhouetteShader(blend_params=blend_params_sil)
341
+ )
342
+
343
+ # Phong Renderer (for visualization)
344
+ lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
345
+ raster_settings_phong = RasterizationSettings(
346
+ image_size=image_render_size,
347
+ blur_radius=0.0,
348
+ faces_per_pixel=1,
349
+ )
350
+ phong_renderer = MeshRenderer(
351
+ rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings_phong),
352
+ shader=HardPhongShader(device=device, cameras=cameras, lights=lights)
353
+ )
354
+
355
+ # --- 5. Load and Process Reference Image ---
356
+ real_image_ref_rgba = imageio.imread(image_path)
357
+ print(f"Reference image shape: {real_image_ref_rgba.shape}")
358
+
359
+ print("Finding bounding box from alpha channel...")
360
+ bbox = get_bbox_from_alpha(Path(image_path)) # (x_min, y_min, x_max, y_max)
361
+
362
+ # Extract alpha channel and normalize.
363
+ # real_silhouette = torch.from_numpy(real_image_ref_rgba[..., 3]).float() / 255.0
364
+ # image_ref_padded = resize_and_pad(real_silhouette, output_size=image_render_size).to(device)
365
+
366
+ # Cropped image
367
+ with Image.open(image_path).convert("RGBA") as img:
368
+ # Crop the image using the bounding box
369
+ img_cropped = img.crop(bbox)
370
+
371
+ # Extract the alpha channel from the CROPPED image
372
+ real_silhouette_cropped = torch.from_numpy(np.array(img_cropped)[..., 3]).float() / 255.0
373
+ image_ref_padded = resize_and_pad(real_silhouette_cropped, output_size=image_render_size).to(device)
374
+
375
+ # Convert to a target representation (e.g., Gaussian blurred).
376
+ image_ref_final_pp = make_target(image_ref_padded.cpu().numpy(), sigma_px=1, normalise=True)
377
+ image_ref_final_pp = torch.from_numpy(image_ref_final_pp).to(device)
378
+ print(f"Final processed image shape: {image_ref_final_pp.shape}")
379
+
380
+ # --- 6. Optimization ---
381
+ writer = imageio.get_writer(path_file_output, mode='I', duration=0.3)
382
+
383
+ model = CameraPoseEstimator(
384
+ meshes=obj_mesh,
385
+ renderer=silhouette_renderer,
386
+ image_ref=image_ref_final_pp,
387
+ wass_res=wass_res
388
+ ).to(device)
389
+
390
+ optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
391
+ # optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, nesterov=True)
392
+ # optimizer = ZerothOrderOptimizer(model.parameters(), lr=0.01, n_samples=5, delta=0.05)
393
+ # def closure():
394
+ # optimizer.zero_grad()
395
+ # loss, _ = model()
396
+ # # loss = criterion(output, targets)
397
+ # return loss
398
+
399
+ loss_list = []
400
+ best_loss = float('inf')
401
+ patience_counter = 0
402
+
403
+ print("\nStarting optimization with early stopping (relative tolerance)...")
404
+ for i in range(max_iter):
405
+ optimizer.zero_grad()
406
+ loss, _ = model()
407
+ loss.backward()
408
+ optimizer.step()
409
+
410
+ # Use closure for zeroth-order optimization
411
+ # loss = optimizer.step(closure)
412
+
413
+ loss_item = loss.item()
414
+ loss_list.append(loss_item)
415
+
416
+ run.log({
417
+ # "iteration": i,
418
+ "loss": loss_item,
419
+ # "camera_position": model.camera_position.detach().cpu().numpy().tolist()
420
+ })
421
+
422
+ # --- Updated Early Stopping Logic ---
423
+ # Check if the relative loss improvement is significant
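+ # (e.g. with rel_tol = 0.01, a step only counts as an improvement if the loss drops by more than 1% of the current best)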
424
+ if i > 0 and (best_loss - loss_item) / (best_loss + 1e-8) > rel_tol:
425
+ best_loss = loss_item
426
+ patience_counter = 0 # Reset patience on improvement
427
+ else:
428
+ # On the first iteration, just set the best_loss
429
+ if i == 0:
430
+ best_loss = loss_item
431
+ patience_counter += 1 # Increment patience if no significant improvement
432
+
433
+ print(
434
+ f"Iteration {i:04d}, Loss: {loss_item:.4f}, Best Loss: {best_loss:.4f}, Patience: {patience_counter}/{patience}"
435
+ )
436
+
437
+ # Stop if patience is exceeded
438
+ if i > min_iter and patience_counter >= patience:
439
+ print(f"\nStopping early at iteration {i}. Loss has not improved for {patience} iterations.")
440
+ break
441
+ # --- End of Updated Logic ---
442
+
443
+ # GIF saving logic remains the same
444
+ if i % 20 == 0:
445
+ R = look_at_rotation(model.camera_position[None, :], device=model.device)
446
+ T = -torch.bmm(R.transpose(1, 2), model.camera_position[None, :, None])[:, :, 0]
447
+
448
+ with torch.no_grad():
449
+ image = phong_renderer(meshes_world=model.meshes.clone(), R=R, T=T)
450
+
451
+ image = image[0, ..., :3].detach().cpu().numpy()
452
+ run.log({
453
+ "rendered_image": wandb.Image(image, caption=f"Iteration {i}")
454
+ })
455
+ writer.append_data(img_as_ubyte(image))
456
+
457
+ writer.close()
458
+ print("Optimization finished.")
459
+
460
+ run.finish()
461
+ # loss_list = []
462
+ # print("\nStarting optimization...")
463
+ # for i in range(max_iter):
464
+ # optimizer.zero_grad()
465
+ # loss, _ = model()
466
+ # loss.backward()
467
+ # optimizer.step()
468
+
469
+ # loss_item = loss.item()
470
+ # loss_list.append(loss_item)
471
+ # print(f"Iteration {i:04d}, Loss: {loss_item:.4f}")
472
+
473
+ # # Save an image frame for the GIF periodically.
474
+ # if i % 20 == 0:
475
+ # R = look_at_rotation(model.camera_position[None, :], device=model.device)
476
+ # T = -torch.bmm(R.transpose(1, 2), model.camera_position[None, :, None])[:, :, 0]
477
+
478
+ # with torch.no_grad():
479
+ # image = phong_renderer(meshes_world=model.meshes.clone(), R=R, T=T)
480
+
481
+ # image = image[0, ..., :3].detach().cpu().numpy()
482
+ # writer.append_data(img_as_ubyte(image))
483
+
484
+ # writer.close()
485
+ # print("Optimization finished.")
486
+
487
+ # --- 7. Final Results ---
488
+ # Plot loss curve.
489
+ plt.figure(figsize=(10, 5))
490
+ plt.plot(loss_list, label='Loss')
491
+ plt.xlabel('Iteration')
492
+ plt.ylabel('Loss Value')
493
+ plt.title('Loss Across Iterations')
494
+ plt.legend()
495
+ plt.grid(True)
496
+ plt.savefig(f"{path_file_output.replace('.gif', '')}_loss.png")
497
+ plt.show()
498
+
499
+ # Print the final optimized camera position.
500
+ final_cam_pos = model.camera_position.detach().cpu().numpy()
501
+ print(f"Final camera position: {final_cam_pos}")
502
+
503
+ # Save the final camera position to a json file.
504
+ import json
505
+ final_cam_pos_dict = {
506
+ "file_id": file_id,
507
+ "category": category,
508
+ "dataset": dataset,
509
+ "final_camera_position": final_cam_pos.tolist()
510
+ }
511
+ json_output_path = os.path.join(path_output, f"{file_id}_camera_position.json")
512
+ with open(json_output_path, 'w') as f:
513
+ json.dump(final_cam_pos_dict, f, indent=4)
514
+
515
+ if __name__ == "__main__":
516
+ # --- 1. Configuration ---
517
+ file_id = '2010_001614' # 2009_004845 2010_001614 2007_006483
518
+ category = 'motorbike' # person
519
+ dataset = 'spair'
520
+ exp_note = 'crop'
521
+
522
+ main(file_id, category, dataset, exp_note)
523
+
524
+
525
+ # #### multiple categories example ####
526
+ # dataset = 'spair'
527
+
528
+ # path_data = './example_data/' + dataset
529
+ # category_list = os.listdir(path_data)
530
+
531
+ # # path_all_files = glob.glob(os.path.join(path_data, '*', '*.obj'))
532
+ # for category in category_list:
533
+ # path_category = os.path.join(path_data, category)
534
+ # file_list = os.listdir(path_category)
535
+ # for file_id in file_list:
536
+ # if file_id.endswith('.obj'):
537
+ # file_id = file_id.replace('.obj', '')
538
+ # print(f"Processing {file_id} in {category}...")
539
+ # main(file_id, category, dataset)
540
+
541
+
542
+
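For reference, the random-direction loop in ZerothOrderOptimizer.step above amounts to the classic two-point gradient estimate grad f(x) ≈ mean over random unit directions u of ((f(x + delta*u) - f(x - delta*u)) / (2*delta)) * u. Below is a minimal, self-contained sketch of that estimator on a toy quadratic; the helper name two_point_grad and the toy objective are illustrative only and not part of this repository.

```python
import torch

def two_point_grad(f, x, n_samples=5, delta=0.05):
    """Monte-Carlo two-point (finite-difference) estimate of grad f at x."""
    grad = torch.zeros_like(x)
    for _ in range(n_samples):
        u = torch.randn_like(x)
        u = u / (u.norm() + 1e-8)                      # random unit direction
        d = (f(x + delta * u) - f(x - delta * u)) / (2 * delta)
        grad += d * u                                  # directional slope times direction
    return grad / n_samples

# Toy check: minimise f(x) = ||x - 3||^2 without ever calling backward().
f = lambda v: ((v - 3.0) ** 2).sum()
x = torch.zeros(4)
for _ in range(200):
    x = x - 0.1 * two_point_grad(f, x)
print(f(x).item())  # should approach 0
```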
Code/sc_dit/orient_anything_demo.ipynb ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "28fc764a",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": []
10
+ }
11
+ ],
12
+ "metadata": {
13
+ "kernelspec": {
14
+ "display_name": "qwen-vl-flash-attn",
15
+ "language": "python",
16
+ "name": "python3"
17
+ },
18
+ "language_info": {
19
+ "name": "python",
20
+ "version": "3.11.13"
21
+ }
22
+ },
23
+ "nbformat": 4,
24
+ "nbformat_minor": 5
25
+ }
Code/sc_dit/part_dictionary.py ADDED
@@ -0,0 +1,436 @@
1
+ PART_SCHEMA = {
2
+ "aeroplane": {
3
+ "tail": {
4
+ "is_symmetric": False,
5
+ "children": {
6
+ "horizontal stabilizer": {
7
+ "is_symmetric": True,
8
+ "children": {
9
+ "elevator": {"is_symmetric": True, "children": {}}
10
+ }
11
+ },
12
+ "vertical stabilizer": {
13
+ "is_symmetric": False,
14
+ "children": {
15
+ "rudder": {"is_symmetric": False, "children": {}}
16
+ }
17
+ }
18
+ }
19
+ },
20
+ "wing": {
21
+ "is_symmetric": True,
22
+ "children": {
23
+ "aileron": {"is_symmetric": True, "children": {}},
24
+ "flap": {"is_symmetric": True, "children": {}},
25
+ "engine": {"is_symmetric": True, "children": {}}
26
+ }
27
+ },
28
+ "fuselage": {
29
+ "is_symmetric": False,
30
+ "children": {
31
+ "cockpit": {"is_symmetric": False, "children": {}},
32
+ "nose": {"is_symmetric": False, "children": {}},
33
+ "landing gear": {
34
+ "is_symmetric": False,
35
+ "children": {
36
+ "main gear": {"is_symmetric": True, "children": {}},
37
+ "nose gear": {"is_symmetric": False, "children": {}}
38
+ }
39
+ }
40
+ }
41
+ }
42
+ },
43
+
44
+ "car": {
45
+ "wheel": {
46
+ "is_symmetric": True,
47
+ "children": {
48
+ "tire": {"is_symmetric": True, "children": {}},
49
+ "rim": {"is_symmetric": True, "children": {}}
50
+ }
51
+ },
52
+ "light": {
53
+ "is_symmetric": True,
54
+ "children": {
55
+ "headlight": {"is_symmetric": True, "children": {}},
56
+ "taillight": {"is_symmetric": True, "children": {}}
57
+ }
58
+ },
59
+ "glass": {
60
+ "is_symmetric": False,
61
+ "children": {
62
+ "windshield": {"is_symmetric": False, "children": {}},
63
+ "rear window": {"is_symmetric": False, "children": {}}
64
+ }
65
+ },
66
+ "body panel": {
67
+ "is_symmetric": False,
68
+ "children": {
69
+ "hood": {"is_symmetric": False, "children": {}},
70
+ "trunk": {"is_symmetric": False, "children": {}},
71
+ "fender": {"is_symmetric": True, "children": {}},
72
+ "door": {
73
+ "is_symmetric": True,
74
+ "children": {
75
+ "side mirror": {"is_symmetric": True, "children": {}},
76
+ "window": {"is_symmetric": True, "children": {}}
77
+ }
78
+ },
79
+ "bumper": {
80
+ "is_symmetric": False,
81
+ "children": {
82
+ "license_plate": {"is_symmetric": False, "children": {}}
83
+ }
84
+ }
85
+ }
86
+ }
87
+ },
88
+
89
+ "boat": {
90
+ "hull": {
91
+ "is_symmetric": False,
92
+ "children": {
93
+ "bow": {"is_symmetric": False, "children": {}},
94
+ "stern": {"is_symmetric": False, "children": {}},
95
+ "keel": {"is_symmetric": False, "children": {}}
96
+ }
97
+ },
98
+ "deck": {
99
+ "is_symmetric": False,
100
+ "children": {
101
+ "cabin": {"is_symmetric": False, "children": {}},
102
+ "trampoline": {"is_symmetric": False, "children": {}}
103
+ }
104
+ },
105
+ "mast": {
106
+ "is_symmetric": False,
107
+ "children": {
108
+ "boom": {"is_symmetric": False, "children": {}},
109
+ "sail": {"is_symmetric": False, "children": {}}
110
+ }
111
+ },
112
+ "steering": {
113
+ "is_symmetric": False,
114
+ "children": {
115
+ "rudder": {"is_symmetric": False, "children": {}}
116
+ }
117
+ }
118
+ },
119
+
120
+ "person": {
121
+ "head": {
122
+ "is_symmetric": False,
123
+ "children": {
124
+ "face": {
125
+ "is_symmetric": False,
126
+ "children": {
127
+ "eye": {"is_symmetric": True, "children": {}},
128
+ "ear": {"is_symmetric": True, "children": {}},
129
+ "nose": {"is_symmetric": False, "children": {}},
130
+ "mouth": {"is_symmetric": False, "children": {}}
131
+ }
132
+ }
133
+ }
134
+ },
135
+ "arm": {
136
+ "is_symmetric": True,
137
+ "children": {
138
+ "hand": {"is_symmetric": True, "children": {}}
139
+ }
140
+ },
141
+ "leg": {
142
+ "is_symmetric": True,
143
+ "children": {
144
+ "foot": {"is_symmetric": True, "children": {}}
145
+ }
146
+ },
147
+ "torso": {"is_symmetric": False, "children": {}}
148
+ },
149
+
150
+ "cat": {
151
+ "head": {
152
+ "is_symmetric": False,
153
+ "children": {
154
+ "ear": {"is_symmetric": True, "children": {}},
155
+ "eye": {"is_symmetric": True, "children": {}},
156
+ "nose": {"is_symmetric": False, "children": {}}
157
+ }
158
+ },
159
+ "leg": {
160
+ "is_symmetric": True,
161
+ "children": {
162
+ "paw": {"is_symmetric": True, "children": {}}
163
+ }
164
+ },
165
+ "torso": {
166
+ "is_symmetric": False,
167
+ "children": {
168
+ "tail": {"is_symmetric": False, "children": {}}
169
+ }
170
+ }
171
+ },
172
+
173
+ "dog": {
174
+ "head": {
175
+ "is_symmetric": False,
176
+ "children": {
177
+ "ear": {"is_symmetric": True, "children": {}},
178
+ "eye": {"is_symmetric": True, "children": {}},
179
+ "snout": {"is_symmetric": False, "children": {
180
+ "nose": {"is_symmetric": False, "children": {}}
181
+ }}
182
+ }
183
+ },
184
+ "leg": {
185
+ "is_symmetric": True,
186
+ "children": {
187
+ "paw": {"is_symmetric": True, "children": {}}
188
+ }
189
+ },
190
+ "torso": {
191
+ "is_symmetric": False,
192
+ "children": {
193
+ "tail": {"is_symmetric": False, "children": {}}
194
+ }
195
+ }
196
+ },
197
+
198
+ "horse": {
199
+ "head": {
200
+ "is_symmetric": False,
201
+ "children": {
202
+ "ear": {"is_symmetric": True, "children": {}},
203
+ "eye": {"is_symmetric": True, "children": {}},
204
+ "mane": {"is_symmetric": False, "children": {}}
205
+ }
206
+ },
207
+ "leg": {
208
+ "is_symmetric": True,
209
+ "children": {
210
+ "hoof": {"is_symmetric": True, "children": {}}
211
+ }
212
+ },
213
+ "torso": {
214
+ "is_symmetric": False,
215
+ "children": {
216
+ "tail": {"is_symmetric": False, "children": {}}
217
+ }
218
+ }
219
+ },
220
+
221
+ "cow": {
222
+ "head": {
223
+ "is_symmetric": False,
224
+ "children": {
225
+ "ear": {"is_symmetric": True, "children": {}},
226
+ "eye": {"is_symmetric": True, "children": {}},
227
+ "horn": {"is_symmetric": True, "children": {}}
228
+ }
229
+ },
230
+ "leg": {
231
+ "is_symmetric": True,
232
+ "children": {
233
+ "hoof": {"is_symmetric": True, "children": {}}
234
+ }
235
+ },
236
+ "torso": {
237
+ "is_symmetric": False,
238
+ "children": {
239
+ "udder": {"is_symmetric": False, "children": {}},
240
+ "tail": {"is_symmetric": False, "children": {}}
241
+ }
242
+ }
243
+ },
244
+
245
+ "sheep": {
246
+ "head": {
247
+ "is_symmetric": False,
248
+ "children": {
249
+ "ear": {"is_symmetric": True, "children": {}},
250
+ "eye": {"is_symmetric": True, "children": {}},
251
+ "horn": {"is_symmetric": True, "children": {}}
252
+ }
253
+ },
254
+ "leg": {
255
+ "is_symmetric": True,
256
+ "children": {
257
+ "hoof": {"is_symmetric": True, "children": {}}
258
+ }
259
+ },
260
+ "torso": {
261
+ "is_symmetric": False,
262
+ "children": {
263
+ "tail": {"is_symmetric": False, "children": {}}
264
+ }
265
+ }
266
+ },
267
+
268
+ "bird": {
269
+ "head": {
270
+ "is_symmetric": False,
271
+ "children": {
272
+ "beak": {"is_symmetric": False, "children": {}},
273
+ "eye": {"is_symmetric": True, "children": {}}
274
+ }
275
+ },
276
+ "wing": {"is_symmetric": True, "children": {}},
277
+ "tail": {"is_symmetric": False, "children": {}},
278
+ "foot": {"is_symmetric": True, "children": {}}
279
+ },
280
+
281
+ "bicycle": {
282
+ "frame": {
283
+ "is_symmetric": False,
284
+ "children": {
285
+ "fork": {"is_symmetric": False, "children": {}}
286
+ }
287
+ },
288
+ "drivetrain": {
289
+ "is_symmetric": False,
290
+ "children": {
291
+ "pedal": {"is_symmetric": True, "children": {}},
292
+ "chain": {"is_symmetric": False, "children": {}}
293
+ }
294
+ },
295
+ "control": {
296
+ "is_symmetric": False,
297
+ "children": {
298
+ "handlebar": {"is_symmetric": False, "children": {}},
299
+ "brake": {"is_symmetric": True, "children": {}}
300
+ }
301
+ },
302
+ "wheel": {
303
+ "is_symmetric": True,
304
+ "children": {
305
+ "tire": {"is_symmetric": True, "children": {}},
306
+ "rim": {"is_symmetric": True, "children": {}}
307
+ }
308
+ },
309
+ "seat": {"is_symmetric": False, "children": {}}
310
+ },
311
+
312
+ "motorbike": {
313
+ "handlebar": {
314
+ "is_symmetric": False,
315
+ "children": {
316
+ "mirror": {"is_symmetric": True, "children": {}}
317
+ }
318
+ },
319
+ "wheel": {
320
+ "is_symmetric": True,
321
+ "children": {
322
+ "tire": {"is_symmetric": True, "children": {}},
323
+ "rim": {"is_symmetric": True, "children": {}}
324
+ }
325
+ },
326
+ "headlight": {"is_symmetric": False, "children": {}},
327
+ "fender": {"is_symmetric": True, "children": {}},
328
+ "exhaust": {"is_symmetric": False, "children": {}},
329
+ "seat": {"is_symmetric": False, "children": {}},
330
+ "fuel tank": {"is_symmetric": False, "children": {}}
331
+ },
332
+
333
+ "bus": {
334
+ "body": {
335
+ "is_symmetric": False,
336
+ "children": {
337
+ "door": {"is_symmetric": True, "children": {}},
338
+ "window": {"is_symmetric": True, "children": {}},
339
+ "side panel": {"is_symmetric": True, "children": {}},
340
+ "roof": {"is_symmetric": False, "children": {}}
341
+ }
342
+ },
343
+ "light": {
344
+ "is_symmetric": True,
345
+ "children": {
346
+ "headlight": {"is_symmetric": True, "children": {}},
347
+ "taillight": {"is_symmetric": True, "children": {}}
348
+ }
349
+ },
350
+ "mirror": {"is_symmetric": True, "children": {}},
351
+ "wheel": {
352
+ "is_symmetric": True,
353
+ "children": {
354
+ "tire": {"is_symmetric": True, "children": {}},
355
+ "rim": {"is_symmetric": True, "children": {}}
356
+ }
357
+ }
358
+ },
359
+
360
+ "train": {
361
+ "car": {
362
+ "is_symmetric": False,
363
+ "children": {
364
+ "door": {"is_symmetric": True, "children": {}},
365
+ "window": {"is_symmetric": True, "children": {}},
366
+ "wheel": {"is_symmetric": True, "children": {}}
367
+ }
368
+ },
369
+ "roof": {
370
+ "is_symmetric": False,
371
+ "children": {
372
+ "pantograph": {"is_symmetric": False, "children": {}}
373
+ }
374
+ },
375
+ "front": {
376
+ "is_symmetric": False,
377
+ "children": {
378
+ "headlight": {"is_symmetric": True, "children": {}}
379
+ }
380
+ }
381
+ },
382
+
383
+ "chair": {
384
+ "seat": {
385
+ "is_symmetric": False,
386
+ "children": {
387
+ "backrest": {"is_symmetric": False, "children": {}},
388
+ "armrest": {"is_symmetric": True, "children": {}}
389
+ }
390
+ },
391
+ "leg": {"is_symmetric": True, "children": {}}
392
+ },
393
+
394
+ "tvmonitor": {
395
+ "screen": {
396
+ "is_symmetric": False,
397
+ "children": {
398
+ "bezel": {"is_symmetric": False, "children": {}},
399
+ "corner": {"is_symmetric": True, "children": {}}
400
+ }
401
+ },
402
+ "stand": {"is_symmetric": False, "children": {}}
403
+ },
404
+
405
+ "bottle": {
406
+ "neck": {
407
+ "is_symmetric": False,
408
+ "children": {
409
+ "cap": {"is_symmetric": False, "children": {}}
410
+ }
411
+ },
412
+ "body": {
413
+ "is_symmetric": False,
414
+ "children": {
415
+ "shoulder": {"is_symmetric": False, "children": {}}
416
+ }
417
+ },
418
+ "base": {"is_symmetric": False, "children": {}}
419
+ },
420
+
421
+ "pottedplant": {
422
+ "pot": {
423
+ "is_symmetric": False,
424
+ "children": {
425
+ "soil": {"is_symmetric": False, "children": {}}
426
+ }
427
+ },
428
+ "stem": {
429
+ "is_symmetric": False,
430
+ "children": {
431
+ "leaf": {"is_symmetric": True, "children": {}},
432
+ "flower": {"is_symmetric": True, "children": {}}
433
+ }
434
+ }
435
+ }
436
+ }
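PART_SCHEMA above is a nested category -> part -> sub-part hierarchy; every node stores an is_symmetric flag (whether the part occurs as a left/right pair) and a children dictionary. A hedged sketch of flattening one category into a part vocabulary for prompting; the helper iter_parts is illustrative and not defined in this repository.

```python
from part_dictionary import PART_SCHEMA

def iter_parts(schema, prefix=""):
    """Yield (part name, is_symmetric) for every node in the hierarchy."""
    for name, node in schema.items():
        full_name = f"{prefix}{name}"
        yield full_name, node["is_symmetric"]
        # Recurse into sub-parts, e.g. "wheel" -> "wheel tire", "wheel rim".
        yield from iter_parts(node["children"], prefix=f"{full_name} ")

vocab = [name for name, _ in iter_parts(PART_SCHEMA["car"])]
print(vocab[:5])  # ['wheel', 'wheel tire', 'wheel rim', 'light', 'light headlight']
```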
Code/sc_dit/predict_correspondence_vlm.py ADDED
@@ -0,0 +1,336 @@
1
+ import torch
2
+ from PIL import Image
3
+ import numpy as np
4
+ import matplotlib.pyplot as plt
5
+ from io import BytesIO
6
+ import os
7
+ import argparse
8
+ import ast
9
+
10
+ # data loading
11
+ from utils import load_pascal_data, load_spair_data, load_eval_data, load_img_and_kps
12
+ from dataset import get_dataset_info
13
+
14
+ # qwen model
15
+ from qwen_utils import QwenVLDetector
16
+
17
+ # logging
18
+ import wandb
19
+
20
+ import re
21
+
22
+ def extract_bounding_box(response: str):
23
+ """
24
+ Extracts the bounding box from a model response string.
25
+
26
+ Args:
27
+ response (str): The full response string containing the "Bounding box: [...]" line.
28
+
29
+ Returns:
30
+ list of int: A list of 4 integers [x1, y1, x2, y2] if found.
31
+ None: If no bounding box is found.
32
+ """
33
+ # Regex to find the bounding box line
34
+ bbox_match = re.search(r'Bounding box:\s*\[\s*([0-9\s,]+)\s*\]', response, re.MULTILINE)
35
+
36
+ print('bbox_match:', bbox_match)
37
+
38
+ if bbox_match:
39
+ # Extract and parse the coordinates
40
+ bbox_str = bbox_match.group(1)
41
+ bbox = [int(coord.strip()) for coord in bbox_str.split(',')]
42
+
43
+ if len(bbox) == 4:
44
+ return bbox
45
+ else:
46
+ raise ValueError(f"Expected 4 coordinates in bounding box, got {len(bbox)}: {bbox}")
47
+
48
+ return None
49
+
50
+ def create_image_with_one_kp(img, kps, kps_idx=0, circ_size=200, font_size=24, add_text=False, add_circle=True):
51
+ """
52
+ Plots a keypoint on an image and returns it as a new PIL Image with identical dimensions.
53
+
54
+ Args:
55
+ img (PIL.Image or np.ndarray): The input image.
56
+ kps (np.ndarray): Array of keypoints.
57
+ kps_idx (int): The index of the keypoint to plot.
58
+
59
+ Returns:
60
+ PIL.Image: A new image with the keypoint plotted, matching the input dimensions.
61
+ """
62
+ # 1. Get original image dimensions in pixels
63
+ if isinstance(img, Image.Image):
64
+ w_px, h_px = img.size
65
+ else: # Assuming NumPy array
66
+ h_px, w_px, _ = img.shape
67
+
68
+ # 2. Set a fixed physical size (e.g., 6 inches) and calculate the required DPI
69
+ # to make the output pixel count match the input.
70
+ figsize_inches = 6
71
+ dpi = w_px / figsize_inches
72
+
73
+ # 3. Create the figure with the calculated figsize and DPI
74
+ fig, ax = plt.subplots(1, 1, figsize=(figsize_inches, figsize_inches), dpi=dpi)
75
+
76
+ # 4. Remove all padding and whitespace from the figure
77
+ fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
78
+
79
+ # 5. Plot the image and keypoint
80
+ ax.imshow(np.array(img))
81
+ if add_circle:
82
+ ax.scatter(kps[kps_idx, 0], kps[kps_idx, 1], edgecolors='red', s=circ_size, facecolors='none', linewidths=2)
83
+ if add_text:
84
+ ax.text(kps[kps_idx, 0], kps[kps_idx, 1], "Ref", color='red', fontsize=font_size, verticalalignment='center', horizontalalignment='center')
85
+ ax.axis('off')
86
+
87
+ # 6. Save the figure to an in-memory buffer
88
+ buf = BytesIO()
89
+ fig.savefig(buf, format='png', dpi=dpi)
90
+
91
+ # 7. Close the figure to free memory
92
+ plt.close(fig)
93
+
94
+ # 8. Create a PIL Image from the buffer
95
+ buf.seek(0)
96
+ result_img = Image.open(buf)
97
+
98
+ return result_img
99
+
100
+ # def get_bbox_semantic_corr(img1, img2, img1_kps, kps_idx_list, vocab, prompt_template, save_path='./results_qwen_demo/'):
101
+ # text_prompt = prompt_template.format("\n".join(f"- {term}" for term in vocab))
102
+ # # print(text_prompt)
103
+
104
+ # for kps_idx in kps_idx_list:
105
+ # print(f"Processing keypoint index: {kps_idx}")
106
+ # img1_kp = create_image_with_one_kp(img1, img1_kps, kps_idx=kps_idx)
107
+ # # plt.imshow(np.array(img1_kp))
108
+ # # plt.axis('off')
109
+ # # plt.show()
110
+
111
+ # results = model.chat(img1_kp, text_prompt, prompt_type='self-handled')
112
+ # print(results)
113
+
114
+
115
+ # extracted_semantics = results.split("Keypoint component:")[-1].strip()
116
+ # refined_prompt = "Locate the most prominent instance of {} in the image. If found, return a single bounding box in [x1, y1, x2, y2] format. If not found, return an empty list.".format(extracted_semantics)
117
+ # # refined_prompt = "Outline the position of each **{text_prompt}** in the image and output all bounding box coordinates in JSON format"
118
+ # print(refined_prompt)
119
+
120
+ # results_tgt = model.predict(img2, refined_prompt, prompt_type='object', bbox_refine=False)
121
+ # # model.plot_bounding_boxes(img2, results_tgt['response'], results_tgt['input_size'], output_path='./results_qwen_demo/{}_kps_{}.png'.format(tgt_id, kps_idx))
122
+ # bbox = model.predict_bounding_boxes(img2, results_tgt['response'], results_tgt['input_size'])
123
+
124
+ # fig, axes = plt.subplots(1, 2, figsize=(12, 6))
125
+ # axes[0].imshow(np.array(img1_kp))
126
+ # axes[0].axis('off')
127
+ # axes[0].set_title('Source Image with Keypoint')
128
+ # axes[1].imshow(np.array(img2))
129
+ # axes[1].axis('off')
130
+ # axes[1].set_title('Target Image with Bounding Box')
131
+ # # Draw the bounding box on the target image
132
+ # if bbox:
133
+ # abs_x1, abs_y1, abs_x2, abs_y2 = bbox
134
+ # axes[1].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='red', facecolor='none', linewidth=2))
135
+ # fig.tight_layout()
136
+ # plt.axis('off')
137
+ # fig.savefig(os.path.join(save_path, f'{tgt_id}_kps_{kps_idx}.png'), bbox_inches='tight')
138
+ # plt.show()
139
+
140
+ # print(results_tgt['response'])
141
+
142
+ def main(args):
143
+ #
144
+ data_dir, categories, split = get_dataset_info(args, split='test')
145
+
146
+ with open(args.SYSTEM_PROMPT_SEM, 'r') as f:
147
+ system_prompt_sem = f.read()
148
+
149
+ with open(args.SYSTEM_PROMPT_BBOX, 'r') as f:
150
+ system_prompt_bbox = f.read()
151
+
152
+ with open(args.TASK_PROMPT_SEM, 'r') as f:
153
+ task_prompt_sem = f.read()
154
+
155
+ with open(args.TASK_PROMPT_BBOX, 'r') as f:
156
+ task_prompt_bbox = f.read()
157
+
158
+ # initialize the Qwen VLM model
159
+ model = QwenVLDetector(model_dir=None, torch_dtype=torch.bfloat16, model_name="Qwen/Qwen2.5-VL-32B-Instruct", device="auto", flash_attn=True)
160
+
161
+ # initialize the WandB logger
162
+ run = wandb.init(
163
+ project=args.EVAL_DATASET,
164
+ entity="amazon_intern2025",
165
+ # name=f"{dataset}_{category}_{file_id}",
166
+ name=args.EXP_NOTE,
167
+ config={
168
+ # "category": category,
169
+ "dataset": args.EVAL_DATASET,
170
+ })
171
+
172
+ # Define the columns for your table
173
+ table_columns = [
174
+ "category", "src_id", "tgt_id", "kpt_id", "plot",
175
+ "extracted_semantics", "src_response", "src_bbox",
176
+ "tgt_response", "tgt_bbox"
177
+ ]
178
+ # Create the table object that will be populated during the run.
179
+ results_table = wandb.Table(columns=table_columns)
180
+
181
+ for category in categories:
182
+
183
+ print(f"Processing category: {category}")
184
+ category_prompt_sem = task_prompt_sem.format(class_name=category)
185
+ # print(f"Category prompt for semantic extraction: {category_prompt_sem}")
186
+
187
+ files, kps, thresholds, used_points = load_eval_data(args, data_dir, category, split)
188
+ # # print(category_prompt) # N: number of pairs
189
+ # print(f"Number of files: {len(files)}") # 2*N images, (source, target)
190
+ # print(f"Number of keypoints: {len(kps)}") # 2*N
191
+ # print(f"Number of thresholds: {len(thresholds)}") # N, max bounding box for target pair
192
+ # # print(thresholds)
193
+ # print(f"Number of used points: {len(used_points)}") # .any() points across all pairs in the same category
194
+
195
+ N = len(files) // 2 # Number of pairs
196
+
197
+ for pair_idx in range(N):
198
+ img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE, edge=False)
199
+ img2, img2_kps = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, img_size=args.ANNO_SIZE, edge=False)
200
+
201
+ src_id = files[2*pair_idx].split('/')[-1].split('.')[0]
202
+ tgt_id = files[2*pair_idx+1].split('/')[-1].split('.')[0]
203
+
204
+ print(f"Processing pair {pair_idx}: {src_id} (source) and {tgt_id} (target)")
205
+
206
+ # check the visibility of the source keypoints
207
+ for kps_idx in range(img1_kps.shape[0]):
208
+ if img1_kps[kps_idx, 2] == 1:
209
+ print(f"Keypoint {kps_idx} in image 1 is visible.")
210
+
211
+ img1_kp = create_image_with_one_kp(img1, img1_kps, kps_idx=kps_idx, add_text=False, add_circle=True)
212
+
213
+ src_results = model.chat(img1_kp, category_prompt_sem, system_prompt=system_prompt_sem, prompt_type='self-handled')
214
+ print(f"Source results: {src_results['response']}")
215
+
216
+ original_size = img1_kp.size
217
+ bbox_with_label_src = model._get_bounding_boxes(
218
+ src_results['response'],
219
+ src_results['input_size'],
220
+ original_size
221
+ )
222
+
223
+
224
+ src_bbox = list(bbox_with_label_src.values())[0]['bbox'] if bbox_with_label_src else None
225
+ # print(f"Extracted bounding box from source image: {src_bbox}")
226
+ # print(f"Results for keypoint {kps_idx}: {src_results['response']}")
227
+ # print(f"Extracted bounding box: {src_bbox}")
228
+
229
+ extracted_semantics = src_results['response'].split("Keypoint component:")[-1].strip()
230
+ print(f"Extracted semantics: {extracted_semantics}")
231
+
232
+
233
+ curr_prompt_bbox = task_prompt_bbox.format(class_name=category, extracted_semantics=extracted_semantics)
234
+
235
+ print(f"Refined task prompt for bounding box: {curr_prompt_bbox}")
236
+
237
+
238
+ results_tgt = model.predict(img2, curr_prompt_bbox, system_prompt=system_prompt_bbox, prompt_type='object', bbox_refine=False)
239
+ tgt_bbox = model.predict_bounding_boxes(img2, results_tgt['response'], results_tgt['input_size'])
240
+
241
+ print(f"Results for target image: {results_tgt['response']}")
242
+
243
+
244
+ # logging the results
245
+ fig, axes = plt.subplots(1, 2, figsize=(12, 6))
246
+ axes[0].imshow(np.array(img1_kp))
247
+ axes[0].axis('off')
248
+ axes[0].set_title('Source Image with Keypoint and Bounding Box')
249
+ axes[1].imshow(np.array(img2))
250
+ axes[1].axis('off')
251
+ axes[1].set_title('Target Image with Bounding Box')
252
+ if src_bbox:
253
+ abs_x1, abs_y1, abs_x2, abs_y2 = src_bbox
254
+ axes[0].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
255
+ else:
256
+ axes[0].text(150, 30, "No bounding box found", color='red', fontsize=12, ha='center', va='center')
257
+ if tgt_bbox:
258
+ abs_x1, abs_y1, abs_x2, abs_y2 = tgt_bbox
259
+ axes[1].add_patch(plt.Rectangle((abs_x1, abs_y1), abs_x2 - abs_x1, abs_y2 - abs_y1, edgecolor='green', facecolor='none', linewidth=2))
260
+ else:
261
+ axes[1].text(150, 30, "No bounding box found", color='red', fontsize=12, ha='center', va='center')
262
+
263
+ fig.tight_layout()
264
+ plt.axis('off')
265
+ # wandb.log({f"pair_{pair_idx}/kps_{kps_idx}": fig})
266
+
267
+
268
+ wandb.log({
269
+ f"{category}/plot": wandb.Image(fig),
270
+ # f"{category}/src_id": src_id,
271
+ # f"{category}/tgt_id": tgt_id,
272
+ # f"{category}/kpt_id": kps_idx,
273
+ # f"{category}/extracted_semantics": extracted_semantics,
274
+ })
275
+ plt.close(fig) # Close plot to free memory
276
+
277
+ # --- 3. ADD DATA TO FINAL TABLE ---
278
+ # Add the same data to the table object in memory.
279
+ results_table.add_data(
280
+ category,
281
+ src_id,
282
+ tgt_id,
283
+ kps_idx,
284
+ wandb.Image(fig), # Create a new wandb.Image as the figure was closed
285
+ extracted_semantics,
286
+ src_results['response'],
287
+ str(src_bbox),
288
+ results_tgt['response'],
289
+ str(tgt_bbox)
290
+ )
291
+
292
+ # run.log({"evaluation_results": results_table})
293
+
294
+ # break
295
+
296
+
297
+ # break
298
+
299
+ # print(f"img1_kps shape: {img1_kps.shape}")
300
+
301
+ # break
302
+
303
+ # print(category_prompt)
304
+
305
+ print('Finished processing all categories and logging results.')
306
+ wandb.log({"evaluation_results": results_table})
307
+ wandb.finish()
308
+ print('WandB run finished.')
309
+
310
+ # return None
311
+
312
+
313
+
314
+ if __name__ == "__main__":
315
+ parser = argparse.ArgumentParser(description="Predict correspondence using Qwen VLM.")
316
+ # parser.add_argument('--data_path', type=str, required=True, help='Path to the dataset directory.')
317
+ parser.add_argument('--SYSTEM_PROMPT_SEM', type=str, required=True, help='Path to the system prompt file for keypoint-semantics extraction.')
318
+ parser.add_argument('--SYSTEM_PROMPT_BBOX', type=str, required=True, help='Path to the system prompt file for bounding-box prediction.')
319
+
320
+ parser.add_argument('--TASK_PROMPT_SEM', type=str, required=True, help='Path to the task prompt template for keypoint-semantics extraction.')
321
+ parser.add_argument('--TASK_PROMPT_BBOX', type=str, required=True, help='Path to the task prompt template for bounding-box prediction.')
322
+
323
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, choices=['pascal', 'spair'], help='Dataset to evaluate on.')
324
+ parser.add_argument('--ANNO_SIZE', type=int, default=840, help='Image size in pixels that images and keypoint annotations are rescaled to.')
325
+ parser.add_argument('--TEST_SAMPLE', type=int, default=0, help='Number of test samples to process.')
326
+ # parser.add_argument
327
+ # parser.add_argument('--img1_path', type=str, required=True, help='Path to the first image.')
328
+ # parser.add_argument('--img2_path', type=str, required=True, help='Path to the second image.')
329
+ # parser.add_argument('--kps_path', type=str, required=True, help='Path to the keypoints file.')
330
+ # parser.add_argument('--prompt_template', type=str, required=True, help='Template for the prompt.')
331
+
332
+ parser.add_argument('--EXP_NOTE', type=str, default='Qwen VLM demo', help='Experiment note for WandB logging.')
333
+
334
+ args = parser.parse_args()
335
+
336
+ main(args)
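The extract_bounding_box helper at the top of this file assumes the model's reply contains a literal "Bounding box: [x1, y1, x2, y2]" line. A small self-contained sketch of that parsing step on a made-up response string:

```python
import re

response = """Reasoning: the circled keypoint sits on the left side mirror.
Bounding box: [132, 48, 181, 92]"""

match = re.search(r'Bounding box:\s*\[\s*([0-9\s,]+)\s*\]', response, re.MULTILINE)
bbox = [int(c.strip()) for c in match.group(1).split(',')] if match else None
print(bbox)  # [132, 48, 181, 92]
```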
Code/sc_dit/qwen_gpt_proof_of_concept_actor_critic_detailed.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
Code/sc_dit/qwen_gpt_proof_of_concept_actor_critic_simplified.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
Code/sc_dit/qwen_proof_of_concept_crop.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
Code/sc_dit/qwen_single_demo.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
Code/sc_dit/qwen_utils.py ADDED
@@ -0,0 +1,1452 @@
1
+ # class PromptManager():
2
+ # def __init__(self):
3
+ # pass
4
+
5
+ # def construct_prompt(self, text_prompt, prompt_type):
6
+ # if prompt_type.lower() == 'object':
7
+ # prompt = f"""Outline the position of each {text_prompt} and output all bounding box coordinates in JSON format"""
8
+ # elif prompt_type.lower() == 'self-handled':
9
+ # prompt = text_prompt
10
+ # else:
11
+ # raise NotImplementedError
12
+
13
+ # return prompt
14
+
15
+ import os
16
+ from os import path
17
+ from PIL import Image, ImageDraw, ImageFont
18
+ from PIL import ImageColor
19
+ import json
20
+ import requests
21
+ from io import BytesIO
22
+ from urllib.parse import urlparse
23
+ import pathlib
24
+ import torch
25
+ from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
26
+ from qwen_vl_utils import process_vision_info
27
+
28
+ class PromptManager:
29
+ """
30
+ Builds a prompt that forces the LLM to return
31
+ a *pure* JSON list of bounding-box dictionaries:
32
+
33
+ [
34
+ {"bbox_2d": [x1, y1, x2, y2], "label": "<your-label>"},
35
+ ...
36
+ ]
37
+ """
38
+
39
+ @staticmethod
40
+ def construct_prompt(text_prompt: str, prompt_type: str = "object") -> str:
41
+ if prompt_type.lower() == "object":
42
+ prompt = text_prompt+"""
43
+ Required format (copy this structure exactly):
44
+ ```json
45
+ [
46
+ {"bbox_2d": [x1, y1, x2, y2], "label": "<your-label>"},
47
+ ]
48
+ ```
49
+ """
50
+ return prompt
51
+
52
+ elif prompt_type.lower() == "object_with_conf":
53
+ prompt = text_prompt+"""
54
+ Required format (copy this structure exactly):
55
+ ```json
56
+ [
57
+ {"bbox_2d": [x1, y1, x2, y2], "label": "<your-label>", "confidence": <confidence-value>},
58
+ ]
59
+ ```
60
+ """
61
+ return prompt
62
+ elif prompt_type.lower() == "self-handled":
63
+ return text_prompt
64
+ else:
65
+ raise NotImplementedError("prompt_type must be 'object' or 'self-handled'")
66
+
67
+
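+ # Example: PromptManager.construct_prompt("Locate the front-left wheel of the car.", "object")
+ # returns the task sentence with the "Required format" JSON template appended, which is the
+ # structure that _parse_json later extracts from the model's reply.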
68
+ def load_image(source, image_path=None):
69
+ try:
70
+ parsed = urlparse(source)
71
+ is_url = bool(parsed.scheme and parsed.netloc)
72
+
73
+ if is_url:
74
+ response = requests.get(source, stream=True)
75
+ response.raise_for_status()
76
+ img = Image.open(BytesIO(response.content))
77
+ else:
78
+ if path.exists(source):
79
+ img = Image.open(source)
80
+ else:
81
+ print(f"Error: Local file not found at {source}")
82
+ return None
83
+
84
+ if image_path is not None:
85
+ directory = path.dirname(image_path)
86
+ if directory and not path.exists(directory):
87
+ pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
88
+ img.save(image_path)
89
+ print(f"Image saved to {image_path}")
90
+
91
+ return img
92
+
93
+ except Exception as e:
94
+ print(f"Error loading image: {e}")
95
+ return None
96
+
97
+
98
+ class QwenVLDetector():
99
+ def __init__(self, model_dir=None, torch_dtype=torch.bfloat16, device="cuda:0", model_name="Qwen/Qwen2.5-VL-7B-Instruct", flash_attn=False):
100
+ if model_dir is not None:
101
+ self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_dir, torch_dtype=torch_dtype, device_map=device, local_files_only=True)
102
+ self.processor = AutoProcessor.from_pretrained(model_dir)
103
+ else:
104
+ attn_impl = "flash_attention_2" if flash_attn else "sdpa"
105
+ self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_name, torch_dtype=torch_dtype, device_map=device, attn_implementation=attn_impl)
106
+ self.processor = AutoProcessor.from_pretrained(model_name)
107
+
108
+
109
+ self.processor.tokenizer.padding_side = "left"
110
+ self.prompt_manager = PromptManager()
111
+ self._init_parameters()
112
+
113
+ def _init_parameters(self):
114
+ self.max_size = 1024
115
+ self.max_new_tokens = 1024
116
+ self.min_pixels = 512 * 512
117
+ self.max_pixels = self.max_size * self.max_size
118
+
119
+ def resize_image(self, img, max_size):
120
+ width, height = img.size
121
+ if max(width, height) <= max_size:
122
+ return img
123
+ scaling_factor = max_size / max(width, height)
124
+ new_width = int(width * scaling_factor)
125
+ new_height = int(height * scaling_factor)
126
+ return img.resize((new_width, new_height), Image.Resampling.LANCZOS)
127
+
128
+ def _parse_json(self, response_text):
129
+ """
130
+ Parses a JSON object from a model's response text.
131
+ Prioritizes extracting from a ```json markdown fence.
132
+ """
133
+ try:
134
+ # Priority 1: Look for a JSON markdown fence. This is the most reliable.
135
+ if "```json" in response_text:
136
+ # Isolate the string between the fences
137
+ json_str = response_text.split("```json")[1].split("```")[0]
138
+ return json_str.strip()
139
+
140
+ # Priority 2: If no fence, find the first '[' and last ']'
141
+ # This is less reliable and can cause the error you saw.
142
+ start_idx = response_text.find('[')
143
+ end_idx = response_text.rfind(']')
144
+
145
+ if start_idx != -1 and end_idx != -1 and end_idx > start_idx:
146
+ return response_text[start_idx : end_idx + 1]
147
+
148
+ except (IndexError, json.JSONDecodeError):
149
+ # This will catch errors from malformed markdown or other slicing issues.
150
+ pass
151
+
152
+ # Return None if no valid JSON is found
153
+ return None
154
+
155
+ def calculate_iou(self, box1, box2):
156
+ x1_1, y1_1, x2_1, y2_1 = box1
157
+ x1_2, y1_2, x2_2, y2_2 = box2
158
+ area1 = (x2_1 - x1_1) * (y2_1 - y1_1)
159
+ area2 = (x2_2 - x1_2) * (y2_2 - y1_2)
160
+ x1_i, y1_i = max(x1_1, x1_2), max(y1_1, y1_2)
161
+ x2_i, y2_i = min(x2_1, x2_2), min(y2_1, y2_2)
162
+ if x2_i <= x1_i or y2_i <= y1_i:
163
+ return 0.0
164
+ intersection_area = (x2_i - x1_i) * (y2_i - y1_i)
165
+ union_area = area1 + area2 - intersection_area
166
+ return intersection_area / union_area if union_area > 0 else 0.0
167
+
168
+ def _check_bbox(self, bbox_with_label, bbox_coor, size, threshold=0.7):
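+ # Accept a box only if it covers less than `threshold` of the image area and its IoU with every already-accepted box stays below `threshold`.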
169
+ x1, y1, x2, y2 = bbox_coor
170
+ width, height = size
171
+ if (x2 - x1) * (y2 - y1) / (width * height) >= threshold:
172
+ return False
173
+ for bbox_label in bbox_with_label.values():
174
+ if self.calculate_iou(bbox_label['bbox'], bbox_coor) >= threshold:
175
+ return False
176
+ return True
177
+
178
+ def _get_bounding_boxes(self, response, input_size, size):
179
+ bounding_boxes_str = self._parse_json(response)
180
+ print(f"Bounding boxes string: {bounding_boxes_str}")
181
+ if not bounding_boxes_str:
182
+ return {}
183
+
184
+ input_width, input_height = input_size
185
+ width, height = size
186
+
187
+ try:
188
+ # Use robust, standard JSON parsing
189
+ json_output = json.loads(bounding_boxes_str)
190
+ except json.JSONDecodeError:
191
+ print(f"Warning: Could not decode JSON from model output: {bounding_boxes_str}")
192
+ return {}
193
+
194
+ bbox_with_label = {}
195
+ if isinstance(json_output, list) and len(json_output) > 0:
196
+ for i, bounding_box in enumerate(json_output):
197
+ try:
198
+ coords = bounding_box["bbox_2d"]
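+ # bbox_2d is returned in the processor's input resolution; rescale it to the original image size.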
199
+ abs_x1 = int(coords[0] / input_width * width)
200
+ abs_y1 = int(coords[1] / input_height * height)
201
+ abs_x2 = int(coords[2] / input_width * width)
202
+ abs_y2 = int(coords[3] / input_height * height)
203
+
204
+ if abs_x1 > abs_x2: abs_x1, abs_x2 = abs_x2, abs_x1
205
+ if abs_y1 > abs_y2: abs_y1, abs_y2 = abs_y2, abs_y1
206
+
207
+ if self._check_bbox(bbox_with_label, [abs_x1, abs_y1, abs_x2, abs_y2], size):
208
+ bbox_with_label[i] = {'bbox': [abs_x1, abs_y1, abs_x2, abs_y2], 'label': bounding_box.get('label')}
209
+
210
+ except (KeyError, IndexError, TypeError) as e:
211
+ print(f"Error processing bounding box {bounding_box}: {e}")
212
+
213
+ return bbox_with_label
214
+
215
+ def _get_bounding_boxes_with_conf(self, response, input_size, size):
216
+ bounding_boxes_str = self._parse_json(response)
217
+ print(f"Bounding boxes string: {bounding_boxes_str}")
218
+ if not bounding_boxes_str:
219
+ return {}
220
+
221
+ input_width, input_height = input_size
222
+ width, height = size
223
+
224
+ try:
225
+ json_output = json.loads(bounding_boxes_str)
226
+ except json.JSONDecodeError:
227
+ print(f"Warning: Could not decode JSON from model output: {bounding_boxes_str}")
228
+ return {}
229
+
230
+ bbox_with_label = {}
231
+ if isinstance(json_output, list) and len(json_output) > 0:
232
+ for i, bounding_box in enumerate(json_output):
233
+ try:
234
+ label = bounding_box.get('label')
235
+ # NEW: Get the confidence score, default to None if not present
236
+ confidence = bounding_box.get('confidence')
237
+ coords = bounding_box.get("bbox_2d")
238
+
239
+ # NEW: Handle null bounding boxes
240
+ if coords is None:
241
+ # Store the prediction even if there's no box
242
+ bbox_with_label[i] = {'bbox': None, 'label': label, 'confidence': confidence}
243
+ continue # Move to the next item
244
+
245
+ # If we have coordinates, process them
246
+ abs_x1 = int(coords[0] / input_width * width)
247
+ abs_y1 = int(coords[1] / input_height * height)
248
+ abs_x2 = int(coords[2] / input_width * width)
249
+ abs_y2 = int(coords[3] / input_height * height)
250
+
251
+ if abs_x1 > abs_x2: abs_x1, abs_x2 = abs_x2, abs_x1
252
+ if abs_y1 > abs_y2: abs_y1, abs_y2 = abs_y2, abs_y1
253
+
254
+ processed_bbox = [abs_x1, abs_y1, abs_x2, abs_y2]
255
+
256
+ # Check for overlap and size constraints
257
+ if self._check_bbox(bbox_with_label, processed_bbox, size):
258
+ bbox_with_label[i] = {
259
+ 'bbox': processed_bbox,
260
+ 'label': label,
261
+ 'confidence': confidence # NEW: Store confidence
262
+ }
263
+
264
+ except (KeyError, IndexError, TypeError) as e:
265
+ print(f"Error processing bounding box {bounding_box}: {e}")
266
+
267
+ return bbox_with_label
268
+
269
+ def chat(self, image, text_prompt, system_prompt, prompt_type):
270
+ curr_prompt = self.prompt_manager.construct_prompt(text_prompt, prompt_type)
271
+
272
+ # Handle the image input: either load from URL or use the PIL Image directly
273
+ if isinstance(image, str):
274
+ # It's a URL or path, load the image
275
+ image_type = 'str'
276
+ image_pil = load_image(image)
277
+ size = image_pil.size
278
+ messages = [
279
+ {
280
+ "role": "system",
281
+ "content": system_prompt
282
+ },
283
+ {
284
+ "role": "user",
285
+ "content": [
286
+ {
287
+ "type": "text",
288
+ "text": curr_prompt
289
+ },
290
+ {
291
+ "type": "image",
292
+ "image": image,
293
+ "min_pixels": self.min_pixels,
294
+ "max_pixels": self.max_pixels
295
+ }
296
+ ]
297
+ }
298
+ ]
299
+
300
+ text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
301
+ image_inputs, video_inputs = process_vision_info(messages)
302
+ inputs = self.processor(text=[text],
303
+ images=image_inputs,
304
+ videos=video_inputs,
305
+ padding=True,
306
+ return_tensors="pt"
307
+ ).to(self.model.device)
308
+
309
+ else:
310
+ image_type = 'pil'
311
+ # Assume it's already a PIL Image
312
+ image_pil = image
313
+ # For the message content, we need to use the image directly
314
+
315
+ size = image_pil.size
316
+ image_for_message = self.resize_image(image_pil, self.max_size)
317
+
318
+ messages = [
319
+ {
320
+ "role": "system",
321
+ "content": system_prompt
322
+ },
323
+ {
324
+ "role": "user",
325
+ "content": [
326
+ {
327
+ "type": "text",
328
+ "text": curr_prompt
329
+ },
330
+ {
331
+ "type": "image",
332
+ "image": image_for_message
333
+ }
334
+ ]
335
+ }
336
+ ]
337
+
338
+ text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
339
+ inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
340
+
341
+ output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
342
+ generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
343
+ output_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
344
+ response = output_text[0]
345
+
346
+ input_height = inputs['image_grid_thw'][0][1] * 14
347
+ input_width = inputs['image_grid_thw'][0][2] * 14
348
+ input_size = (input_width.item(), input_height.item())
349
+
350
+ del text, inputs, output_ids, generated_ids, output_text
351
+ torch.cuda.empty_cache()
352
+
353
+ return {'response': response, 'input_size': input_size}
354
+
355
+ # def predict(self, image, text_prompt, system_prompt, prompt_type='object', bbox_refine=False):
356
+ # # This method's internal logic remains the same, as it calls _get_bounding_boxes
357
+ # # which has been corrected.
358
+ # curr_prompt = self.prompt_manager.construct_prompt(text_prompt, prompt_type)
359
+
360
+ # print('this is my curr prompt:', curr_prompt)
361
+
362
+ # if isinstance(image, str):
363
+ # image_pil = load_image(image)
364
+ # image_for_message = image
365
+ # else:
366
+ # image_pil = image
367
+ # image_for_message = self.resize_image(image_pil, self.max_size)
368
+
369
+ # size = image_pil.size
370
+ # messages = [
371
+ # {"role": "system", "content": system_prompt},
372
+ # {"role": "user", "content": [{"type": "text", "text": curr_prompt}, {"type": "image", "image": image_for_message}]}
373
+ # ]
374
+
375
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
376
+ # inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
377
+
378
+ # output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
379
+ # generated_ids = output_ids[:, inputs.input_ids.shape[1]:]
380
+ # response = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
381
+
382
+ # input_height = inputs['image_grid_thw'][0][1] * 14
383
+ # input_width = inputs['image_grid_thw'][0][2] * 14
384
+ # input_size = (input_width.item(), input_height.item())
385
+
386
+ # # Cleanup
387
+ # del text, inputs, output_ids, generated_ids
388
+ # torch.cuda.empty_cache()
389
+
390
+ # # Bbox refine logic can be kept if needed
391
+ # # if bbox_refine: ...
392
+
393
+ # bbox_with_label = self._get_bounding_boxes(response, input_size, size)
394
+ # results = {'bbox_with_label': bbox_with_label, 'response': response, 'input_size': input_size}
395
+
396
+ # return results
397
+
398
+ def predict(self, image, text_prompt, system_prompt, prompt_type='object', bbox_refine=False):
399
+ curr_prompt = self.prompt_manager.construct_prompt(text_prompt, prompt_type)
400
+
401
+ print('this is my curr prompt:', curr_prompt)
402
+
403
+ # Handle the image input: either load from URL or use the PIL Image directly
404
+ if isinstance(image, str):
405
+ # It's a URL or path, load the image
406
+ image_type = 'str'
407
+ image_pil = load_image(image)
408
+ size = image_pil.size
409
+ image_for_message = image
410
+ messages = [
411
+ {
412
+ "role": "system",
413
+ "content": system_prompt
414
+ },
415
+ {
416
+ "role": "user",
417
+ "content": [
418
+ {
419
+ "type": "text",
420
+ "text": curr_prompt
421
+ },
422
+ {
423
+ "type": "image",
424
+ "image": image,
425
+ "min_pixels": self.min_pixels,
426
+ "max_pixels": self.max_pixels
427
+ }
428
+ ]
429
+ }
430
+ ]
431
+
432
+ text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
433
+ image_inputs, video_inputs = process_vision_info(messages)
434
+ inputs = self.processor(text=[text],
435
+ images=image_inputs,
436
+ videos=video_inputs,
437
+ padding=True,
438
+ return_tensors="pt"
439
+ ).to(self.model.device)
440
+ else:
441
+ image_type = 'pil'
442
+ # Assume it's already a PIL Image
443
+ image_pil = image
444
+ # For the message content, we need to use the image directly
445
+
446
+ size = image_pil.size
447
+ image_for_message = self.resize_image(image_pil, self.max_size)
448
+
449
+ messages = [
450
+ {
451
+ "role": "system",
452
+ "content": system_prompt
453
+ },
454
+ {
455
+ "role": "user",
456
+ "content": [
457
+ {
458
+ "type": "text",
459
+ "text": curr_prompt
460
+ },
461
+ {
462
+ "type": "image",
463
+ "image": image_for_message
464
+ }
465
+ ]
466
+ }
467
+ ]
468
+
469
+ text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
470
+ inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
471
+
472
+ output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
473
+ generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
474
+ output_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
475
+
476
+ input_height = inputs['image_grid_thw'][0][1]*14
477
+ input_width = inputs['image_grid_thw'][0][2]*14
478
+
479
+ response_org = output_text[0]
480
+ input_size = (input_width.item(), input_height.item())
481
+
482
+ del text, inputs, output_ids, generated_ids, output_text
483
+ torch.cuda.empty_cache()
484
+
485
+ if bbox_refine:
486
+ response = self._bbox_refine(response_org, messages, image_type, image_for_message)
487
+ else:
488
+ response = response_org
489
+
490
+ results = dict()
491
+ bbox_with_label = self._get_bounding_boxes(response, input_size, size)
492
+ results = {'bbox_with_label': bbox_with_label, 'response': response, 'input_size': input_size}
493
+
494
+ return results
495
+
496
+ def plot_bounding_boxes(self, image, response, input_size, output_path='./temp.png'):
497
+ img = image.copy()
498
+ width, height = img.size
499
+ input_width, input_height = input_size
500
+
501
+ draw = ImageDraw.Draw(img)
502
+ colors = list(ImageColor.colormap.keys())
503
+ font = ImageFont.load_default()
504
+
505
+ bounding_boxes_str = self._parse_json(response)
506
+ if not bounding_boxes_str:
507
+ print("No bounding boxes found in the response.")
508
+ img.save(output_path)
509
+ return
510
+
511
+ try:
512
+ # Use robust, standard JSON parsing
513
+ json_output = json.loads(bounding_boxes_str)
514
+ except json.JSONDecodeError:
515
+ print(f"Warning: Could not decode JSON for plotting: {bounding_boxes_str}")
516
+ img.save(output_path)
517
+ return
518
+
519
+ if isinstance(json_output, list) and len(json_output) > 0:
520
+ for i, bounding_box in enumerate(json_output):
521
+ try:
522
+ color = colors[i % len(colors)]
523
+ coords = bounding_box["bbox_2d"]
524
+ abs_x1 = int(coords[0] / input_width * width)
525
+ abs_y1 = int(coords[1] / input_height * height)
526
+ abs_x2 = int(coords[2] / input_width * width)
527
+ abs_y2 = int(coords[3] / input_height * height)
528
+
529
+ if abs_x1 > abs_x2: abs_x1, abs_x2 = abs_x2, abs_x1
530
+ if abs_y1 > abs_y2: abs_y1, abs_y2 = abs_y2, abs_y1
531
+
532
+ draw.rectangle(((abs_x1, abs_y1), (abs_x2, abs_y2)), outline=color, width=4)
533
+
534
+ if "label" in bounding_box:
535
+ draw.text((abs_x1 + 8, abs_y1 + 6), str(bounding_box["label"]), fill=color, font=font)
536
+ except (KeyError, IndexError, TypeError) as e:
537
+ print(f"Error processing bounding box for plotting {bounding_box}: {e}")
538
+
539
+ img.save(output_path)
540
+ print(f"Image with bounding boxes saved to: {output_path}")
541
+
542
+ def predict_bounding_boxes(self, image, response, input_size):
543
+ # This method is for extracting coordinates, so it also benefits from robust parsing
544
+ width, height = image.size
545
+ input_width, input_height = input_size
546
+
547
+ bounding_boxes_str = self._parse_json(response)
548
+ if not bounding_boxes_str:
549
+ return None
550
+
551
+ try:
552
+ # Use robust, standard JSON parsing
553
+ json_output = json.loads(bounding_boxes_str)
554
+ except json.JSONDecodeError:
555
+ return None
556
+
557
+ if isinstance(json_output, list) and len(json_output) > 0:
558
+ try:
559
+ bounding_box = json_output[0] # Assuming we only want the first one
560
+ coords = bounding_box["bbox_2d"]
561
+ abs_x1 = int(coords[0] / input_width * width)
562
+ abs_y1 = int(coords[1] / input_height * height)
563
+ abs_x2 = int(coords[2] / input_width * width)
564
+ abs_y2 = int(coords[3] / input_height * height)
565
+ if abs_x1 > abs_x2: abs_x1, abs_x2 = abs_x2, abs_x1
566
+ if abs_y1 > abs_y2: abs_y1, abs_y2 = abs_y2, abs_y1
567
+ return [abs_x1, abs_y1, abs_x2, abs_y2]
568
+ except (KeyError, IndexError, TypeError):
569
+ return None
570
+ return None
571
+
572
+ # ===================================================================
573
+ # NEW BATCHED INFERENCE FUNCTIONS
574
+ # ===================================================================
575
+ def chat_batch(self, images, text_prompts, system_prompt, prompt_type):
576
+ """Processes a batch of images and prompts for chat-style interaction."""
577
+ if not images:
578
+ return []
579
+
580
+ # 1. Prepare all inputs for the batch
581
+ prompts = [self.prompt_manager.construct_prompt(tp, prompt_type) for tp in text_prompts]
582
+ images_for_message = [self.resize_image(img, self.max_size) for img in images]
583
+
584
+ print(f"Batch size: {len(images)}")
585
+ print(f"Prompts: {prompts}")
586
+
587
+ # 2. Create message structures for the entire batch
588
+ batch_messages = []
589
+ for i in range(len(images)):
590
+ batch_messages.append([
591
+ {"role": "system", "content": system_prompt},
592
+ {"role": "user", "content": [{"type": "text", "text": prompts[i]}, {"type": "image"}]}
593
+ ])
594
+
595
+ # 3. Use the processor on the entire batch at once
596
+ text_for_processor = [self.processor.apply_chat_template(m, tokenize=False, add_generation_prompt=True) for m in batch_messages]
597
+ inputs = self.processor(
598
+ text=text_for_processor,
599
+ images=images_for_message,
600
+ padding=True,
601
+ return_tensors="pt"
602
+ ).to(self.model.device)
603
+
604
+ # 4. Generate outputs for the entire batch
605
+ output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
606
+ generated_ids = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
607
+ responses = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
608
+
609
+ # 5. Format results (image grids can differ across the batch, so compute input_size per sample)
+ batch_results = []
+ for i, response in enumerate(responses):
+     input_height = inputs['image_grid_thw'][i][1] * 14
+     input_width = inputs['image_grid_thw'][i][2] * 14
+     input_size = (input_width.item(), input_height.item())
+     batch_results.append({'response': response, 'input_size': input_size})
617
+
618
+ del inputs, output_ids, generated_ids
619
+ # torch.cuda.empty_cache()
620
+
621
+ return batch_results
622
+
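+ # Usage sketch (illustrative only; `detector`, `imgs` and the prompts are placeholders):
+ #   out = detector.chat_batch(imgs, ["Describe the vehicle."] * len(imgs), system_prompt,
+ #                             prompt_type='self-handled')
+ #   texts = [r['response'] for r in out]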
623
+ def predict_batch(self, images, text_prompts, system_prompt, prompt_type='object'):
624
+ """Processes a batch of images and prompts to predict bounding boxes."""
625
+ if not images:
626
+ return []
627
+
628
+ # 1. Prepare all inputs for the batch
629
+ prompts = [self.prompt_manager.construct_prompt(tp, prompt_type) for tp in text_prompts]
630
+ images_for_message = [self.resize_image(img, self.max_size) for img in images]
631
+ original_sizes = [img.size for img in images]
632
+
633
+ # 2. Create message structures for the entire batch
634
+ batch_messages = []
635
+ for i in range(len(images)):
636
+ batch_messages.append([
637
+ {"role": "system", "content": system_prompt},
638
+ {"role": "user", "content": [{"type": "text", "text": prompts[i]}, {"type": "image"}]}
639
+ ])
640
+
641
+ # 3. Use the processor on the entire batch at once
642
+ text_for_processor = [self.processor.apply_chat_template(m, tokenize=False, add_generation_prompt=True) for m in batch_messages]
643
+ inputs = self.processor(
644
+ text=text_for_processor,
645
+ images=images_for_message,
646
+ padding=True,
647
+ return_tensors="pt"
648
+ ).to(self.model.device)
649
+
650
+ # 4. Generate outputs for the entire batch
651
+ output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
652
+ generated_ids = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
653
+ responses = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
654
+
655
+ # 5. Format results (image grids can differ across the batch, so compute input_size per sample)
+ batch_results = []
+ for i, response in enumerate(responses):
+     input_height = inputs['image_grid_thw'][i][1] * 14
+     input_width = inputs['image_grid_thw'][i][2] * 14
+     input_size = (input_width.item(), input_height.item())
+     if prompt_type == 'object_with_conf':
+         bbox_with_label = self._get_bounding_boxes_with_conf(response, input_size, original_sizes[i])
+     else:
+         bbox_with_label = self._get_bounding_boxes(response, input_size, original_sizes[i])
+     batch_results.append({
+         'bbox_with_label': bbox_with_label,
+         'response': response,
+         'input_size': input_size
+     })
671
+
672
+ del inputs, output_ids, generated_ids
673
+ # torch.cuda.empty_cache()
674
+
675
+ return batch_results
676
+
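+ # Usage sketch (illustrative only; `detector` and `imgs` are placeholders). Each result's
+ # `bbox_with_label` is already scaled back to the corresponding original image:
+ #   results = detector.predict_batch(imgs, ["front wheel"] * len(imgs), system_prompt)
+ #   for img, res in zip(imgs, results):
+ #       for idx, item in res['bbox_with_label'].items():
+ #           x1, y1, x2, y2 = item['bbox']  # pixel coordinates in the original image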
677
+ def predict_paired_batch(self, images1, images2, text_prompts, system_prompt, prompt_type='self-handled'):
678
+ """Processes a batch of images and prompts to predict bounding boxes."""
679
+ if not images1 or not images2:
680
+ return []
681
+
682
+ assert len(images1) == len(images2) == len(text_prompts), "All inputs must have the same length"
683
+
684
+ # 1. Prepare prompts
685
+ prompts = [self.prompt_manager.construct_prompt(tp, prompt_type) for tp in text_prompts]
686
+
687
+ # 2. Create batched messages
688
+ batch_messages = []
689
+ for i in range(len(images1)):
690
+ batch_messages.append([
691
+ {"role": "system", "content": system_prompt},
692
+ {"role": "user",
693
+ "content": [
694
+ {"type": "image", "image": images1[i]},
695
+ {"type": "image", "image": images2[i]},
696
+ {"type": "text", "text": prompts[i]},
697
+ ]
698
+ }
699
+ ])
700
+
701
+ # 3. Extract vision inputs (handles batch of conversations)
702
+ image_inputs, video_inputs = process_vision_info(batch_messages)
703
+
704
+ # 4. Apply chat template and tokenize
705
+ texts = [
706
+ self.processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True)
707
+ for msg in batch_messages
708
+ ]
709
+ inputs = self.processor(
710
+ text=texts,
711
+ images=image_inputs,
712
+ videos=video_inputs,
713
+ padding=True,
714
+ return_tensors="pt"
715
+ ).to(self.model.device)
716
+
717
+ print('inputs preprocess', inputs['image_grid_thw'])
718
+
719
+ # 5. Generate
720
+ output_ids = self.model.generate(
721
+ **inputs,
722
+ max_new_tokens=self.max_new_tokens
723
+ )
724
+ generated_ids = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
725
+ response_texts = self.processor.batch_decode(
726
+ generated_ids,
727
+ skip_special_tokens=True,
728
+ clean_up_tokenization_spaces=True
729
+ )
730
+
731
+ # 6. Parse responses and compute input sizes per sample
732
+ batch_results = []
733
+ for i, text in enumerate(response_texts):
734
+ print(f"Response text for sample {i}: {text}")
735
+ # Parse JSON
736
+ try:
737
+ clean_text = text.strip()
738
+ if clean_text.startswith("```"):
739
+ clean_text = "\n".join(line for line in clean_text.splitlines()
740
+ if not line.startswith("```"))
741
+ response = json.loads(clean_text)
742
+ except json.JSONDecodeError:
743
+ response = {
744
+ "bbox_2d": [0, 0, 0, 0],
745
+ "visible": False,
746
+ "label": "unknown",
747
+ "confidence": 0.0,
748
+ "reasoning": "Failed to parse model output."
749
+ }
750
+
751
+ # Get the image grids for this sample: images appear in content order, so index 2*i is the
+ # first image (the crop) and 2*i+1 is the second (the full image)
+ crop_grid_thw = inputs['image_grid_thw'][2*i]  # [T, H, W] in patch units
+ crop_height = crop_grid_thw[1].item() * 14  # height in pixels (H patches * 14)
+ crop_width = crop_grid_thw[2].item() * 14  # width in pixels (W patches * 14)
+ crop_size = (crop_height, crop_width)  # note the (H, W) ordering
+
+ img_grid_thw = inputs['image_grid_thw'][2*i+1]  # [T, H, W] in patch units
+ img_height = img_grid_thw[1].item() * 14  # height in pixels
+ img_width = img_grid_thw[2].item() * 14  # width in pixels
+ img_size = (img_height, img_width)  # note the (H, W) ordering
761
+
762
+ batch_results.append({
763
+ 'img2_bbox': response.get('bbox_2d', [0,0,0,0]),
764
+ 'visible': response.get('visible', False),
765
+ 'crop_size': crop_size,
766
+ 'img_size': img_size,
767
+ 'label': response.get('label', 'unknown'),
768
+ 'reasoning': response.get('reasoning', 'No reasoning provided.'),
769
+ 'raw_response_text': text # useful for debugging
770
+ })
771
+
772
+ # Cleanup
773
+ del inputs, output_ids, generated_ids
774
+ return batch_results
775
+
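+ # Usage sketch (illustrative only; assumes `img2_bbox` follows the same grid-space convention
+ # as `_get_bounding_boxes`, and recall `img_size` is returned as (height, width)):
+ #   results = detector.predict_paired_batch(crops, images, text_prompts, system_prompt)
+ #   for img2, res in zip(images, results):
+ #       in_h, in_w = res['img_size']
+ #       W, H = img2.size  # PIL size is (width, height)
+ #       x1, y1, x2, y2 = res['img2_bbox']
+ #       bbox_px = [x1 / in_w * W, y1 / in_h * H, x2 / in_w * W, y2 / in_h * H]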
776
+
777
+ # class PromptManager:
778
+ # """
779
+ # Builds a prompt that forces the LLM to return
780
+ # a *pure* JSON list of bounding-box dictionaries:
781
+
782
+ # [
783
+ # {"bbox_2d": [x1, y1, x2, y2], "label": "<your-label>"},
784
+ # ...
785
+ # ]
786
+ # """
787
+
788
+ # @staticmethod
789
+ # def construct_prompt(text_prompt: str, prompt_type: str = "object") -> str:
790
+ # """
791
+ # Parameters
792
+ # ----------
793
+ # target_desc : str
794
+ # Human-readable name of what you want boxed, e.g. "rear wheel of the motorbike".
795
+ # prompt_type : str
796
+ # Supported: "object" (build full bounding-box prompt)
797
+ # "self-handled" (return target_desc unchanged)
798
+ # """
799
+ # if prompt_type.lower() == "object":
800
+ # # ░░░ Prompt template ░░░
801
+ # prompt = f"""{text_prompt}
802
+ # Required format (copy this structure exactly):
803
+ # ```json
804
+ # [
805
+ # {{"bbox_2d": [x1, y1, x2, y2], "label": "<your-label>"}},
806
+ # ]
807
+ # ```
808
+ # """
809
+ # # Strip leading spaces so the first char is '[' once the model starts its answer
810
+ # return prompt
811
+
812
+ # elif prompt_type.lower() == "self-handled":
813
+ # return text_prompt
814
+ # else:
815
+ # raise NotImplementedError("prompt_type must be 'object' or 'self-handled'")
816
+
817
+
818
+ # import os
819
+ # from os import path
820
+ # from PIL import Image, ImageDraw, ImageFont
821
+ # from PIL import ImageColor
822
+ # import json
823
+ # import requests
824
+ # from io import BytesIO
825
+ # from urllib.parse import urlparse
826
+ # import pathlib
827
+
828
+ # import ast
829
+ # import torch
830
+ # from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
831
+ # from qwen_vl_utils import process_vision_info
832
+
833
+ # # from .prompt_manager import PromptManager
834
+
835
+
836
+ # def load_image(source, image_path=None):
837
+ # """
838
+ # Load an image from a URL or local file path and return a PIL Image object.
839
+ # Optionally save the loaded image to a specified path.
840
+
841
+ # Args:
842
+ # source (str): URL or local file path to the image
843
+ # image_path (str, optional): Path where to save the loaded image
844
+
845
+ # Returns:
846
+ # PIL.Image: The loaded image or None if loading fails
847
+ # """
848
+ # try:
849
+ # # Check if the source is a URL or local path
850
+ # parsed = urlparse(source)
851
+ # is_url = bool(parsed.scheme and parsed.netloc)
852
+
853
+ # if is_url:
854
+ # # Handle URL
855
+ # response = requests.get(source, stream=True)
856
+ # response.raise_for_status()
857
+ # img = Image.open(BytesIO(response.content))
858
+ # else:
859
+ # # Handle local file path
860
+ # if path.exists(source):
861
+ # img = Image.open(source)
862
+ # else:
863
+ # print(f"Error: Local file not found at {source}")
864
+ # return None
865
+
866
+ # # Save the image if image_path is provided
867
+ # if image_path is not None:
868
+ # # Make sure the directory exists
869
+ # directory = path.dirname(image_path)
870
+ # if directory and not path.exists(directory):
871
+ # pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
872
+
873
+ # # Save the image in the appropriate format based on file extension
874
+ # img.save(image_path)
875
+ # print(f"Image saved to {image_path}")
876
+
877
+ # return img
878
+
879
+ # except Exception as e:
880
+ # print(f"Error loading image: {e}")
881
+ # return None
882
+
883
+
884
+ # class QwenVLDetector():
885
+ # # Qwen 2.5 VL
886
+
887
+ # def __init__(self, model_dir=None, torch_dtype=torch.bfloat16, device="cuda:0", model_name="Qwen/Qwen2.5-VL-7B-Instruct", flash_attn=False):
888
+ # if model_dir is not None:
889
+ # # Load model and processor from local directory
890
+ # self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
891
+ # model_dir,
892
+ # torch_dtype=torch_dtype,
893
+ # device_map=device, # Explicitly use first GPU
894
+ # local_files_only=True # Ensures it only looks for local files
895
+ # )
896
+ # self.processor = AutoProcessor.from_pretrained(model_dir)
897
+ # else:
898
+ # if flash_attn:
899
+ # self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_name, torch_dtype=torch_dtype, device_map=device, attn_implementation="flash_attention_2")
900
+ # else:
901
+ # self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_name, torch_dtype=torch_dtype, device_map=device)
902
+ # self.processor = AutoProcessor.from_pretrained(model_name)
903
+
904
+ # # self.system_prompt = system_prompt
905
+ # self.prompt_manager = PromptManager()
906
+ # self._init_parameters()
907
+
908
+ # def _init_parameters(self):
909
+ # self.max_size = 1024
910
+ # self.max_new_tokens = 1024
911
+ # self.min_pixels = 512 * 512
912
+ # self.max_pixels = self.max_size * self.max_size
913
+
914
+ # def resize_image(self, img, max_size):
915
+ # # Get original dimensions
916
+ # width, height = img.size
917
+ # # If image already satisfies the size limit, return original
918
+ # if max(width, height) <= max_size:
919
+ # return img
920
+
921
+ # # Calculate scaling factor
922
+ # if width >= height:
923
+ # # Width is the long edge
924
+ # scaling_factor = max_size / width
925
+ # new_width = max_size
926
+ # new_height = int(height * scaling_factor)
927
+ # else:
928
+ # # Height is the long edge
929
+ # scaling_factor = max_size / height
930
+ # new_height = max_size
931
+ # new_width = int(width * scaling_factor)
932
+
933
+ # # Resize image
934
+ # resized_img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
935
+ # return resized_img
936
+
937
+ # # @title Parsing JSON output
938
+ # def _parse_json(self, json_output):
939
+ # # Parsing out the markdown fencing
940
+ # lines = json_output.splitlines()
941
+ # for i, line in enumerate(lines):
942
+ # if line == "```json":
943
+ # json_output = "\n".join(lines[i+1:]) # Remove everything before "```json"
944
+ # json_output = json_output.split("```")[0] # Remove everything after the closing "```"
945
+ # return json_output
946
+ # return None # Bounding box not found
947
+
948
+ # def calculate_iou(self, box1, box2):
949
+ # """
950
+ # Calculate the Intersection over Union (IoU) between two bounding boxes.
951
+
952
+ # Args:
953
+ # box1 (list): First bounding box in format [x1, y1, x2, y2]
954
+ # where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner
955
+ # box2 (list): Second bounding box in same format
956
+
957
+ # Returns:
958
+ # float: IoU value between 0 and 1
959
+ # """
960
+ # # Extract coordinates
961
+ # x1_1, y1_1, x2_1, y2_1 = box1
962
+ # x1_2, y1_2, x2_2, y2_2 = box2
963
+
964
+ # # Calculate area of each bounding box
965
+ # area1 = (x2_1 - x1_1) * (y2_1 - y1_1)
966
+ # area2 = (x2_2 - x1_2) * (y2_2 - y1_2)
967
+
968
+ # # Calculate coordinates of intersection
969
+ # x1_i = max(x1_1, x1_2)
970
+ # y1_i = max(y1_1, y1_2)
971
+ # x2_i = min(x2_1, x2_2)
972
+ # y2_i = min(y2_1, y2_2)
973
+
974
+ # # Check if there is an intersection
975
+ # if x2_i <= x1_i or y2_i <= y1_i:
976
+ # return 0.0
977
+
978
+ # # Calculate intersection area
979
+ # intersection_area = (x2_i - x1_i) * (y2_i - y1_i)
980
+
981
+ # # Calculate union area (sum of areas - intersection)
982
+ # union_area = area1 + area2 - intersection_area
983
+
984
+ # # Calculate IoU
985
+ # iou = intersection_area / union_area
986
+
987
+ # return iou
988
+
989
+ # def _check_bbox(self, bbox_with_label, bbox_coor, size, threshold=0.7):
990
+ # x1, y1, x2, y2 = bbox_coor
991
+ # width, height = size
992
+ # if (x2 - x1) * (y2 - y1) / width / height >= threshold:
993
+ # return False
994
+
995
+ # for idx, bbox_label in bbox_with_label.items():
996
+ # iou = self.calculate_iou(bbox_label['bbox'], bbox_coor)
997
+ # if iou >= threshold:
998
+ # return False
999
+
1000
+ # return True
1001
+
1002
+ # def _get_bounding_boxes(self, response, input_size, size):
1003
+ # # Parsing out the markdown fencing
1004
+ # bounding_boxes = self._parse_json(response)
1005
+ # if bounding_boxes is None:
1006
+ # return dict()
1007
+
1008
+ # input_width, input_height = input_size
1009
+ # width, height = size
1010
+
1011
+ # try:
1012
+ # json_output = ast.literal_eval(bounding_boxes)
1013
+ # except Exception as e:
1014
+ # end_idx = bounding_boxes.rfind('"}') + len('"}')
1015
+ # truncated_text = bounding_boxes[:end_idx] + "]"
1016
+ # json_output = ast.literal_eval(truncated_text)
1017
+
1018
+ # # Iterate over the bounding boxes
1019
+ # bbox_with_label = dict()
1020
+
1021
+ # if len(json_output) > 0:
1022
+ # for i, bounding_box in enumerate(json_output):
1023
+ # try:
1024
+
1025
+ # # Convert normalized coordinates to absolute coordinates
1026
+ # abs_x1 = int(bounding_box["bbox_2d"][0] / input_width * width)
1027
+ # abs_y1 = int(bounding_box["bbox_2d"][1] / input_height * height)
1028
+ # abs_x2 = int(bounding_box["bbox_2d"][2] / input_width * width)
1029
+ # abs_y2 = int(bounding_box["bbox_2d"][3] / input_height * height)
1030
+
1031
+ # if abs_x1 > abs_x2:
1032
+ # abs_x1, abs_x2 = abs_x2, abs_x1
1033
+
1034
+ # if abs_y1 > abs_y2:
1035
+ # abs_y1, abs_y2 = abs_y2, abs_y1
1036
+
1037
+ # if self._check_bbox(bbox_with_label, [abs_x1, abs_y1, abs_x2, abs_y2], size):
1038
+ # bbox_with_label[i] = {'bbox': [abs_x1, abs_y1, abs_x2, abs_y2], 'label': bounding_box.get('label')}
1039
+
1040
+ # except Exception as e:
1041
+ # print(f"Error {str(e)} for bounding box {bounding_box}")
1042
+
1043
+ # return bbox_with_label
1044
+
1045
+ # def _bbox_refine(self, response_org, messages, image_type, image_for_message):
1046
+ # bbox_text = self._parse_json(response_org)
1047
+ # if bbox_text is None:
1048
+ # return response_org
1049
+
1050
+ # prompt_type = 'bbox_refine'
1051
+ # text_prompt = self.prompt_manager.construct_prompt(bbox_text, prompt_type)
1052
+ # messages[1]['content'][0]['text'] = text_prompt
1053
+
1054
+ # if image_type == 'str':
1055
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
1056
+ # image_inputs, video_inputs = process_vision_info(messages)
1057
+ # inputs = self.processor(text=[text],
1058
+ # images=image_inputs,
1059
+ # videos=video_inputs,
1060
+ # padding=True,
1061
+ # return_tensors="pt"
1062
+ # ).to(self.model.device)
1063
+ # elif image_type == 'pil':
1064
+ # # Assume it's already a PIL Image
1065
+ # messages[1]['content'][1]['image'] = image_for_message
1066
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
1067
+ # inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
1068
+ # else:
1069
+ # raise NotImplementedError
1070
+
1071
+ # output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
1072
+ # generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
1073
+ # output_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
1074
+ # response = output_text[0]
1075
+
1076
+ # del text, inputs, output_ids, generated_ids, output_text
1077
+ # torch.cuda.empty_cache()
1078
+
1079
+ # return response
1080
+
1081
+ # def chat(self, image, text_prompt, system_prompt, prompt_type):
1082
+ # self.text_prompt = self.prompt_manager.construct_prompt(text_prompt, prompt_type)
1083
+
1084
+ # # Handle the image input: either load from URL or use the PIL Image directly
1085
+ # if isinstance(image, str):
1086
+ # # It's a URL or path, load the image
1087
+ # image_type = 'str'
1088
+ # image_pil = load_image(image)
1089
+ # size = image_pil.size
1090
+ # messages = [
1091
+ # {
1092
+ # "role": "system",
1093
+ # "content": system_prompt
1094
+ # },
1095
+ # {
1096
+ # "role": "user",
1097
+ # "content": [
1098
+ # {
1099
+ # "type": "text",
1100
+ # "text": self.text_prompt
1101
+ # },
1102
+ # {
1103
+ # "type": "image",
1104
+ # "image": image,
1105
+ # "min_pixels": self.min_pixels,
1106
+ # "max_pixels": self.max_pixels
1107
+ # }
1108
+ # ]
1109
+ # }
1110
+ # ]
1111
+
1112
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
1113
+ # image_inputs, video_inputs = process_vision_info(messages)
1114
+ # inputs = self.processor(text=[text],
1115
+ # images=image_inputs,
1116
+ # videos=video_inputs,
1117
+ # padding=True,
1118
+ # return_tensors="pt"
1119
+ # ).to(self.model.device)
1120
+
1121
+ # else:
1122
+ # image_type = 'pil'
1123
+ # # Assume it's already a PIL Image
1124
+ # image_pil = image
1125
+ # # For the message content, we need to use the image directly
1126
+
1127
+ # size = image_pil.size
1128
+ # image_for_message = self.resize_image(image_pil, self.max_size)
1129
+
1130
+ # messages = [
1131
+ # {
1132
+ # "role": "system",
1133
+ # "content": system_prompt
1134
+ # },
1135
+ # {
1136
+ # "role": "user",
1137
+ # "content": [
1138
+ # {
1139
+ # "type": "text",
1140
+ # "text": self.text_prompt
1141
+ # },
1142
+ # {
1143
+ # "type": "image",
1144
+ # "image": image_for_message
1145
+ # }
1146
+ # ]
1147
+ # }
1148
+ # ]
1149
+
1150
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
1151
+ # inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
1152
+
1153
+ # output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
1154
+ # generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
1155
+ # output_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
1156
+ # response = output_text[0]
1157
+
1158
+ # input_height = inputs['image_grid_thw'][0][1] * 14
1159
+ # input_width = inputs['image_grid_thw'][0][2] * 14
1160
+ # input_size = (input_width.item(), input_height.item())
1161
+
1162
+ # del text, inputs, output_ids, generated_ids, output_text
1163
+ # torch.cuda.empty_cache()
1164
+
1165
+ # return {'response': response, 'input_size': input_size}
1166
+
1167
+ # def predict(self, image, text_prompt, system_prompt, prompt_type='object', bbox_refine=False):
1168
+ # self.text_prompt = self.prompt_manager.construct_prompt(text_prompt, prompt_type)
1169
+
1170
+ # # Handle the image input: either load from URL or use the PIL Image directly
1171
+ # if isinstance(image, str):
1172
+ # # It's a URL or path, load the image
1173
+ # image_type = 'str'
1174
+ # image_pil = load_image(image)
1175
+ # size = image_pil.size
1176
+ # image_for_message = image
1177
+ # messages = [
1178
+ # {
1179
+ # "role": "system",
1180
+ # "content": system_prompt
1181
+ # },
1182
+ # {
1183
+ # "role": "user",
1184
+ # "content": [
1185
+ # {
1186
+ # "type": "text",
1187
+ # "text": self.text_prompt
1188
+ # },
1189
+ # {
1190
+ # "type": "image",
1191
+ # "image": image,
1192
+ # "min_pixels": self.min_pixels,
1193
+ # "max_pixels": self.max_pixels
1194
+ # }
1195
+ # ]
1196
+ # }
1197
+ # ]
1198
+
1199
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
1200
+ # image_inputs, video_inputs = process_vision_info(messages)
1201
+ # inputs = self.processor(text=[text],
1202
+ # images=image_inputs,
1203
+ # videos=video_inputs,
1204
+ # padding=True,
1205
+ # return_tensors="pt"
1206
+ # ).to(self.model.device)
1207
+
1208
+ # else:
1209
+ # image_type = 'pil'
1210
+ # # Assume it's already a PIL Image
1211
+ # image_pil = image
1212
+ # # For the message content, we need to use the image directly
1213
+
1214
+ # size = image_pil.size
1215
+ # image_for_message = self.resize_image(image_pil, self.max_size)
1216
+
1217
+ # messages = [
1218
+ # {
1219
+ # "role": "system",
1220
+ # "content": system_prompt
1221
+ # },
1222
+ # {
1223
+ # "role": "user",
1224
+ # "content": [
1225
+ # {
1226
+ # "type": "text",
1227
+ # "text": self.text_prompt
1228
+ # },
1229
+ # {
1230
+ # "type": "image",
1231
+ # "image": image_for_message
1232
+ # }
1233
+ # ]
1234
+ # }
1235
+ # ]
1236
+
1237
+ # text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
1238
+ # inputs = self.processor(text=[text], images=[image_for_message], padding=True, return_tensors="pt").to(self.model.device)
1239
+
1240
+ # output_ids = self.model.generate(**inputs, max_new_tokens=self.max_new_tokens)
1241
+ # generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
1242
+ # output_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
1243
+
1244
+ # input_height = inputs['image_grid_thw'][0][1]*14
1245
+ # input_width = inputs['image_grid_thw'][0][2]*14
1246
+
1247
+ # response_org = output_text[0]
1248
+ # input_size = (input_width.item(), input_height.item())
1249
+
1250
+ # del text, inputs, output_ids, generated_ids, output_text
1251
+ # torch.cuda.empty_cache()
1252
+
1253
+ # if bbox_refine:
1254
+ # response = self._bbox_refine(response_org, messages, image_type, image_for_message)
1255
+ # else:
1256
+ # response = response_org
1257
+
1258
+ # results = dict()
1259
+ # bbox_with_label = self._get_bounding_boxes(response, input_size, size)
1260
+ # results = {'bbox_with_label': bbox_with_label, 'response': response, 'input_size': input_size}
1261
+
1262
+ # return results
1263
+
1264
+ # def plot_bounding_boxes(self, image, response, input_size, output_path='./temp.png'):
1265
+
1266
+ # # Load the image
1267
+ # img = image.copy()
1268
+ # width, height = img.size
1269
+ # input_width, input_height = input_size
1270
+
1271
+ # # Create a drawing object
1272
+ # draw = ImageDraw.Draw(img)
1273
+
1274
+ # additional_colors = [colorname for (colorname, colorcode) in ImageColor.colormap.items()]
1275
+
1276
+ # # Define a list of colors
1277
+ # colors = [
1278
+ # 'red',
1279
+ # 'green',
1280
+ # 'blue',
1281
+ # 'yellow',
1282
+ # 'orange',
1283
+ # 'pink',
1284
+ # 'purple',
1285
+ # 'brown',
1286
+ # 'gray',
1287
+ # 'beige',
1288
+ # 'turquoise',
1289
+ # 'cyan',
1290
+ # 'magenta',
1291
+ # 'lime',
1292
+ # 'navy',
1293
+ # 'maroon',
1294
+ # 'teal',
1295
+ # 'olive',
1296
+ # 'coral',
1297
+ # 'lavender',
1298
+ # 'violet',
1299
+ # 'gold',
1300
+ # 'silver',
1301
+ # ] + additional_colors
1302
+
1303
+ # # Parsing out the markdown fencing
1304
+ # bounding_boxes = self._parse_json(response)
1305
+
1306
+ # # font = ImageFont.truetype("NotoSansCJK-Regular.ttc", size=14)
1307
+ # font = ImageFont.load_default()
1308
+
1309
+ # # print(f"Bounding boxes: {bounding_boxes}")
1310
+
1311
+ # try:
1312
+ # # print(f"Bounding boxes: {bounding_boxes}")
1313
+ # if bounding_boxes is None:
1314
+ # print("No bounding boxes found in the response.")
1315
+ # return
1316
+ # json_output = ast.literal_eval(bounding_boxes)
1317
+ # except Exception as e:
1318
+ # end_idx = bounding_boxes.rfind('"}') + len('"}')
1319
+ # truncated_text = bounding_boxes[:end_idx] + "]"
1320
+ # json_output = ast.literal_eval(truncated_text)
1321
+
1322
+ # print(f"Parsed JSON output: {json_output}")
1323
+
1324
+ # if len(json_output) > 0:
1325
+
1326
+ # # Iterate over the bounding boxes
1327
+ # for i, bounding_box in enumerate(json_output):
1328
+ # try:
1329
+ # # Select a color from the list
1330
+ # color = colors[i % len(colors)]
1331
+ # # Convert normalized coordinates to absolute coordinates
1332
+ # abs_x1 = int(bounding_box["bbox_2d"][0] / input_width * width)
1333
+ # abs_y1 = int(bounding_box["bbox_2d"][1] / input_height * height)
1334
+ # abs_x2 = int(bounding_box["bbox_2d"][2] / input_width * width)
1335
+ # abs_y2 = int(bounding_box["bbox_2d"][3] / input_height * height)
1336
+
1337
+ # if abs_x1 > abs_x2:
1338
+ # abs_x1, abs_x2 = abs_x2, abs_x1
1339
+
1340
+ # if abs_y1 > abs_y2:
1341
+ # abs_y1, abs_y2 = abs_y2, abs_y1
1342
+
1343
+ # # Draw the bounding box
1344
+ # draw.rectangle(
1345
+ # ((abs_x1, abs_y1), (abs_x2, abs_y2)), outline=color, width=4
1346
+ # )
1347
+
1348
+ # # Draw the text
1349
+ # if "label" in bounding_box:
1350
+ # draw.text((abs_x1 + 8, abs_y1 + 6), bounding_box["label"], fill=color, font=font)
1351
+ # except Exception as e:
1352
+ # print(f"Error {str(e)} for bounding box {bounding_box}")
1353
+
1354
+ # img.save(output_path)
1355
+ # print(f"Image with bounding boxes saved to: {output_path}")
1356
+
1357
+
1358
+ # def predict_bounding_boxes(self, image, response, input_size):
1359
+
1360
+ # # Load the image
1361
+ # img = image.copy()
1362
+ # width, height = img.size
1363
+ # input_width, input_height = input_size
1364
+
1365
+ # # Create a drawing object
1366
+ # # draw = ImageDraw.Draw(img)
1367
+
1368
+ # additional_colors = [colorname for (colorname, colorcode) in ImageColor.colormap.items()]
1369
+
1370
+ # # # Define a list of colors
1371
+ # # colors = [
1372
+ # # 'red',
1373
+ # # 'green',
1374
+ # # 'blue',
1375
+ # # 'yellow',
1376
+ # # 'orange',
1377
+ # # 'pink',
1378
+ # # 'purple',
1379
+ # # 'brown',
1380
+ # # 'gray',
1381
+ # # 'beige',
1382
+ # # 'turquoise',
1383
+ # # 'cyan',
1384
+ # # 'magenta',
1385
+ # # 'lime',
1386
+ # # 'navy',
1387
+ # # 'maroon',
1388
+ # # 'teal',
1389
+ # # 'olive',
1390
+ # # 'coral',
1391
+ # # 'lavender',
1392
+ # # 'violet',
1393
+ # # 'gold',
1394
+ # # 'silver',
1395
+ # # ] + additional_colors
1396
+
1397
+ # # Parsing out the markdown fencing
1398
+ # bounding_boxes = self._parse_json(response)
1399
+
1400
+ # # # font = ImageFont.truetype("NotoSansCJK-Regular.ttc", size=14)
1401
+ # # font = ImageFont.load_default()
1402
+
1403
+ # # print(f"Bounding boxes: {bounding_boxes}")
1404
+
1405
+ # try:
1406
+ # # print(f"Bounding boxes: {bounding_boxes}")
1407
+ # if bounding_boxes is None:
1408
+ # print("No bounding boxes found in the response.")
1409
+ # return
1410
+ # json_output = ast.literal_eval(bounding_boxes)
1411
+ # except Exception as e:
1412
+ # end_idx = bounding_boxes.rfind('"}') + len('"}')
1413
+ # truncated_text = bounding_boxes[:end_idx] + "]"
1414
+ # json_output = ast.literal_eval(truncated_text)
1415
+
1416
+ # print(f"Parsed JSON output: {json_output}")
1417
+
1418
+ # if len(json_output) > 0:
1419
+
1420
+ # # Iterate over the bounding boxes
1421
+ # for i, bounding_box in enumerate(json_output):
1422
+ # try:
1423
+ # # # Select a color from the list
1424
+ # # color = colors[i % len(colors)]
1425
+ # # Convert normalized coordinates to absolute coordinates
1426
+ # abs_x1 = int(bounding_box["bbox_2d"][0] / input_width * width)
1427
+ # abs_y1 = int(bounding_box["bbox_2d"][1] / input_height * height)
1428
+ # abs_x2 = int(bounding_box["bbox_2d"][2] / input_width * width)
1429
+ # abs_y2 = int(bounding_box["bbox_2d"][3] / input_height * height)
1430
+
1431
+ # if abs_x1 > abs_x2:
1432
+ # abs_x1, abs_x2 = abs_x2, abs_x1
1433
+
1434
+ # if abs_y1 > abs_y2:
1435
+ # abs_y1, abs_y2 = abs_y2, abs_y1
1436
+
1437
+ # # # Draw the bounding box
1438
+ # # draw.rectangle(
1439
+ # # ((abs_x1, abs_y1), (abs_x2, abs_y2)), outline=color, width=4
1440
+ # # )
1441
+
1442
+ # # # Draw the text
1443
+ # # if "label" in bounding_box:
1444
+ # # draw.text((abs_x1 + 8, abs_y1 + 6), bounding_box["label"], fill=color, font=font)
1445
+
1446
+ # return [abs_x1, abs_y1, abs_x2, abs_y2]
1447
+ # except Exception as e:
1448
+ # print(f"Error {str(e)} for bounding box {bounding_box}")
1449
+
1450
+ # # img.save(output_path)
1451
+ # # print(f"Image with bounding boxes saved to: {output_path}")
1452
+
Code/sc_dit/single_dp_pck_results.csv ADDED
@@ -0,0 +1,5 @@
1
+ ,pascal_zs_dit_t_0.5_layer_10.log,pascal_zs_dit_t_0.6_layer_9.log,pascal_zs_dit_t_0.6_layer_11_no_dino.log,pascal_zs_dit_t_0.3_layer_10_no_dino.log,pascal_zs_dit_t_0.3_layer_15_no_dino.log,pascal_zs_dit_t_0.6_layer_10.log,pascal_zs_dit_t_0.3_layer_18_no_dino.log,pascal_zs_dit_t_0.8_layer_9_no_dino.log,pascal_zs_dit_t_0.8_layer_12.log,pascal_zs_dit_t_0.3_layer_15.log,pascal_zs_dit_t_0.6_layer_20.log,pascal_zs_dit_t_0.8_layer_13.log,pascal_zs_dit_t_0.7_layer_20_no_dino.log,pascal_zs_dit_t_0.8_layer_13_no_dino.log,pascal_zs_dit_t_0.8_layer_12_no_dino.log,pascal_zs_dit_t_0.7_layer_13.log,pascal_zs_dit_t_0.8_layer_15.log,pascal_zs_dit_t_0.7_layer_15_no_dino.log,pascal_zs_dit_t_0.8_layer_18_no_dino.log,pascal_zs_dit_t_0.3_layer_13.log,pascal_zs_dit_t_0.3_layer_9.log,pascal_zs_dit_t_0.5_layer_12_no_dino.log,pascal_zs_dit_t_0.6_layer_15_no_dino.log,pascal_zs_dit_t_0.8_layer_20.log,pascal_zs_dit_t_0.5_layer_12.log,pascal_zs_dit_t_0.7_layer_18_no_dino.log,pascal_zs_dit_t_0.7_layer_10_no_dino.log,pascal_zs_dit_t_0.3_layer_10.log,pascal_zs_dit_t_0.7_layer_5.log,pascal_zs_dit_t_0.8_layer_10.log,pascal_zs_dit_t_0.6_layer_18.log,pascal_zs_dit_t_0.7_layer_13_no_dino.log,pascal_zs_dit_t_0.8_layer_11_no_dino.log,pascal_zs_dit_t_0.3_layer_5_no_dino.log,pascal_zs_dit_t_0.7_layer_18.log,pascal_zs_dit_t_0.6_layer_12.log,pascal_zs_dit_t_0.8_layer_15_no_dino.log,pascal_zs_dit_t_0.7_layer_10.log,pascal_zs_dit_t_0.5_layer_18_no_dino.log,pascal_zs_dit_t_0.6_layer_13_no_dino.log,pascal_zs_dit_t_0.5_layer_5_no_dino.log,pascal_zs_dit_t_0.5_layer_13_no_dino.log,pascal_zs_dit_t_0.3_layer_13_no_dino.log,pascal_zs_dit_t_0.6_layer_10_no_dino.log,pascal_zs_dit_t_0.6_layer_18_no_dino.log,pascal_zs_dit_t_0.3_layer_20.log,pascal_zs_dit_t_0.8_layer_10_no_dino.log,pascal_zs_dit_t_0.7_layer_20.log,pascal_zs_dit_t_0.5_layer_11_no_dino.log,pascal_zs_dit_t_0.7_layer_9_no_dino.log,pascal_zs_dit_t_0.6_layer_5.log,pascal_zs_dit_t_0.7_layer_11_no_dino.log,pascal_zs_dit_t_0.6_layer_11.log,pascal_zs_dit_t_0.6_layer_20_no_dino.log,pascal_zs_dit_t_0.3_layer_9_no_dino.log,pascal_zs_dit_t_0.7_layer_15.log,pascal_zs_dit_t_0.5_layer_13.log,pascal_zs_dit_t_0.7_layer_5_no_dino.log,pascal_zs_dit_t_0.7_layer_11.log,pascal_zs_dit_t_0.3_layer_11_no_dino.log,pascal_zs_dit_t_0.5_layer_5.log,pascal_zs_dit_t_0.5_layer_9.log,pascal_zs_dit_t_0.7_layer_12.log,pascal_zs_dit_t_0.5_layer_18.log,pascal_zs_dit_t_0.3_layer_12_no_dino.log,pascal_zs_dit_t_0.6_layer_9_no_dino.log,pascal_zs_dit_t_0.5_layer_10_no_dino.log,pascal_zs_dit_t_0.8_layer_9.log,pascal_zs_dit_t_0.6_layer_13.log,pascal_zs_dit_t_0.5_layer_20.log,pascal_zs_dit_t_0.3_layer_18.log,pascal_zs_dit_t_0.3_layer_12.log,pascal_zs_dit_t_0.7_layer_9.log,pascal_zs_dit_t_0.8_layer_5.log,pascal_zs_dit_t_0.6_layer_5_no_dino.log,pascal_zs_dit_t_0.5_layer_15.log,pascal_zs_dit_t_0.8_layer_20_no_dino.log,pascal_zs_dit_t_0.5_layer_9_no_dino.log,pascal_zs_dit_t_0.8_layer_11.log,pascal_zs_dit_t_0.6_layer_15.log,pascal_zs_dit_t_0.5_layer_20_no_dino.log,pascal_zs_dit_t_0.5_layer_11.log,pascal_zs_dit_t_0.3_layer_20_no_dino.log,pascal_zs_dit_t_0.5_layer_15_no_dino.log,pascal_zs_dit_t_0.6_layer_12_no_dino.log,pascal_zs_dit_t_0.8_layer_18.log,pascal_zs_dit_t_0.3_layer_5.log,pascal_zs_dit_t_0.3_layer_11.log,pascal_zs_dit_t_0.7_layer_12_no_dino.log,pascal_zs_dit_t_0.8_layer_5_no_dino.log
2
+ PCK0.05,64.95,61.35,57.26,41.10,31.36,67.25,19.31,43.29,61.70,47.27,36.21,60.36,34.58,53.31,55.84,62.98,60.19,53.07,45.76,50.55,56.82,57.35,52.07,42.60,63.05,44.68,55.08,58.41,49.71,62.00,50.80,56.81,51.89,20.05,53.28,64.89,52.27,66.03,36.14,56.82,20.32,53.87,38.30,57.32,41.29,23.45,50.91,40.40,53.99,48.30,49.25,57.73,66.37,30.88,37.42,60.82,60.48,19.65,66.34,40.61,49.00,60.96,64.32,46.22,43.58,49.97,53.61,57.86,63.11,34.61,32.42,54.54,60.53,49.52,20.25,57.55,37.30,47.79,63.04,61.25,27.33,64.61,16.49,47.27,59.92,52.89,49.05,56.93,59.74,19.42
3
+ PCK0.10,79.62,77.80,74.95,60.83,47.56,81.60,32.57,67.88,79.57,65.37,52.50,78.20,51.23,73.36,75.55,78.95,76.89,70.89,64.16,69.53,74.31,73.39,67.90,60.01,79.24,61.27,73.67,75.93,69.18,80.64,66.51,74.27,74.26,43.51,70.29,81.16,70.02,81.43,51.32,74.04,45.04,69.81,56.54,73.77,56.73,38.05,72.25,57.69,71.15,69.74,68.87,75.88,80.98,46.78,57.31,77.81,77.26,45.15,81.32,60.09,68.43,77.05,80.35,63.45,63.45,69.60,70.34,77.06,79.07,51.53,49.18,73.84,78.49,69.49,45.46,73.48,54.09,67.17,80.38,76.22,42.87,79.52,28.75,62.24,76.40,71.06,68.19,74.97,77.74,44.61
4
+ PCK0.01,86.19,84.70,83.24,74.43,57.68,87.15,41.81,79.12,87.16,73.21,61.92,85.48,61.19,82.39,84.58,85.25,84.07,77.95,72.68,78.63,82.30,82.38,74.99,69.73,86.40,69.55,82.56,82.86,79.08,87.58,72.97,82.45,84.39,61.65,77.55,87.74,78.23,87.56,60.80,82.38,63.74,79.05,68.45,82.15,65.26,46.28,81.62,66.74,80.97,79.64,78.19,84.32,87.86,56.43,72.08,84.30,84.18,65.42,88.01,74.07,78.17,84.22,87.38,71.79,75.48,78.95,80.54,85.47,85.81,60.57,57.76,82.20,85.83,79.91,65.32,80.36,64.58,77.99,87.41,82.67,53.48,86.79,37.43,71.27,84.71,79.26,77.78,82.98,85.69,65.22
5
+ Key_1,t_0.5_layer_10,t_0.6_layer_9,t_0.6_layer_11_no_dino,t_0.3_layer_10_no_dino,t_0.3_layer_15_no_dino,t_0.6_layer_10,t_0.3_layer_18_no_dino,t_0.8_layer_9_no_dino,t_0.8_layer_12,t_0.3_layer_15,t_0.6_layer_20,t_0.8_layer_13,t_0.7_layer_20_no_dino,t_0.8_layer_13_no_dino,t_0.8_layer_12_no_dino,t_0.7_layer_13,t_0.8_layer_15,t_0.7_layer_15_no_dino,t_0.8_layer_18_no_dino,t_0.3_layer_13,t_0.3_layer_9,t_0.5_layer_12_no_dino,t_0.6_layer_15_no_dino,t_0.8_layer_20,t_0.5_layer_12,t_0.7_layer_18_no_dino,t_0.7_layer_10_no_dino,t_0.3_layer_10,t_0.7_layer_5,t_0.8_layer_10,t_0.6_layer_18,t_0.7_layer_13_no_dino,t_0.8_layer_11_no_dino,t_0.3_layer_5_no_dino,t_0.7_layer_18,t_0.6_layer_12,t_0.8_layer_15_no_dino,t_0.7_layer_10,t_0.5_layer_18_no_dino,t_0.6_layer_13_no_dino,t_0.5_layer_5_no_dino,t_0.5_layer_13_no_dino,t_0.3_layer_13_no_dino,t_0.6_layer_10_no_dino,t_0.6_layer_18_no_dino,t_0.3_layer_20,t_0.8_layer_10_no_dino,t_0.7_layer_20,t_0.5_layer_11_no_dino,t_0.7_layer_9_no_dino,t_0.6_layer_5,t_0.7_layer_11_no_dino,t_0.6_layer_11,t_0.6_layer_20_no_dino,t_0.3_layer_9_no_dino,t_0.7_layer_15,t_0.5_layer_13,t_0.7_layer_5_no_dino,t_0.7_layer_11,t_0.3_layer_11_no_dino,t_0.5_layer_5,t_0.5_layer_9,t_0.7_layer_12,t_0.5_layer_18,t_0.3_layer_12_no_dino,t_0.6_layer_9_no_dino,t_0.5_layer_10_no_dino,t_0.8_layer_9,t_0.6_layer_13,t_0.5_layer_20,t_0.3_layer_18,t_0.3_layer_12,t_0.7_layer_9,t_0.8_layer_5,t_0.6_layer_5_no_dino,t_0.5_layer_15,t_0.8_layer_20_no_dino,t_0.5_layer_9_no_dino,t_0.8_layer_11,t_0.6_layer_15,t_0.5_layer_20_no_dino,t_0.5_layer_11,t_0.3_layer_20_no_dino,t_0.5_layer_15_no_dino,t_0.6_layer_12_no_dino,t_0.8_layer_18,t_0.3_layer_5,t_0.3_layer_11,t_0.7_layer_12_no_dino,t_0.8_layer_5_no_dino
Code/sc_dit/utils.py ADDED
@@ -0,0 +1,869 @@
1
+
2
+ import torch
3
+ import torch.nn.functional as F
4
+ from torch import Tensor
5
+ import numpy as np
6
+ import math
7
+
8
+ def resize_and_pad(image_tensor, output_size):
9
+ """
10
+ Resizes an image tensor to a square shape by scaling and padding.
11
+
12
+ Args:
13
+ image_tensor (torch.Tensor): Input image tensor of shape (H, W).
14
+ output_size (int): The desired square output size.
15
+
16
+ Returns:
17
+ torch.Tensor: The resized and padded image tensor of shape (output_size, output_size).
18
+ """
19
+ original_h, original_w = image_tensor.shape
20
+
21
+ # 1. Calculate the scale factor to fit the longest side to output_size
22
+ scale = output_size / max(original_h, original_w)
23
+ new_h, new_w = int(original_h * scale), int(original_w * scale)
24
+
25
+ # Add batch and channel dimensions for F.interpolate
26
+ image_tensor = image_tensor.unsqueeze(0).unsqueeze(0)
27
+
28
+ # 2. Resize the image, preserving aspect ratio
29
+ resized_image = F.interpolate(image_tensor, size=(new_h, new_w), mode='bilinear', align_corners=False)
30
+
31
+ # 3. Calculate padding for the shorter side
32
+ pad_h = output_size - new_h
33
+ pad_w = output_size - new_w
34
+
35
+ # Padding format is (left, right, top, bottom)
36
+ padding = (pad_w // 2, pad_w - (pad_w // 2), pad_h // 2, pad_h - (pad_h // 2))
37
+
38
+ # 4. Pad the image with a constant value (0 for black)
39
+ padded_image = F.pad(resized_image, padding, "constant", 0)
40
+
41
+ return padded_image.squeeze()
42
+
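+ # Example (illustrative only): a non-square feature map comes back square.
+ #   feat = torch.rand(60, 80)
+ #   sq = resize_and_pad(feat, 224)  # shape (224, 224); the 60-pixel side scales to 168 and is zero-padded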
43
+
44
+ def resize(img, target_res=224, resize=True, to_pil=True, edge=False):
45
+ original_width, original_height = img.size
46
+ original_channels = len(img.getbands())
47
+ if not edge:
48
+ canvas = np.zeros([target_res, target_res, 3], dtype=np.uint8)
49
+ if original_channels == 1:
50
+ canvas = np.zeros([target_res, target_res], dtype=np.uint8)
51
+ if original_height <= original_width:
52
+ if resize:
53
+ img = img.resize((target_res, int(np.around(target_res * original_height / original_width))), Image.Resampling.LANCZOS)
54
+ width, height = img.size
55
+ img = np.asarray(img)
56
+ canvas[(width - height) // 2: (width + height) // 2] = img
57
+ else:
58
+ if resize:
59
+ img = img.resize((int(np.around(target_res * original_width / original_height)), target_res), Image.Resampling.LANCZOS)
60
+ width, height = img.size
61
+ img = np.asarray(img)
62
+ canvas[:, (height - width) // 2: (height + width) // 2] = img
63
+ else:
64
+ if original_height <= original_width:
65
+ if resize:
66
+ img = img.resize((target_res, int(np.around(target_res * original_height / original_width))), Image.Resampling.LANCZOS)
67
+ width, height = img.size
68
+ img = np.asarray(img)
69
+ top_pad = (target_res - height) // 2
70
+ bottom_pad = target_res - height - top_pad
71
+ img = np.pad(img, pad_width=[(top_pad, bottom_pad), (0, 0), (0, 0)], mode='edge')
72
+ else:
73
+ if resize:
74
+ img = img.resize((int(np.around(target_res * original_width / original_height)), target_res), Image.Resampling.LANCZOS)
75
+ width, height = img.size
76
+ img = np.asarray(img)
77
+ left_pad = (target_res - width) // 2
78
+ right_pad = target_res - width - left_pad
79
+ img = np.pad(img, pad_width=[(0, 0), (left_pad, right_pad), (0, 0)], mode='edge')
80
+ canvas = img
81
+ if to_pil:
82
+ canvas = Image.fromarray(canvas)
83
+ return canvas
84
+
85
+ def scaled_shifted_sigmoid(
86
+ x: Tensor,
87
+ a: float = 1.0, # vertical scale
88
+ b: float = 1.0, # slope (steepness)
89
+ c: float = 0.0, # horizontal shift (bias)
90
+ d: float = 0.0, # vertical shift (baseline)
91
+ ) -> Tensor:
92
+ """
93
+ Compute a scaled-and-shifted sigmoid: y = a * sigmoid(b * x + c) + d.
94
+
95
+ Parameters
96
+ ----------
97
+ x : torch.Tensor
98
+ Input tensor.
99
+ a : float, default 1.0
100
+ Output scale (amplitude).
101
+ b : float, default 1.0
102
+ Input scale (controls slope).
103
+ c : float, default 0.0
104
+ Input shift (horizontal translation).
105
+ d : float, default 0.0
106
+ Output shift (vertical translation).
107
+
108
+ Returns
109
+ -------
110
+ torch.Tensor
111
+ Tensor with the same shape as `x` after applying the transformation.
112
+ """
113
+ return a * torch.sigmoid(b * x + c) + d
114
+
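+ # For example, a=2.0, b=1.0, c=0.0, d=-1.0 gives 2 * sigmoid(x) - 1 = tanh(x / 2),
+ # i.e. inputs are mapped into (-1, 1) instead of (0, 1).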
115
+
116
+ ############
117
+ # for 2D to 3D correspondence with cropping
118
+
119
+ from scipy.ndimage import distance_transform_edt as edt
120
+ from scipy.ndimage import gaussian_filter
121
+ # from skimage import img_as_ubyte
122
+ from PIL import Image
123
+ from pathlib import Path
124
+ import numpy as np
125
+ from typing import Tuple
126
+
127
+ # ✨ New helper to find the object's bounding box from transparency
128
+ def get_bbox_from_alpha(image_path: Path) -> Tuple[int, int, int, int]:
129
+ """Calculates a bounding box from the alpha channel of a PNG."""
130
+ with Image.open(image_path).convert("RGBA") as img:
131
+ alpha = np.array(img)[:, :, 3]
132
+ non_transparent_pixels = np.argwhere(alpha > 0)
133
+ y_min, x_min = non_transparent_pixels.min(axis=0)
134
+ y_max, x_max = non_transparent_pixels.max(axis=0)
135
+ return x_min, y_min, x_max, y_max
136
+
137
+ # ... (rest of your imports and functions)
138
+
139
+
140
+
141
+ #####################
142
+ # dataset utils loading functions
143
+ #####################
144
+ import os
145
+ import json
146
+ import numpy as np
147
+ import pandas as pd
148
+ import torch
149
+ from glob import glob
150
+ # from scipy.io import loadmat as read_mat
151
+ import scipy.io as sio
152
+
153
+
154
+ def read_mat(path, obj_name):
155
+ r"""Reads specified objects from Matlab data file, (.mat)"""
156
+ mat_contents = sio.loadmat(path)
157
+ mat_obj = mat_contents[obj_name]
158
+
159
+ return mat_obj
160
+
161
+ def process_kps_pascal(kps):
162
+ # Step 1: Reshape the array to (20, 2) by adding nan values
163
+ num_pad_rows = 20 - kps.shape[0]
164
+ if num_pad_rows > 0:
165
+ pad_values = np.full((num_pad_rows, 2), np.nan)
166
+ kps = np.vstack((kps, pad_values))
167
+
168
+ # Step 2: Reshape the array to (20, 3)
169
+ # Add an extra column: set to 1 if the row does not contain nan, 0 otherwise
170
+ last_col = np.isnan(kps).any(axis=1)
171
+ last_col = np.where(last_col, 0, 1)
172
+ kps = np.column_stack((kps, last_col))
173
+
174
+ # Step 3: Replace rows with nan values to all 0's
175
+ mask = np.isnan(kps).any(axis=1)
176
+ kps[mask] = 0
177
+
178
+ return torch.tensor(kps).float()
179
+
180
+ def preprocess_kps_pad(kps, img_width, img_height, size):
181
+ # Once an image has been pre-processed via border (or zero) padding,
182
+ # the location of key points needs to be updated. This function applies
183
+ # that pre-processing to the key points so they are correctly located
184
+ # in the border-padded (or zero-padded) image.
185
+ kps = kps.clone()
186
+ scale = size / max(img_width, img_height)
187
+ kps[:, [0, 1]] *= scale
188
+ if img_height < img_width:
189
+ new_h = int(np.around(size * img_height / img_width))
190
+ offset_y = int((size - new_h) / 2)
191
+ offset_x = 0
192
+ kps[:, 1] += offset_y
193
+ elif img_width < img_height:
194
+ new_w = int(np.around(size * img_width / img_height))
195
+ offset_x = int((size - new_w) / 2)
196
+ offset_y = 0
197
+ kps[:, 0] += offset_x
198
+ else:
199
+ offset_x = 0
200
+ offset_y = 0
201
+ kps *= kps[:, 2:3].clone() # zero-out any non-visible key points
202
+ return kps, offset_x, offset_y, scale
203
+
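+ # Worked example: a 640x480 (W x H) image padded to size=256 gives scale = 256 / 640 = 0.4,
+ # new_h = 192 and offset_y = (256 - 192) // 2 = 32, so a visible keypoint (x, y) maps to
+ # (0.4 * x, 0.4 * y + 32); keypoints with visibility 0 are zeroed out by the last line above.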
204
+
205
+ def load_pascal_data(path="data/PF-dataset-PASCAL", size=256, category='cat', split='test', subsample=None):
206
+
207
+ def get_points(point_coords_list, idx):
208
+ X = np.fromstring(point_coords_list.iloc[idx, 0], sep=";")
209
+ Y = np.fromstring(point_coords_list.iloc[idx, 1], sep=";")
210
+ Xpad = -np.ones(20)
211
+ Xpad[: len(X)] = X
212
+ Ypad = -np.ones(20)
213
+ Ypad[: len(X)] = Y
214
+ Zmask = np.zeros(20)
215
+ Zmask[: len(X)] = 1
216
+ point_coords = np.concatenate(
217
+ (Xpad.reshape(1, 20), Ypad.reshape(1, 20), Zmask.reshape(1,20)), axis=0
218
+ )
219
+ # make arrays float tensor for subsequent processing
220
+ point_coords = torch.Tensor(point_coords.astype(np.float32))
221
+ return point_coords
222
+
223
+ np.random.seed(42)
224
+ files = []
225
+ kps = []
226
+ test_data = pd.read_csv(f'{path}/{split}_pairs_pf_pascal.csv')
227
+ cls = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
228
+ 'bus', 'car', 'cat', 'chair', 'cow',
229
+ 'diningtable', 'dog', 'horse', 'motorbike', 'person',
230
+ 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
231
+ cls_ids = test_data.iloc[:,2].values.astype("int") - 1
232
+ cat_id = cls.index(category)
233
+ subset_id = np.where(cls_ids == cat_id)[0]
234
+ # logger.info(f'Number of Pairs for {category} = {len(subset_id)}')
235
+ subset_pairs = test_data.iloc[subset_id,:]
236
+ src_img_names = np.array(subset_pairs.iloc[:,0])
237
+ trg_img_names = np.array(subset_pairs.iloc[:,1])
238
+ # print(src_img_names.shape, trg_img_names.shape)
239
+ if not split.startswith('train'):
240
+ point_A_coords = subset_pairs.iloc[:,3:5]
241
+ point_B_coords = subset_pairs.iloc[:,5:]
242
+ # print(point_A_coords.shape, point_B_coords.shape)
243
+ for i in range(len(src_img_names)):
244
+ src_fn= f'{path}/../{src_img_names[i]}'
245
+ trg_fn= f'{path}/../{trg_img_names[i]}'
246
+ src_size=Image.open(src_fn).size
247
+ trg_size=Image.open(trg_fn).size
248
+
249
+ if not split.startswith('train'):
250
+ point_coords_src = get_points(point_A_coords, i).transpose(1,0)
251
+ point_coords_trg = get_points(point_B_coords, i).transpose(1,0)
252
+ else:
253
+ src_anns = os.path.join(path, 'Annotations', category,
254
+ os.path.basename(src_fn))[:-4] + '.mat'
255
+ trg_anns = os.path.join(path, 'Annotations', category,
256
+ os.path.basename(trg_fn))[:-4] + '.mat'
257
+ point_coords_src = process_kps_pascal(read_mat(src_anns, 'kps'))
258
+ point_coords_trg = process_kps_pascal(read_mat(trg_anns, 'kps'))
259
+
260
+ # print(src_size)
261
+ source_kps, src_x, src_y, src_scale = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
262
+ target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
263
+ kps.append(source_kps)
264
+ kps.append(target_kps)
265
+ files.append(src_fn)
266
+ files.append(trg_fn)
267
+
268
+ kps = torch.stack(kps)
269
+ used_kps, = torch.where(kps[:, :, 2].any(dim=0))
270
+ kps = kps[:, used_kps, :]
271
+ # logger.info(f'Final number of used key points: {kps.size(1)}')
272
+ return files, kps, None, used_kps
273
+
274
+
275
+ def load_spair_data(path="data/SPair-71k", size=256, category='cat', split='test', subsample=None):
276
+ np.random.seed(42)
277
+ pairs = sorted(glob(f'{path}/PairAnnotation/{split}/*:{category}.json'))
278
+ if subsample is not None and subsample > 0:
279
+ pairs = [pairs[ix] for ix in np.random.choice(len(pairs), subsample)]
280
+ files = []
281
+ thresholds = []
282
+ kps = []
283
+ category_anno = list(glob(f'{path}/ImageAnnotation/{category}/*.json'))[0]
284
+ with open(category_anno) as f:
285
+ num_kps = len(json.load(f)['kps'])
286
+ for pair in pairs:
287
+ source_kps = torch.zeros(num_kps, 3)
288
+ target_kps = torch.zeros(num_kps, 3)
289
+ with open(pair) as f:
290
+ data = json.load(f)
291
+ assert category == data["category"]
292
+ source_fn = f'{path}/JPEGImages/{category}/{data["src_imname"]}'
293
+ target_fn = f'{path}/JPEGImages/{category}/{data["trg_imname"]}'
294
+ source_json_name = source_fn.replace('JPEGImages','ImageAnnotation').replace('jpg','json')
295
+ target_json_name = target_fn.replace('JPEGImages','ImageAnnotation').replace('jpg','json')
296
+ source_bbox = np.asarray(data["src_bndbox"]) # (x1, y1, x2, y2)
297
+ target_bbox = np.asarray(data["trg_bndbox"])
298
+ with open(source_json_name) as f:
299
+ file = json.load(f)
300
+ kpts_src = file['kps']
301
+ with open(target_json_name) as f:
302
+ file = json.load(f)
303
+ kpts_trg = file['kps']
304
+
305
+ source_size = data["src_imsize"][:2] # (W, H)
306
+ target_size = data["trg_imsize"][:2] # (W, H)
307
+
308
+ for i in range(30):
309
+ point = kpts_src[str(i)]
310
+ if point is None:
311
+ source_kps[i, :3] = 0
312
+ else:
313
+ source_kps[i, :2] = torch.Tensor(point).float() # set x and y
314
+ source_kps[i, 2] = 1
315
+ source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps, source_size[0], source_size[1], size)
316
+
317
+ for i in range(30):
318
+ point = kpts_trg[str(i)]
319
+ if point is None:
320
+ target_kps[i, :3] = 0
321
+ else:
322
+ target_kps[i, :2] = torch.Tensor(point).float()
323
+ target_kps[i, 2] = 1
324
+ # target_raw_kps = torch.cat([torch.tensor(data["trg_kps"], dtype=torch.float), torch.ones(kp_ixs.size(0), 1)], 1)
325
+ # target_kps = blank_kps.scatter(dim=0, index=kp_ixs, src=target_raw_kps)
326
+ target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps, target_size[0], target_size[1], size)
327
+ if split == 'test' or split == 'val':
328
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)
329
+ elif split == 'trn':
330
+ thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0])*src_scale)
331
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)
332
+
333
+ kps.append(source_kps)
334
+ kps.append(target_kps)
335
+ files.append(source_fn)
336
+ files.append(target_fn)
337
+ kps = torch.stack(kps)
338
+ used_kps, = torch.where(kps[:, :, 2].any(dim=0))
339
+ kps = kps[:, used_kps, :]
340
+
341
+ return files, kps, thresholds, used_kps
342
+
343
+
344
+ def load_specific_pascal_pair(
345
+ source_image_id: str,
346
+ target_image_id: str,
347
+ path: str = "data/PF-dataset-PASCAL",
348
+ size: int = 256,
349
+ split: str = 'test'
350
+ ):
351
+ """
352
+ Loads and processes a specific pair of source and target images from the PASCAL dataset.
353
+
354
+ Args:
355
+ source_image_id: The identifier of the source image (e.g., '2011_001407').
356
+ target_image_id: The identifier of the target image (e.g., '2010_004184').
357
+ path: The base path to the PF-PASCAL dataset directory.
358
+ size: The target size for preprocessing images.
359
+ split: The dataset split to use ('test', 'train', etc.).
360
+
361
+ Returns:
362
+ A tuple containing:
363
+ - files (list): A list with the full paths to the source and target images.
364
+ - kps (torch.Tensor): A tensor of processed keypoints for the image pair.
365
+ - None: A placeholder to match the original function's return format.
366
+ - used_kps_indices (torch.Tensor): A tensor of indices for keypoints present in either image.
367
+ """
368
+
369
+ def get_points_from_strings(x_str: str, y_str: str) -> torch.Tensor:
370
+ """Parses coordinate strings, pads them, and returns a tensor."""
371
+ X = np.fromstring(x_str, sep=";")
372
+ Y = np.fromstring(y_str, sep=";")
373
+
374
+ # Pad arrays to a fixed size of 20 (as in the original function)
375
+ Xpad = -np.ones(20)
376
+ Xpad[:len(X)] = X
377
+ Ypad = -np.ones(20)
378
+ Ypad[:len(Y)] = Y
379
+
380
+ # Create a mask for valid keypoints
381
+ Zmask = np.zeros(20)
382
+ Zmask[:len(X)] = 1
383
+
384
+ point_coords = np.stack((Xpad, Ypad, Zmask), axis=0)
385
+ return torch.from_numpy(point_coords.astype(np.float32))
386
+
387
+ # Construct the path to the CSV file and load it
388
+ csv_path = os.path.join(path, f'{split}_pairs_pf_pascal.csv')
389
+ try:
390
+ pairs_df = pd.read_csv(csv_path)
391
+ except FileNotFoundError:
392
+ print(f"Error: CSV file not found at '{csv_path}'")
393
+ return None, None, None, None
394
+
395
+ # Find the specific row matching the source and target image IDs
396
+ pair_row = pairs_df[
397
+ pairs_df['source_image'].str.contains(source_image_id) &
398
+ pairs_df['target_image'].str.contains(target_image_id)
399
+ ]
400
+
401
+ if pair_row.empty:
402
+ print(f"Error: Pair for source '{source_image_id}' and target '{target_image_id}' not found.")
403
+ return None, None, None, None
404
+
405
+ # Select the first match
406
+ pair_data = pair_row.iloc[0]
407
+
408
+ # Get full image paths and dimensions
409
+ src_fn = os.path.join(path, '..', pair_data['source_image'])
410
+ trg_fn = os.path.join(path, '..', pair_data['target_image'])
411
+
412
+ try:
413
+ src_size = Image.open(src_fn).size
414
+ trg_size = Image.open(trg_fn).size
415
+ except FileNotFoundError as e:
416
+ print(f"Error: Image file not found: {e.filename}")
417
+ return None, None, None, None
418
+
419
+ # Process keypoints based on the split type
420
+ if not split.startswith('train'):
421
+ point_coords_src = get_points_from_strings(pair_data['XA'], pair_data['YA']).T
422
+ point_coords_trg = get_points_from_strings(pair_data['XB'], pair_data['YB']).T
423
+ else:
424
+ # This logic for the 'train' split is preserved from the original function
425
+ cls_list = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
426
+ 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
427
+ 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
428
+ category = cls_list[pair_data['class'] - 1]
429
+
430
+ src_anns_path = os.path.join(path, 'Annotations', category, os.path.basename(src_fn).replace('.jpg', '.mat'))
431
+ trg_anns_path = os.path.join(path, 'Annotations', category, os.path.basename(trg_fn).replace('.jpg', '.mat'))
432
+
433
+ point_coords_src = process_kps_pascal(read_mat(src_anns_path, 'kps'))
434
+ point_coords_trg = process_kps_pascal(read_mat(trg_anns_path, 'kps'))
435
+
436
+ # Preprocess keypoints (e.g., padding and scaling)
437
+ source_kps, _, _, _ = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
438
+ target_kps, _, _, _ = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
439
+
440
+ # Stack keypoints and find the indices of keypoints present in at least one image
441
+ kps = torch.stack([source_kps, target_kps])
442
+ used_kps_indices, = torch.where(kps[:, :, 2].any(dim=0))
443
+
444
+ # Filter the keypoints tensor to include only the used keypoints
445
+ kps_final = kps[:, used_kps_indices, :]
446
+
447
+ return [src_fn, trg_fn], kps_final, None, used_kps_indices
448
+
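For orientation, a minimal usage sketch of the pair loader above (the image IDs are the docstring examples and may not correspond to an actual row in the split CSV):

files, kps, _, used_kps = load_specific_pascal_pair(
    source_image_id="2011_001407",
    target_image_id="2010_004184",
    path="data/PF-dataset-PASCAL",   # assumed dataset root
    size=256,
    split="test",
)
if files is not None:
    print(files)        # [source_path, target_path]
    print(kps.shape)    # (2, n_visible_kps, 3) with columns x, y, visibility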
449
+ import matplotlib.pyplot as plt
450
+ def load_img_and_kps(idx, files, kps, img_size=224, edge=False, load_masked=False):
451
+ if load_masked:
452
+ img_rgba = Image.open(files[idx].replace('JPEGImages', 'JPEGImages_bgd_rmv').replace('.jpg', '_bgd_rmv.png')).convert('RGBA')
453
+
454
+ # img_rgba = Image.open(path_image).convert("RGBA")
455
+
456
+ # 2. create a white background and composite
457
+ img = Image.new("RGB", img_rgba.size, (0, 0, 0)) # choose any colour here
458
+ img.paste(img_rgba, mask=img_rgba.split()[3]) # mask = alpha channel
459
+ plt.imsave("img2_masked_before_resize.png", np.array(img))
460
+ # print(np.array(img).shape)
461
+ else:
462
+ img = Image.open(files[idx]).convert('RGB')
463
+ img = resize(img, img_size, resize=True, to_pil=True, edge=edge)
464
+ if load_masked:
465
+ plt.imsave("img2_masked_after_resize.png", np.array(img))
466
+ img_kps = kps[idx]
467
+
468
+ return img, img_kps
469
+
470
+
471
+ import os
472
+ import json
473
+ from glob import glob
474
+ import numpy as np
475
+ import torch
476
+
477
+ # NOTE: The helper function preprocess_kps_pad(kps, width, height, size)
478
+ # is assumed to be defined elsewhere, as in your original code.
479
+
480
+ def load_specific_spair_pair(
481
+ source_image_name: str,
482
+ target_image_name: str,
483
+ category: str,
484
+ path: str = "data/SPair-71k",
485
+ size: int = 256,
486
+ split: str = 'test',
487
+ unfiltered: bool = False
488
+
489
+ ):
490
+ """
491
+ Loads and processes a specific pair of images from the SPair-71k dataset.
492
+
493
+ Args:
494
+ source_image_name (str): Filename of the source image (e.g., '2008_002719.jpg').
495
+ target_image_name (str): Filename of the target image (e.g., '2008_004100.jpg').
496
+ category (str): The object category (e.g., 'aeroplane').
497
+ path (str): The base path to the SPair-71k dataset directory.
498
+ size (int): The target size for preprocessing images.
499
+ split (str): The dataset split to use ('test', 'trn', 'val').
500
+
501
+ Returns:
502
+ A tuple containing:
503
+ - files (list): Full paths to the source and target images.
504
+ - kps (torch.Tensor): Processed keypoints for the pair.
505
+ - thresholds (list): Bounding-box based thresholds for the pair.
506
+ - used_kps_indices (torch.Tensor): Indices of keypoints present in either image.
507
+ """
508
+
509
+ # Helper to create a keypoint tensor from the annotation dictionary
510
+ def _get_kps_tensor(kps_dict, num_kps):
511
+ kps_tensor = torch.zeros(num_kps, 3)
512
+ for i in range(num_kps):
513
+ point = kps_dict.get(str(i)) # Use .get() for safety
514
+ if point is not None:
515
+ kps_tensor[i, :2] = torch.tensor(point, dtype=torch.float)
516
+ kps_tensor[i, 2] = 1.0 # Mark as visible
517
+ return kps_tensor
518
+
519
+ # --- 1. Find the correct pair annotation file ---
520
+ pair_annotation_path = os.path.join(path, 'PairAnnotation', split)
521
+ candidate_files = glob(os.path.join(pair_annotation_path, f'*:{category}.json'))
522
+
523
+ pair_data = None
524
+ for file_path in candidate_files:
525
+ with open(file_path) as f:
526
+ data = json.load(f)
527
+ if data['src_imname'] == source_image_name and data['trg_imname'] == target_image_name:
528
+ pair_data = data
529
+ break
530
+
531
+ if pair_data is None:
532
+ print(f"Error: Pair for '{source_image_name}' and '{target_image_name}' not found.")
533
+ return None, None, None, None
534
+
535
+ # --- 2. Process the found pair ---
536
+ source_fn = os.path.join(path, 'JPEGImages', category, pair_data['src_imname'])
537
+ target_fn = os.path.join(path, 'JPEGImages', category, pair_data['trg_imname'])
538
+ files = [source_fn, target_fn]
539
+
540
+ # Get total number of keypoints for the category
541
+ try:
542
+ category_anno_path = glob(os.path.join(path, 'ImageAnnotation', category, '*.json'))[0]
543
+ with open(category_anno_path) as f:
544
+ num_kps = len(json.load(f)['kps'])
545
+ except IndexError:
546
+ print(f"Error: No image annotations found for category '{category}'.")
547
+ return None, None, None, None
548
+
549
+ # Get keypoints from individual image annotation files
550
+ source_json_path = source_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
551
+ target_json_path = target_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
552
+
553
+ with open(source_json_path) as f:
554
+ kpts_src_dict = json.load(f)['kps']
555
+ with open(target_json_path) as f:
556
+ kpts_trg_dict = json.load(f)['kps']
557
+
558
+ source_kps_raw = _get_kps_tensor(kpts_src_dict, num_kps)
559
+ target_kps_raw = _get_kps_tensor(kpts_trg_dict, num_kps)
560
+
561
+ # print(f"Source keypoints raw: {source_kps_raw.shape}, Target keypoints raw: {target_kps_raw.shape}")
562
+
563
+ # Preprocess keypoints (padding, scaling, etc.)
564
+ w_src, h_src = pair_data["src_imsize"][:2]
565
+ w_trg, h_trg = pair_data["trg_imsize"][:2]
566
+
567
+ source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps_raw, w_src, h_src, size)
568
+ target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps_raw, w_trg, h_trg, size)
569
+
570
+ # Calculate thresholds from bounding boxes
571
+ source_bbox = np.asarray(pair_data["src_bndbox"])
572
+ target_bbox = np.asarray(pair_data["trg_bndbox"])
573
+ thresholds = []
574
+ if split == 'test' or split == 'val':
575
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
576
+ elif split == 'trn':
577
+ thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0]) * src_scale)
578
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
579
+
580
+ # --- 3. Format output ---
581
+ kps = torch.stack([source_kps, target_kps])
582
+ used_kps_indices, = torch.where(kps[:, :, 2].any(dim=0))
583
+ kps_final = kps[:, used_kps_indices, :]
584
+
585
+ if unfiltered:
586
+ return files, kps, thresholds, used_kps_indices
587
+ else:
588
+ return files, kps_final, thresholds, used_kps_indices
589
+
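A short end-to-end sketch pairing the SPair loader with `load_img_and_kps` above (the image names and category are the docstring examples; whether they form a real test pair is not verified here):

files, kps, thresholds, used_kps = load_specific_spair_pair(
    source_image_name="2008_002719.jpg",
    target_image_name="2008_004100.jpg",
    category="aeroplane",
    path="data/SPair-71k",   # assumed dataset root
    size=256,
    split="test",
)
if files is not None:
    img1, img1_kps = load_img_and_kps(0, files, kps, img_size=256)
    img2, img2_kps = load_img_and_kps(1, files, kps, img_size=256)
    # thresholds holds the PCK@bbox threshold(s); for 'test'/'val' only the target box is used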
590
+
591
+
592
+
593
+ ######################################
594
+ # original loading function
595
+ ######################################
596
+
597
+ def load_spair_data(path="data/SPair-71k", size=256, category='cat', split='test', subsample=None):
598
+ np.random.seed(42)
599
+ pairs = sorted(glob(f'{path}/PairAnnotation/{split}/*:{category}.json'))
600
+ if subsample is not None and subsample > 0:
601
+ pairs = [pairs[ix] for ix in np.random.choice(len(pairs), subsample)]
602
+ files = []
603
+ thresholds = []
604
+ kps = []
605
+ category_anno = list(glob(f'{path}/ImageAnnotation/{category}/*.json'))[0]
606
+ with open(category_anno) as f:
607
+ num_kps = len(json.load(f)['kps'])
608
+ for pair in pairs:
609
+ source_kps = torch.zeros(num_kps, 3)
610
+ target_kps = torch.zeros(num_kps, 3)
611
+ with open(pair) as f:
612
+ data = json.load(f)
613
+ assert category == data["category"]
614
+ source_fn = f'{path}/JPEGImages/{category}/{data["src_imname"]}'
615
+ target_fn = f'{path}/JPEGImages/{category}/{data["trg_imname"]}'
616
+ source_json_name = source_fn.replace('JPEGImages','ImageAnnotation').replace('jpg','json')
617
+ target_json_name = target_fn.replace('JPEGImages','ImageAnnotation').replace('jpg','json')
618
+ source_bbox = np.asarray(data["src_bndbox"]) # (x1, y1, x2, y2)
619
+ target_bbox = np.asarray(data["trg_bndbox"])
620
+ with open(source_json_name) as f:
621
+ file = json.load(f)
622
+ kpts_src = file['kps']
623
+ with open(target_json_name) as f:
624
+ file = json.load(f)
625
+ kpts_trg = file['kps']
626
+
627
+ source_size = data["src_imsize"][:2] # (W, H)
628
+ target_size = data["trg_imsize"][:2] # (W, H)
629
+
630
+ for i in range(30):
631
+ point = kpts_src[str(i)]
632
+ if point is None:
633
+ source_kps[i, :3] = 0
634
+ else:
635
+ source_kps[i, :2] = torch.Tensor(point).float() # set x and y
636
+ source_kps[i, 2] = 1
637
+ source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps, source_size[0], source_size[1], size)
638
+
639
+ for i in range(30):
640
+ point = kpts_trg[str(i)]
641
+ if point is None:
642
+ target_kps[i, :3] = 0
643
+ else:
644
+ target_kps[i, :2] = torch.Tensor(point).float()
645
+ target_kps[i, 2] = 1
646
+ # target_raw_kps = torch.cat([torch.tensor(data["trg_kps"], dtype=torch.float), torch.ones(kp_ixs.size(0), 1)], 1)
647
+ # target_kps = blank_kps.scatter(dim=0, index=kp_ixs, src=target_raw_kps)
648
+ target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps, target_size[0], target_size[1], size)
649
+ if split == 'test' or split == 'val':
650
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)
651
+ elif split == 'trn':
652
+ thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0])*src_scale)
653
+ thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)
654
+
655
+ kps.append(source_kps)
656
+ kps.append(target_kps)
657
+ files.append(source_fn)
658
+ files.append(target_fn)
659
+ kps = torch.stack(kps)
660
+ used_kps, = torch.where(kps[:, :, 2].any(dim=0))
661
+ kps = kps[:, used_kps, :]
662
+
663
+ return files, kps, thresholds, used_kps
664
+
665
+
666
+ def load_pascal_data(path="data/PF-dataset-PASCAL", size=256, category='cat', split='test', subsample=None):
667
+
668
+ def get_points(point_coords_list, idx):
669
+ X = np.fromstring(point_coords_list.iloc[idx, 0], sep=";")
670
+ Y = np.fromstring(point_coords_list.iloc[idx, 1], sep=";")
671
+ Xpad = -np.ones(20)
672
+ Xpad[: len(X)] = X
673
+ Ypad = -np.ones(20)
674
+ Ypad[: len(X)] = Y
675
+ Zmask = np.zeros(20)
676
+ Zmask[: len(X)] = 1
677
+ point_coords = np.concatenate(
678
+ (Xpad.reshape(1, 20), Ypad.reshape(1, 20), Zmask.reshape(1,20)), axis=0
679
+ )
680
+ # make arrays float tensor for subsequent processing
681
+ point_coords = torch.Tensor(point_coords.astype(np.float32))
682
+ return point_coords
683
+
684
+ np.random.seed(42)
685
+ files = []
686
+ kps = []
687
+ test_data = pd.read_csv(f'{path}/{split}_pairs_pf_pascal.csv')
688
+ cls = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
689
+ 'bus', 'car', 'cat', 'chair', 'cow',
690
+ 'diningtable', 'dog', 'horse', 'motorbike', 'person',
691
+ 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
692
+ cls_ids = test_data.iloc[:,2].values.astype("int") - 1
693
+ cat_id = cls.index(category)
694
+ subset_id = np.where(cls_ids == cat_id)[0]
695
+ # logger.info(f'Number of Pairs for {category} = {len(subset_id)}')
696
+ subset_pairs = test_data.iloc[subset_id,:]
697
+ src_img_names = np.array(subset_pairs.iloc[:,0])
698
+ trg_img_names = np.array(subset_pairs.iloc[:,1])
699
+ # print(src_img_names.shape, trg_img_names.shape)
700
+ if not split.startswith('train'):
701
+ point_A_coords = subset_pairs.iloc[:,3:5]
702
+ point_B_coords = subset_pairs.iloc[:,5:]
703
+ # print(point_A_coords.shape, point_B_coords.shape)
704
+ for i in range(len(src_img_names)):
705
+ src_fn= f'{path}/../{src_img_names[i]}'
706
+ trg_fn= f'{path}/../{trg_img_names[i]}'
707
+ src_size=Image.open(src_fn).size
708
+ trg_size=Image.open(trg_fn).size
709
+
710
+ if not split.startswith('train'):
711
+ point_coords_src = get_points(point_A_coords, i).transpose(1,0)
712
+ point_coords_trg = get_points(point_B_coords, i).transpose(1,0)
713
+ else:
714
+ src_anns = os.path.join(path, 'Annotations', category,
715
+ os.path.basename(src_fn))[:-4] + '.mat'
716
+ trg_anns = os.path.join(path, 'Annotations', category,
717
+ os.path.basename(trg_fn))[:-4] + '.mat'
718
+ point_coords_src = process_kps_pascal(read_mat(src_anns, 'kps'))
719
+ point_coords_trg = process_kps_pascal(read_mat(trg_anns, 'kps'))
720
+
721
+ # print(src_size)
722
+ source_kps, src_x, src_y, src_scale = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
723
+ target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
724
+ kps.append(source_kps)
725
+ kps.append(target_kps)
726
+ files.append(src_fn)
727
+ files.append(trg_fn)
728
+
729
+ kps = torch.stack(kps)
730
+ used_kps, = torch.where(kps[:, :, 2].any(dim=0))
731
+ kps = kps[:, used_kps, :]
732
+ # logger.info(f'Final number of used key points: {kps.size(1)}')
733
+ return files, kps, None, used_kps
734
+
735
+
736
+ def load_eval_data(args, path, category, split):
737
+ # if args.EVAL_DATASET == 'ap10k':
738
+ # files, kps, thresholds, used_kps = load_ap10k_data(path, args.ANNO_SIZE, category, split, args.TEST_SAMPLE)
739
+ print(f"Loading evaluation data for dataset: {args.EVAL_DATASET}, category: {category}, split: {split}, test sample: {args.TEST_SAMPLE}")
740
+ if args.EVAL_DATASET == 'pascal':
741
+ files, kps, thresholds, used_kps = load_pascal_data(path, args.ANNO_SIZE, category, split, args.TEST_SAMPLE)
742
+ elif args.EVAL_DATASET == 'spair':
743
+ files, kps, thresholds, used_kps = load_spair_data(path, args.ANNO_SIZE, category, split, args.TEST_SAMPLE)
744
+
745
+ return files, kps, thresholds, used_kps
746
+
747
+
748
+ ###### plot helper
749
+ from PIL import Image, ImageDraw, ImageFont
750
+
751
+ def draw_bbox_point_grid(
752
+ image,
753
+ bbox=None,
754
+ point=None,
755
+ box_color=(0, 255, 0),
756
+ pt_color=(255, 0, 0),
757
+ width=5,
758
+ draw_grid=False,
759
+ step=50, # pixels between grid lines
760
+ grid_color=(255, 255, 255),
761
+ grid_width=1,
762
+ add_text=True,
763
+ dilation=28
764
+ ):
765
+ """Draw bbox, point, and optional grid on a PIL image.
766
+
767
+ Args
768
+ ----
769
+ image (PIL.Image): target image (modified in place if not copied).
770
+ bbox (list | tuple): [x1, y1, x2, y2] or None.
771
+ point (tuple): (x, y) or None.
772
+ box_color / pt_color (tuple): RGB for the bbox outline and the point marker.
773
+ width (int): line width for bbox.
774
+ draw_grid (bool): enable/disable grid.
775
+ step (int): grid spacing in pixels.
776
+ grid_color (tuple): RGB for grid.
777
+ grid_width (int): line width for grid.
778
+ """
779
+ draw = ImageDraw.Draw(image)
780
+
781
+ if dilation > 0 and bbox is not None:
782
+ # Dilation logic: expand bbox by dilation pixels
783
+ x1, y1, x2, y2 = bbox
784
+ bbox = (x1 - dilation, y1 - dilation, x2 + dilation, y2 + dilation)
785
+
786
+ # ── draw grid ───────────────────────────────────────────
787
+ if draw_grid and step > 0:
788
+ w, h = image.size
789
+ # vertical lines
790
+ for x in range(0, w, step):
791
+ draw.line([(x, 0), (x, h)], fill=grid_color, width=grid_width)
792
+ # horizontal lines
793
+ for y in range(0, h, step):
794
+ draw.line([(0, y), (w, y)], fill=grid_color, width=grid_width)
795
+
796
+ # ── draw bbox ──────────────────────────────────────────
797
+ if bbox is not None:
798
+ draw.rectangle(bbox, outline=box_color, width=width)
799
+
800
+ # ── draw point ─────────────────────────────────────────
801
+ if point is not None:
802
+ radius = 20
803
+ x, y = point
804
+ draw.ellipse(
805
+ (x - radius, y - radius, x + radius, y + radius),
806
+ fill=pt_color
807
+ )
808
+ # add a white text at the center of the point
810
+ if add_text:
811
+ text = "Ref"
812
+ # Try to use a better font, or fall back to the default if not found
813
+ try:
814
+ font = ImageFont.truetype("DejaVuSans.ttf", size=26)
815
+ except IOError:
816
+ font = ImageFont.load_default()
818
+
819
+ # Get text bounding box for centering
821
+ bbox_text = draw.textbbox((0, 0), text, font=font)
822
+ text_width = bbox_text[2] - bbox_text[0]
823
+ text_height = bbox_text[3] - bbox_text[1]
824
+
825
+ text_x = x - text_width // 2
826
+ text_y = y - text_height // 2
827
+ draw.text((text_x, text_y), text, font=font, fill=(255, 255, 255))  # PIL's draw.text has no 'text_anchor' kwarg; text_x/text_y already center the label
828
+
829
+
830
+ return image
831
+
832
+
833
+ def square_bbox_to_multiple(bbox, multiple=14):
834
+ """
835
+ Expand a rectangle to the smallest *square* box whose side length
836
+
837
+ 1. is at least the longer side of `bbox`, and
838
+ 2. is the nearest *upper* multiple of `multiple`.
839
+
840
+ The square is centred on the original box.
841
+
842
+ Parameters
843
+ ----------
844
+ bbox : list | tuple
845
+ [x1, y1, x2, y2] (top-left / bottom-right in pixels)
846
+ multiple : int, optional
847
+ Grid size to snap the side length to (default: 14).
848
+
849
+ Returns
850
+ -------
851
+ list
852
+ [new_x1, new_y1, new_x2, new_y2] square bounding box.
853
+ """
854
+ x1, y1, x2, y2 = map(float, bbox)
855
+ w, h = x2 - x1, y2 - y1
856
+
857
+ # 1) choose the longer side, then round **up** to nearest multiple
858
+ side = math.ceil(max(w, h) / multiple) * multiple
859
+
860
+ # 2) centre the square on the original box
861
+ cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
862
+ half = side / 2
863
+
864
+ new_x1 = int(round(cx - half))
865
+ new_y1 = int(round(cy - half))
866
+ new_x2 = new_x1 + side
867
+ new_y2 = new_y1 + side
868
+
869
+ return [new_x1, new_y1, new_x2, new_y2]
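A hand-worked example of the snapping logic (values computed from the formula above, assuming `math` is imported at the top of this module):

# longer side = 160 → ceil(160 / 14) * 14 = 168, centred on (60, 100)
print(square_bbox_to_multiple([10, 20, 110, 180], multiple=14))
# → [-24, 16, 144, 184]; the square may spill past the image border, so callers
#   that crop with it may still need to clamp or pad afterwards.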
Code/sc_dit/utils_actor_critic.py ADDED
@@ -0,0 +1,408 @@
1
+ import json
2
+ from typing import Optional, Dict, Any
3
+ from PIL import Image
4
+ import base64
5
+ import io
6
+ from typing import Tuple
7
+ from PIL import ImageDraw
8
+ import re
9
+ import os
10
+
11
+ def _actor_prompt(
12
+ x: int,
13
+ y: int,
14
+ class_name: str,
15
+ feedback: Optional[str] = None,
16
+ previous_json: Optional[Dict[str, Any]] = None,
17
+ orientation_hint: Optional[str] = None, # NEW ARG
18
+ ) -> str:
19
+ """
20
+ Generate the prompt for the actor (GPT-4o vision-enabled).
21
+ """
22
+
23
+ if feedback and previous_json:
24
+ prev_str = json.dumps(previous_json, ensure_ascii=False, indent=2)
25
+ return (
26
+ f"Critic feedback: \"{feedback}\"\n\n"
27
+ "Here is your previous response:\n"
28
+ f"{prev_str}\n\n"
29
+ f"The query point is at pixel coordinates (x={x}, y={y}) in a 840×840 image.\n"
30
+ "Please return a corrected JSON object with the **same keys**. Only modify what is incorrect.\n"
31
+ "**Strictly return one valid JSON block only. No markdown, no explanation.**"
32
+ )
33
+
34
+ # --- Base prompt for first iteration --- #
35
+ orientation_context = (
36
+ f"Note: The object’s canonical front-facing direction is toward the {orientation_hint} side of the image.\n\n"
37
+ if orientation_hint else ""
38
+ )
39
+
40
+ return (
41
+ f"# Task: Describe the part of a {class_name} touched by the keypoint.\n\n"
42
+ f"Image resolution: 840 × 840 pixels\n"
43
+ f"Keypoint location: (x={x}, y={y})\n"
44
+ f"{orientation_context}"
45
+ "You will output a JSON object describing:\n"
46
+ "- What the part is\n"
47
+ "- Where it is located **within the object**\n"
48
+ "- Which direction the whole object is facing in the image\n"
49
+ "- A bounding box that tightly encloses the part and includes the keypoint\n"
50
+ "- Confidence and visibility status\n\n"
51
+ "## Required JSON fields (with strict meanings):\n\n"
52
+ "1. **part_name** (string)\n"
53
+ " - What it describes: The specific component name touched by the keypoint\n"
54
+ " - Frame of reference: identity only\n"
55
+ " - Example: 'propeller', 'left aileron', 'starboard hull'\n\n"
56
+
57
+ "2. **orientation** (string)\n"
58
+ " - What it describes: A concise directional label of where the part is located **within the object**\n"
59
+ " - Frame of reference: object-relative (not camera)\n"
60
+ " - Example: 'front-left', 'upper-rear', 'center'\n\n"
61
+
62
+ "3. **spatial_location** (string)\n"
63
+ " - What it describes: A free-form natural language phrase describing the part’s position within the object\n"
64
+ " - Frame of reference: object-relative\n"
65
+ " - Example: 'rear edge of the left wing', 'centerline between the hulls'\n\n"
66
+
67
+ "4. **object_facing_direction** (string)\n"
68
+ " - What it describes: Which direction the **entire object** is facing in the image\n"
69
+ " - Frame of reference: camera-relative (viewer’s perspective)\n"
70
+ " - Example: 'facing the viewer', 'top-down', 'facing right'\n\n"
71
+
72
+ "5. **visibility_notes** (string)\n"
73
+ " - What it describes: Visibility condition of the part in the current view\n"
74
+ " - Example: 'fully visible', 'partially occluded', 'mostly obscured'\n\n"
75
+
76
+ "6. **proposed_bbox** ([x1, y1, x2, y2])\n"
77
+ " - What it describes: Tight bounding box around the identified part, in pixel coordinates\n"
78
+ f" - Must strictly contain the keypoint (x={x}, y={y}) ⇒ x1 < {x} < x2 and y1 < {y} < y2\n\n"
79
+
80
+ "7. **bbox_confidence** (float)\n"
81
+ " - What it describes: Your confidence (between 0 and 1) that the box tightly encloses the described part\n\n"
82
+
83
+ "## Output Format:\n"
84
+ "Return ONLY the following JSON object:\n"
85
+ "{\n"
86
+ ' "part_name": string,\n'
87
+ ' "orientation": string, // object-relative (e.g. "front-left")\n'
88
+ ' "spatial_location": string, // object-relative free-form description\n'
89
+ ' "object_facing_direction": string, // camera-relative (e.g. "facing viewer")\n'
90
+ ' "visibility_notes": string, // fully or partially visible\n'
91
+ ' "proposed_bbox": [x1, y1, x2, y2],\n'
92
+ ' "bbox_confidence": float // 0–1\n'
93
+ "}\n\n"
94
+ "**Do NOT include markdown, extra comments, or explanations. Return only the JSON object.**"
95
+ )
96
+
97
+
98
+ def _critic_prompt(
99
+ desc: dict,
100
+ class_name: str,
101
+ x: int,
102
+ y: int,
103
+ orientation_hint: str | None = None,
104
+ hypothesis: str | None = None,
105
+ ) -> str:
106
+ blob = json.dumps(desc, ensure_ascii=False, indent=2)
107
+
108
+ orientation_txt = f"Orientation: {orientation_hint}.\n" if orientation_hint else ""
109
+ hypothesis_txt = f"My hypothesis: \"{hypothesis}\"\n" if hypothesis else ""
110
+
111
+ return (
112
+ "You are an expert vision auditor.\n\n"
113
+ "Image 1: a crop focused on the predicted part region.\n"
114
+ "Image 2: the full image with red dot and blue bounding box overlay.\n"
115
+ " - Red dot = query keypoint\n"
116
+ " - Blue box = predicted bounding box\n\n"
117
+ f"Object class: {class_name}\n"
118
+ f"Key-point: (x={x}, y={y}) in a 840×840 image.\n"
119
+ f"{orientation_txt}{hypothesis_txt}\n"
120
+ "Actor-supplied JSON:\n"
121
+ f"{blob}\n\n"
122
+ "Check each field in order and STOP at the first inconsistency:\n"
123
+ "1. **part_name** — Must match the visible component inside the box.\n"
124
+ "2. **orientation** — Must be object-relative (e.g., front-left) and accurate.\n"
125
+ "3. **spatial_location** — Must be a plausible free-form object-relative phrase.\n"
126
+ "4. **object_facing_direction** — Must be camera-relative and visually supported.\n"
127
+ "5. **visibility_notes** — Must match visual visibility.\n"
128
+ "6. **proposed_bbox** — Must tightly contain the part *and* include the keypoint (x1 < x < x2, y1 < y < y2).\n"
129
+ "7. **bbox_confidence** — Must be a float between 0 and 1.\n\n"
130
+ "Respond with ONE JSON only:\n"
131
+ '{"is_consistent": true | false, "reason": "concise explanation of the first error"}'
132
+ )
133
+
134
+ def _encode_image_to_data_url(img: Image.Image, fmt: str = "PNG") -> str:
135
+ """Convert PIL image → base64 data-URL suitable for GPT-4o vision."""
136
+ buf = io.BytesIO()
137
+ img.save(buf, format=fmt)
138
+ b64 = base64.b64encode(buf.getvalue()).decode("ascii")
139
+ return f"data:image/{fmt.lower()};base64,{b64}"
140
+
141
+
142
+
143
+ def _make_overlay(
144
+ image: Image.Image,
145
+ point: Tuple[int, int],
146
+ bbox: list,
147
+ dot_radius: int = 6
148
+ ) -> Image.Image:
149
+ """Return a copy of `image` with a red dot and a blue bbox."""
150
+ x, y = point
151
+ x1, y1, x2, y2 = bbox
152
+ out = image.copy()
153
+ draw = ImageDraw.Draw(out)
154
+ draw.ellipse([x-dot_radius, y-dot_radius, x+dot_radius, y+dot_radius],
155
+ fill="red", outline="white", width=2)
156
+ draw.rectangle([x1, y1, x2, y2], outline="blue", width=2)
157
+ return out
158
+
159
+
160
+ _JSON_RE = re.compile(r"\{[\s\S]*\}")
161
+
162
+
163
+ def _extract_json(block: str) -> Optional[Dict[str, Any]]:
164
+ """Grab first {...} block and parse; None if invalid."""
165
+ m = _JSON_RE.search(block)
166
+ if not m:
167
+ return None
168
+ try:
169
+ return json.loads(m.group(0))
170
+ except json.JSONDecodeError:
171
+ return None
172
+
173
+
174
+ # def save_actor_critic_output(
175
+ # save_dir: str,
176
+ # category: str,
177
+ # image_id: str,
178
+ # kpt_id: int,
179
+ # query_point: Tuple[int, int],
180
+ # actor_desc: Dict[str, Any],
181
+ # critic_report: Dict[str, Any],
182
+ # model_name: str,
183
+ # ):
184
+ # """
185
+ # Saves the actor and critic outputs to a structured JSON file for a given image.
186
+
187
+ # This function uses a read-modify-write pattern. It loads an existing JSON if one
188
+ # exists for the image, adds or updates the data for the given keypoint, and then
189
+ # saves the file.
190
+
191
+ # Args:
192
+ # save_dir (str): The root directory for saving results (e.g., './results_vlm/').
193
+ # category (str): The object category (e.g., 'car'), used for subdirectories.
194
+ # image_id (str): The filename of the image (e.g., '0001.jpg').
195
+ # kpt_id (int): The unique ID of the keypoint being described.
196
+ # query_point (Tuple[int, int]): The (x, y) coordinates of the keypoint.
197
+ # actor_desc (Dict[str, Any]): The JSON output from the actor model.
198
+ # critic_report (Dict[str, Any]): The JSON output from the critic model.
199
+ # model_name (str): The name of the OpenAI model used.
200
+ # """
201
+ # # 1. Create the directory path (e.g., ./results_vlm/car/)
202
+ # category_dir = os.path.join(save_dir, category)
203
+ # os.makedirs(category_dir, exist_ok=True)
204
+
205
+ # # 2. Define the output file path
206
+ # image_basename = os.path.splitext(os.path.basename(image_id))[0]
207
+ # json_path = os.path.join(category_dir, f"{image_basename}.json")
208
+
209
+ # # 3. Load existing data or initialize a new structure
210
+ # if os.path.exists(json_path):
211
+ # with open(json_path, 'r') as f:
212
+ # data = json.load(f)
213
+ # else:
214
+ # data = {
215
+ # "metadata": {
216
+ # "image_id": image_id,
217
+ # "category": category,
218
+ # "model_name": model_name,
219
+ # },
220
+ # "keypoints": {}
221
+ # }
222
+
223
+ # # 4. Add the new keypoint data
224
+ # # Use str(kpt_id) because JSON keys must be strings.
225
+ # data["keypoints"][str(kpt_id)] = {
226
+ # "query_point": query_point,
227
+ # "actor_output": actor_desc,
228
+ # "critic_output": critic_report,
229
+ # }
230
+
231
+ # # 5. Write the updated data back to the file
232
+ # with open(json_path, 'w') as f:
233
+ # json.dump(data, f, indent=4)
234
+
235
+ # # print(f"Saved annotation for kpt_id {kpt_id} to {json_path}")
236
+
237
+
238
+ import numpy as np
239
+ import collections.abc
240
+
241
+ def sanitize_for_json(obj):
242
+ """
243
+ Recursively traverses a dictionary or list and converts any
244
+ NumPy number types to their Python native equivalents.
245
+ This is essential for ensuring data is JSON serializable.
246
+ """
247
+ if isinstance(obj, (str, bytes)):
248
+ return obj
249
+ if isinstance(obj, collections.abc.Mapping):
250
+ return {k: sanitize_for_json(v) for k, v in obj.items()}
251
+ if isinstance(obj, collections.abc.Iterable):
252
+ return [sanitize_for_json(item) for item in obj]
253
+ if isinstance(obj, (np.integer, np.int64)):
254
+ return int(obj)
255
+ if isinstance(obj, (np.floating, np.float32, np.float64)):
256
+ return float(obj)
257
+ return obj
258
+
259
+
260
+ def save_actor_critic_output(
261
+ save_dir: str,
262
+ category: str,
263
+ image_id: str,
264
+ kpt_id: int,
265
+ query_point: Tuple[int, int],
266
+ actor_desc: Dict[str, Any],
267
+ critic_report: Dict[str, Any],
268
+ model_name: str,
269
+ ):
270
+ # ... (path creation logic is the same) ...
271
+ category_dir = os.path.join(save_dir, category)
272
+ os.makedirs(category_dir, exist_ok=True)
273
+ image_basename = os.path.splitext(os.path.basename(image_id))[0]
274
+ json_path = os.path.join(category_dir, f"{image_basename}.json")
275
+
276
+ # --- Robust Loading ---
277
+ data = None
278
+ if os.path.exists(json_path):
279
+ try:
280
+ with open(json_path, 'r') as f:
281
+ data = json.load(f)
282
+ except json.JSONDecodeError:
283
+ print(f"Warning: Corrupted JSON file at {json_path}. Overwriting.")
284
+ data = None
285
+
286
+ if data is None:
287
+ data = {"metadata": {"image_id": image_id, "category": category, "model_name": model_name}, "keypoints": {}}
288
+
289
+ # --- NEW: Robust Writing - Sanitize the data before adding it ---
290
+ clean_actor_desc = sanitize_for_json(actor_desc)
291
+ clean_critic_report = sanitize_for_json(critic_report)
292
+
293
+ # print('this is the cleaned critic report', clean_critic_report)
294
+
295
+ # Add the sanitized keypoint data
296
+ data["keypoints"][str(kpt_id)] = {
297
+ "query_point": query_point, # Already clean from the main loop
298
+ "actor_output": clean_actor_desc,
299
+ "critic_output": clean_critic_report,
300
+ }
301
+
302
+ # Write the updated data back to the file
303
+ with open(json_path, 'w') as f:
304
+ json.dump(data, f, indent=4)
305
+
306
+
307
+ #
308
+ # NEW FUNCTION TO CHECK FOR COMPLETED WORK
309
+ #
310
+ def check_if_done(save_dir: str, category: str, image_id: str, kpt_id: int) -> bool:
311
+ """
312
+ Checks if a keypoint has already been processed and saved with non-empty content.
313
+
314
+ Args:
315
+ save_dir (str): The root directory where results are saved.
316
+ category (str): The object category.
317
+ image_id (str): The filename of the image.
318
+ kpt_id (int): The keypoint ID to check.
319
+
320
+ Returns:
321
+ bool: True if the keypoint exists and has content, False otherwise.
322
+ """
323
+ # 1. Construct the expected path to the JSON file
324
+ category_dir = os.path.join(save_dir, category)
325
+ image_basename = os.path.splitext(os.path.basename(image_id))[0]
326
+ json_path = os.path.join(category_dir, f"{image_basename}.json")
327
+
328
+ # 2. If the file doesn't exist, it's definitely not done.
329
+ if not os.path.exists(json_path):
330
+ return False
331
+
332
+ try:
333
+ # 3. Read the existing JSON file
334
+ with open(json_path, 'r') as f:
335
+ data = json.load(f)
336
+
337
+ # 4. Check for the keypoint ID (as a string key)
338
+ kpt_entry = data.get("keypoints", {}).get(str(kpt_id))
339
+
340
+ if not kpt_entry:
341
+ # Keypoint not found in the file
342
+ return False
343
+
344
+ # 5. Check that the content is non-empty.
345
+ # We check if both actor and critic outputs exist and are not empty dictionaries.
346
+ actor_output = kpt_entry.get("actor_output")
347
+ critic_output = kpt_entry.get("critic_output")
348
+
349
+ if actor_output and critic_output:
350
+ # Both exist and are not empty/None, so we consider it done.
351
+ return True
352
+ else:
353
+ # One or both are missing or empty
354
+ return False
355
+
356
+ except (json.JSONDecodeError, FileNotFoundError):
357
+ # If the file is corrupted or can't be opened, treat it as not done.
358
+ # The robust saving function will overwrite it later.
359
+ print(f"Warning: Could not read or decode {json_path}. Will re-process.")
360
+ return False
361
+
362
+ return False
363
+
364
+
365
+ def get_existing_result(save_dir: str, category: str, image_id: str, kpt_id: int) -> Optional[Dict[str, Any]]:
366
+ """
367
+ Checks if a keypoint has already been processed and, if so, returns the saved data.
368
+
369
+ Args:
370
+ save_dir (str): The root directory where results are saved.
371
+ category (str): The object category.
372
+ image_id (str): The filename of the image.
373
+ kpt_id (int): The keypoint ID to check.
374
+
375
+ Returns:
376
+ Optional[Dict[str, Any]]: The dictionary for the keypoint entry if it exists
377
+ and is valid, otherwise None.
378
+ """
379
+ # 1. Construct the expected path to the JSON file
380
+ category_dir = os.path.join(save_dir, category)
381
+ image_basename = os.path.splitext(os.path.basename(image_id))[0]
382
+ json_path = os.path.join(category_dir, f"{image_basename}.json")
383
+
384
+ if not os.path.exists(json_path):
385
+ return None
386
+
387
+ try:
388
+ with open(json_path, 'r') as f:
389
+ data = json.load(f)
390
+
391
+ kpt_entry = data.get("keypoints", {}).get(str(kpt_id))
392
+
393
+ if not kpt_entry:
394
+ return None
395
+
396
+ # Check that the content is non-empty and valid
397
+ actor_output = kpt_entry.get("actor_output")
398
+ critic_output = kpt_entry.get("critic_output")
399
+
400
+ if actor_output and critic_output:
401
+ # Success! Return the entire entry for this keypoint.
402
+ return kpt_entry
403
+ else:
404
+ return None
405
+
406
+ except (json.JSONDecodeError, FileNotFoundError):
407
+ print(f"Warning: Could not read or decode {json_path}. Will re-process.")
408
+ return None
Code/sc_dit/utils_actor_critic_with_ort.py ADDED
@@ -0,0 +1,445 @@
1
+ import json
2
+ from typing import Optional, Dict, Any, List
3
+ from PIL import Image
4
+ import base64
5
+ import io
6
+ from typing import Tuple
7
+ from PIL import ImageDraw
8
+ import re
9
+ import os
10
+ import numpy as np
11
+ import collections.abc
12
+ from part_dictionary import PART_SCHEMA
13
+
14
+ def get_all_parts_from_schema(schema_level: dict) -> List[str]:
15
+ """Recursively traverses the schema to get a flat list of all part names."""
16
+ part_names = []
17
+ for part_name, properties in schema_level.items():
18
+ part_names.append(part_name)
19
+ # If the part has children, recurse into them
20
+ if "children" in properties and properties["children"]:
21
+ part_names.extend(get_all_parts_from_schema(properties["children"]))
22
+ return part_names
23
+
24
+
25
+ def _actor_prompt(
26
+ x: int,
27
+ y: int,
28
+ class_name: str,
29
+ feedback: Optional[str] = None,
30
+ previous_json: Optional[dict] = None,
31
+ orientation_hint: Optional[dict] = None,
32
+ part_schema: dict = PART_SCHEMA,
33
+ ) -> str:
34
+ """
35
+ Generates a state-aware prompt for the actor, ensuring both Exploration
36
+ and Refinement modes have the necessary context.
37
+ """
38
+
39
+ # --- Define the JSON response format once to avoid repetition ---
40
+ JSON_FORMAT_TEMPLATE = (
41
+ "## RESPONSE FORMAT\n\n"
42
+ "Return ONLY a valid JSON object with the following structure:\n"
43
+ "{\n"
44
+ ' "part_name": string,\n'
45
+ ' "part_location": string, // OBJECT-centric, e.g., "front-left"\n'
46
+ ' "spatial_location": string, // OBJECT-centric free-form phrase\n'
47
+ ' "object_facing_direction": string, // camera-relative, e.g., "toward viewer"\n'
48
+ ' "visibility_notes": string,\n'
49
+ ' "proposed_bbox": [x1, y1, x2, y2], // pixel coords; must contain the keypoint\n'
50
+ ' "bbox_confidence": float\n'
51
+ "}"
52
+ )
53
+
54
+ # The context block with core rules is needed in BOTH modes.
55
+ context_block = _create_context_block(class_name, orientation_hint)
56
+
57
+ class_specific_schema = part_schema.get(class_name, {})
58
+ vocabulary_list = []
59
+ if class_specific_schema:
60
+ all_parts = get_all_parts_from_schema(class_specific_schema)
61
+ vocabulary_list = sorted(list(set(all_parts)))
62
+
63
+ part_selection_guide = (
64
+ f"## Part Selection Guide\n\n"
65
+ f"**1. Reference Vocabulary:**\n"
66
+ f"Your `part_name` choice IS ENCOURAGED to come from this list: {vocabulary_list}\n\n"
67
+ f"**2. Selection Rule:**\n"
68
+ f"Aim for the most specific part name possible from the vocabulary. If you are uncertain, a more general parent part is acceptable."
69
+ )
70
+
71
+ # --- REFINEMENT MODE (An anchor part has been confirmed) ---
72
+ if feedback and previous_json:
73
+ return (
74
+ f"# Original TASK: Describe the part of a {class_name} at keypoint (x={x}, y={y}).\n\n"
75
+ f"Previous json: {json.dumps(previous_json, indent=2) if previous_json else 'None'}\n\n"
76
+ f"**Critic's Last Feedback:** \"{feedback}\"\n\n"
77
+ f"{context_block}\n\n"  # context_block supplies the rules for geometric reasoning
78
+ "The bounding box should enclose the part in the center and include the keypoint.\n\n"
79
+ f"{part_selection_guide}\n"
80
+ f"---\n"
81
+ f"{JSON_FORMAT_TEMPLATE}"
82
+ )
83
+
84
+ # --- EXPLORATION MODE (No anchor, this is the first attempt or a reset) ---
85
+ else:
86
+ return (
87
+ f"# TASK: Describe the part of a {class_name} at keypoint (x={x}, y={y}).\n\n"
88
+ f"Use the context and guide below to form your answer. A bounding box should tightly enclose the part and include the keypoint.\n\n"
89
+ f"{context_block}\n\n"
90
+ "The bounding box should enclose the part in the center and include the keypoint.\n\n"
91
+ f"{part_selection_guide}\n"
92
+ f"---\n"
93
+ f"{JSON_FORMAT_TEMPLATE}"
94
+ )
95
+
96
+
97
+ def _critic_prompt(
98
+ desc: dict,
99
+ class_name: str,
100
+ x: int,
101
+ y: int,
102
+ part_schema: dict,
103
+ orientation_hint: Optional[dict] = None,
104
+ anchor_part: Optional[str] = None, # NEW: Anchor part for sibling checks
105
+ img_size: int = 840, # Default image size for bounding box calculations
106
+ #... other args
107
+ ) -> str:
108
+ """
109
+ Generates the final, streamlined prompt for the critic, guiding it
110
+ to produce intelligent, hierarchical feedback.
111
+ """
112
+ actor_part_name = desc.get("part_name", "unknown")
113
+ class_schema = part_schema.get(class_name, {})
114
+
115
+ # --- Prepare all necessary data for the dynamic prompt ---
116
+ top_level_parts = list(class_schema.keys())
117
+ part_info, _ = find_part_in_schema(class_schema, actor_part_name)
118
+
119
+ # Prepare lists for feedback
120
+ go_deeper_options = list(part_info["children"].keys()) if part_info and part_info.get("children") else [] # hint of whether it is specific enough
121
+ check_other_children_options = []
122
+ if anchor_part:
123
+ anchor_info, _ = find_part_in_schema(class_schema, anchor_part)
124
+ if anchor_info and anchor_info.get("children"):
125
+ check_other_children_options = list(anchor_info["children"].keys())
126
+
127
+ # --- NEW: Generate the conditional geometric instruction ---
128
+ is_symmetric = part_info.get("is_symmetric", False) if part_info else False
129
+ if is_symmetric:
130
+ geometric_audit_instruction = (
131
+ "This is a **symmetric part**. Your primary task is to verify the left/right description "
132
+ "in `part_location` by strictly applying the Golden Rule to the visual evidence."
133
+ )
134
+ else:
135
+ geometric_audit_instruction = (
136
+ "This is a **singular/central part**. Your primary task is to verify its position along "
137
+ "the main axes (e.g., front, rear, top, center) as described in `part_location`."
138
+ )
139
+
140
+ context_block = _create_context_block(class_name, orientation_hint)
141
+
142
+ return (
143
+ f"You are a vision auditor. Follow the decision tree below to verify the actor's proposal. Show the reasoning traces (which passed and which does not) in the final answer as `reason` key.\n\n"
144
+ "=== DECISION PRIORITY (must follow) ===\n"
145
+ "1) Part identity\n"
146
+ "2) Geometry (facing/side/front-rear/vertical)\n"
147
+ "3) Visibility\n"
148
+ "4) Approved\n\n"
149
+ f"## Context & Evidence\n"
150
+ "Image 1: a crop focused on the predicted part region.\n"
151
+ "Image 2: the full image with red dot and blue bounding box overlay.\n"
152
+ " - Red dot = query keypoint\n"
153
+ " - Blue box = predicted bounding box\n\n"
154
+ f"Object class: {class_name}\n"
155
+ f"Key-point: (x={x}, y={y}) in a {img_size}x{img_size} image.\n"
156
+ f"**Actor's Proposal:**\n{json.dumps(desc, indent=2)}\n"
157
+ f"**Current Anchor Part:** {anchor_part or 'None (top-level)'}\n"
158
+ f"**Rules:**\n{context_block}\n\n"
159
+ f"## AUDIT DECISION TREE (Follow Strictly)\n\n"
160
+ f"**1. First, audit the `part_name` ('{actor_part_name}').**\n"
161
+ f"1a) CORRECT & SPECIFIC → **Approved (Part)**\n"
162
+ f" - The named part matches the component at the keypoint.\n"
163
+ f" - It is already specific enough, or there are no children to refine (go_deeper_options = []).\n"
164
+ f"1b) CORRECT BUT TOO GENERAL → **Go Deeper**\n"
165
+ f" - The named part is a valid parent, but there is a more specific child clearly indicated.\n"
166
+ f" - Suggest children to try next: {go_deeper_options}.\n"
167
+ f"1c) INCORRECT & TOP-LEVEL NOT FIXED → **Go Back**\n"
168
+ f"- The named part is wrong and we do NOT have a confirmed parent (“anchor”).\n"
169
+ f"- Suggest trying one of the top-level parents: {top_level_parts}.\n"
170
+ f"1d) INCORRECT BUT PARENT (ANCHOR) IS KNOWN → **Check Other Children**\n"
171
+ f"- The named part is wrong, but the confirmed parent (“anchor”) is correct.\n"
172
+ f"- Suggest trying one of the anchor’s children: {check_other_children_options}.\n\n"
173
+ f"**2. If `part_name` passed, audit the GEOMETRY.**\n"
174
+ f" - **Your specific instruction for this part is:** {geometric_audit_instruction}\n"
175
+ f" - Following this instruction, is the actor's `part_location` correct? If not, respond with `feedback_type: 'Geometric Error'` and a concise reason.\n\n"
176
+ f"**3. If geometry passed, audit the VISIBILITY.**\n"
177
+ f" - Check the `visibility_notes`. Is it consistent with the part's location? If not, respond with `feedback_type: 'Visibility Error'`.\n\n"
178
+ f"**4. If all checks pass, the proposal is APPROVED.**\n"
179
+ f" - Respond with `feedback_type: 'Approved'` and set `is_consistent` to `true`.\n\n"
180
+ f"---\n"
181
+ f"## YOUR RESPONSE\n"
182
+ f"Respond with ONE valid JSON object with the mandatory `feedback_type`."
183
+ )
184
+
185
+
186
+ def find_part_in_schema(schema_level: dict, part_name_to_find: str, parent_name: str = None):
187
+ """
188
+ Recursively searches the nested schema for a part.
189
+ Returns the part's properties and its immediate parent's name.
190
+ """
191
+ # Check keys at the current level
192
+ if part_name_to_find in schema_level:
193
+ return schema_level[part_name_to_find], parent_name
194
+
195
+ # If not found, search in the children of each part at this level
196
+ for current_part_name, properties in schema_level.items():
197
+ if "children" in properties and properties["children"]:
198
+ found_info, found_parent = find_part_in_schema(
199
+ properties["children"], part_name_to_find, parent_name=current_part_name
200
+ )
201
+ if found_info:
202
+ return found_info, found_parent
203
+
204
+ return None, None
205
+
206
+
207
+ def _create_context_block(
208
+ class_name: str,
209
+ orientation_hint: Optional[dict] = None
210
+ ) -> str:
211
+ """
212
+ Consolidates all rules and hints into a single, clean markdown block
213
+ to be used as context for the VLM.
214
+ """
215
+ # 1. Format the orientation hint
216
+ hint_str = json.dumps(orientation_hint) if orientation_hint else "Not available."
217
+
218
+ # 2. Define the core geometric principles
219
+ golden_rule = (
220
+ "- If facing 'toward viewer'**: It's a mirror. A part on the **viewer's left** is the **object's right**, and a part on the **viewer's right** is the **object's left**.\n"
221
+ "- **If facing 'away from viewer'**: It's direct. Image-left is object-left.\n"
222
+ # "- **If facing 'left' or 'right'**: Use near-side/far-side logic based on the object's orientation."
223
+ )
224
+
225
+ # 3. Assemble all pieces into the final markdown block
226
+ context = (
227
+ f"## CONTEXT & RULES\n\n"
228
+ f"**1. Object Class:** `{class_name}`\n"
229
+ f"**2. Orientation Prior (Hint):** `{hint_str}`\n"
230
+ f"**3. The Golden Rule of Geometry:**\n{golden_rule}\n"
231
+ )
232
+
233
+ return context
234
+
235
+
236
+ def _encode_image_to_data_url(img: Image.Image, fmt: str = "PNG") -> str:
237
+ """Convert PIL image → base64 data-URL suitable for GPT-4o vision."""
238
+ buf = io.BytesIO()
239
+ img.save(buf, format=fmt)
240
+ b64 = base64.b64encode(buf.getvalue()).decode("ascii")
241
+ return f"data:image/{fmt.lower()};base64,{b64}"
242
+
243
+
244
+
245
+ def _make_overlay(
246
+ image: Image.Image,
247
+ point: Tuple[int, int],
248
+ bbox: list,
249
+ dot_radius: int = 6
250
+ ) -> Image.Image:
251
+ """Return a copy of `image` with a red dot and a blue bbox."""
252
+ x, y = point
253
+ x1, y1, x2, y2 = bbox
254
+ out = image.copy()
255
+ draw = ImageDraw.Draw(out)
256
+ draw.ellipse([x-dot_radius, y-dot_radius, x+dot_radius, y+dot_radius],
257
+ fill="red", outline="white", width=2)
258
+ draw.rectangle([x1, y1, x2, y2], outline="blue", width=2)
259
+ return out
260
+
261
+
262
+ _JSON_RE = re.compile(r"\{[\s\S]*\}")
263
+
264
+
265
+ def _extract_json(block: str) -> Optional[Dict[str, Any]]:
266
+ """Grab first {...} block and parse; None if invalid."""
267
+ m = _JSON_RE.search(block)
268
+ if not m:
269
+ return None
270
+ try:
271
+ return json.loads(m.group(0))
272
+ except json.JSONDecodeError:
273
+ return None
274
+
275
+
276
+
277
+
278
+ def sanitize_for_json(obj):
279
+ """
280
+ Recursively traverses a dictionary or list and converts any
281
+ NumPy number types to their Python native equivalents.
282
+ This is essential for ensuring data is JSON serializable.
283
+ """
284
+ if isinstance(obj, (str, bytes)):
285
+ return obj
286
+ if isinstance(obj, collections.abc.Mapping):
287
+ return {k: sanitize_for_json(v) for k, v in obj.items()}
288
+ if isinstance(obj, collections.abc.Iterable):
289
+ return [sanitize_for_json(item) for item in obj]
290
+ if isinstance(obj, (np.integer, np.int64)):
291
+ return int(obj)
292
+ if isinstance(obj, (np.floating, np.float32, np.float64)):
293
+ return float(obj)
294
+ return obj
295
+
296
+
297
+ def save_actor_critic_output(
298
+ save_dir: str,
299
+ category: str,
300
+ image_id: str,
301
+ kpt_id: int,
302
+ query_point: Tuple[int, int],
303
+ actor_desc: Dict[str, Any],
304
+ critic_report: Dict[str, Any],
305
+ model_name: str,
306
+ ):
307
+ # ... (path creation logic is the same) ...
308
+ category_dir = os.path.join(save_dir, category)
309
+ os.makedirs(category_dir, exist_ok=True)
310
+ image_basename = os.path.splitext(os.path.basename(image_id))[0]
311
+ json_path = os.path.join(category_dir, f"{image_basename}.json")
312
+
313
+ # --- Robust Loading ---
314
+ data = None
315
+ if os.path.exists(json_path):
316
+ try:
317
+ with open(json_path, 'r') as f:
318
+ data = json.load(f)
319
+ except json.JSONDecodeError:
320
+ print(f"Warning: Corrupted JSON file at {json_path}. Overwriting.")
321
+ data = None
322
+
323
+ if data is None:
324
+ data = {"metadata": {"image_id": image_id, "category": category, "model_name": model_name}, "keypoints": {}}
325
+
326
+ # --- NEW: Robust Writing - Sanitize the data before adding it ---
327
+ clean_actor_desc = sanitize_for_json(actor_desc)
328
+ clean_critic_report = sanitize_for_json(critic_report)
329
+
330
+ # print('this is the cleaned critic report', clean_critic_report)
331
+
332
+ # Add the sanitized keypoint data
333
+ data["keypoints"][str(kpt_id)] = {
334
+ "query_point": query_point, # Already clean from the main loop
335
+ "actor_output": clean_actor_desc,
336
+ "critic_output": clean_critic_report,
337
+ }
338
+
339
+ # Write the updated data back to the file
340
+ with open(json_path, 'w') as f:
341
+ json.dump(data, f, indent=4)
342
+
343
+
344
+ #
345
+ # NEW FUNCTION TO CHECK FOR COMPLETED WORK
346
+ #
347
+ def check_if_done(save_dir: str, category: str, image_id: str, kpt_id: int) -> bool:
348
+ """
349
+ Checks if a keypoint has already been processed and saved with non-empty content.
350
+
351
+ Args:
352
+ save_dir (str): The root directory where results are saved.
353
+ category (str): The object category.
354
+ image_id (str): The filename of the image.
355
+ kpt_id (int): The keypoint ID to check.
356
+
357
+ Returns:
358
+ bool: True if the keypoint exists and has content, False otherwise.
359
+ """
360
+ # 1. Construct the expected path to the JSON file
361
+ category_dir = os.path.join(save_dir, category)
362
+ image_basename = os.path.splitext(os.path.basename(image_id))[0]
363
+ json_path = os.path.join(category_dir, f"{image_basename}.json")
364
+
365
+ # 2. If the file doesn't exist, it's definitely not done.
366
+ if not os.path.exists(json_path):
367
+ return False
368
+
369
+ try:
370
+ # 3. Read the existing JSON file
371
+ with open(json_path, 'r') as f:
372
+ data = json.load(f)
373
+
374
+ # 4. Check for the keypoint ID (as a string key)
375
+ kpt_entry = data.get("keypoints", {}).get(str(kpt_id))
376
+
377
+ if not kpt_entry:
378
+ # Keypoint not found in the file
379
+ return False
380
+
381
+ # 5. Check that the content is non-empty.
382
+ # We check if both actor and critic outputs exist and are not empty dictionaries.
383
+ actor_output = kpt_entry.get("actor_output")
384
+ critic_output = kpt_entry.get("critic_output")
385
+
386
+ if actor_output and critic_output:
387
+ # Both exist and are not empty/None, so we consider it done.
388
+ return True
389
+ else:
390
+ # One or both are missing or empty
391
+ return False
392
+
393
+ except (json.JSONDecodeError, FileNotFoundError):
394
+ # If the file is corrupted or can't be opened, treat it as not done.
395
+ # The robust saving function will overwrite it later.
396
+ print(f"Warning: Could not read or decode {json_path}. Will re-process.")
397
+ return False
398
+
399
+ return False
400
+
401
+
402
+ def get_existing_result(save_dir: str, category: str, image_id: str, kpt_id: int) -> Optional[Dict[str, Any]]:
403
+ """
404
+ Checks if a keypoint has already been processed and, if so, returns the saved data.
405
+
406
+ Args:
407
+ save_dir (str): The root directory where results are saved.
408
+ category (str): The object category.
409
+ image_id (str): The filename of the image.
410
+ kpt_id (int): The keypoint ID to check.
411
+
412
+ Returns:
413
+ Optional[Dict[str, Any]]: The dictionary for the keypoint entry if it exists
414
+ and is valid, otherwise None.
415
+ """
416
+ # 1. Construct the expected path to the JSON file
417
+ category_dir = os.path.join(save_dir, category)
418
+ image_basename = os.path.splitext(os.path.basename(image_id))[0]
419
+ json_path = os.path.join(category_dir, f"{image_basename}.json")
420
+
421
+ if not os.path.exists(json_path):
422
+ return None
423
+
424
+ try:
425
+ with open(json_path, 'r') as f:
426
+ data = json.load(f)
427
+
428
+ kpt_entry = data.get("keypoints", {}).get(str(kpt_id))
429
+
430
+ if not kpt_entry:
431
+ return None
432
+
433
+ # Check that the content is non-empty and valid
434
+ actor_output = kpt_entry.get("actor_output")
435
+ critic_output = kpt_entry.get("critic_output")
436
+
437
+ if actor_output and critic_output:
438
+ # Success! Return the entire entry for this keypoint.
439
+ return kpt_entry
440
+ else:
441
+ return None
442
+
443
+ except (json.JSONDecodeError, FileNotFoundError):
444
+ print(f"Warning: Could not read or decode {json_path}. Will re-process.")
445
+ return None
Code/sc_dit/vlm_judge_bbox_pred.py ADDED
@@ -0,0 +1,216 @@
1
+ from openai import OpenAI
2
+ import os
3
+ from io import BytesIO
4
+ from PIL import Image
5
+ import base64
6
+ import numpy as np
7
+ import argparse
8
+ import pandas as pd
9
+ from tqdm import tqdm
10
+ from openai import AsyncClient  # OpenAI is already imported above
11
+ import wandb
12
+ from pydantic import BaseModel
13
+ from typing import Literal
14
+ import matplotlib.pyplot as plt
15
+
16
+ # custom imports
17
+ from utils import resize, draw_bbox_point_grid
18
+ from dataset import get_dataset_info
19
+
20
+
21
+ def encode_image_to_base64(image: Image.Image) -> str:
22
+ """Encodes a PIL image to a base64 data URI."""
23
+ buffered = BytesIO()
24
+ image.save(buffered, format="JPEG")
25
+ img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
26
+ return f"data:image/jpeg;base64,{img_str}"
27
+
28
+ # use structured output
29
+ class VLMJudgeOutput(BaseModel):
30
+ full_response: str # entire response
31
+ final_answer: Literal["yes", "no"]
32
+
33
+ def make_preview(src_display, tgt_display):
34
+ """Return a matplotlib Figure with both images + optional boxes."""
35
+ fig, axes = plt.subplots(1, 2, figsize=(6, 3))
36
+ axes[0].imshow(src_display); axes[0].axis("off"); axes[0].set_title("Source")
37
+ axes[1].imshow(tgt_display); axes[1].axis("off"); axes[1].set_title("Target")
38
+
39
+ fig.tight_layout()
40
+ return fig
41
+
42
+ # cnt = 0
43
+ def run_batched_evaluation(args, model, system_prompt, task_prompt):
44
+
45
+ run = wandb.init(
46
+ project=args.EVAL_DATASET,
47
+ entity="amazon_intern2025",
48
+ name=args.EXP_NOTE,
49
+ # config=vars(args)
50
+ )
51
+
52
+ columns = [
53
+ "category", "src_id", "tgt_id", "kpt_id",
54
+ "preview", "full_response",
55
+ "final_answer",
56
+ #"src_img_size", "tgt_img_size"
57
+ ]
58
+ results_tb = wandb.Table(columns=columns)
59
+
60
+ results_df = pd.read_csv(args.BBOX_FILE)
61
+
62
+ data_dir, categories, split = get_dataset_info(args, split='test')
63
+
64
+ for idx, row in tqdm(results_df.iterrows(), total=len(results_df)):
65
+ category, src_id, tgt_id = row['category'], str(row['src_id']), str(row['tgt_id'])
66
+ kpt_id = int(row['kpt_id'])
67
+ src_bbox = row['src_bbox']
68
+ tgt_bbox = row['tgt_bbox']
69
+
70
+ src_bbox = eval(src_bbox) if isinstance(src_bbox, str) else src_bbox
71
+ tgt_bbox = eval(tgt_bbox) if isinstance(tgt_bbox, str) else []
72
+
73
+ # return an empty list if tgt_bbox is nan
74
+ # if np.isnan(np.array(tgt_bbox).any()):
75
+ # tgt_bbox = []
76
+
77
+ src_img = Image.open(os.path.join(data_dir, 'JPEGImages', category, f"{src_id}.jpg"))
78
+ tgt_img = Image.open(os.path.join(data_dir, 'JPEGImages', category, f"{tgt_id}.jpg"))
79
+
80
+ src_img = resize(src_img, args.ANNO_SIZE)
81
+ tgt_img = resize(tgt_img, args.ANNO_SIZE)
82
+
83
+ src_image_display = draw_bbox_point_grid(
84
+ src_img.copy(),
85
+ bbox=src_bbox,
86
+ dilation=args.DILATION_SRC,
87
+ )
88
+
89
+ if len(tgt_bbox) == 0:
90
+ tgt_image_display = draw_bbox_point_grid(
91
+ tgt_img.copy(),
92
+ # bbox=tgt_bbox
93
+ )
94
+ elif len(tgt_bbox) > 0:
95
+ tgt_image_display = draw_bbox_point_grid(
96
+ tgt_img.copy(),
97
+ bbox=tgt_bbox,
98
+ dilation=args.DILATION_TGT,
99
+ )
100
+
101
+ print(f"Processing {category} - {src_id} to {tgt_id}")
102
+ print(f"Source bbox: {src_bbox}, Target bbox: {tgt_bbox}")
103
+
104
+ # response = model.chat.completions.create(
105
+ # # 1. Using a valid and powerful vision model
106
+ # model="gpt-4.1-mini",
107
+
108
+ # # 2. Consolidating text and images into a single user message
109
+ # messages=[
110
+ # {"role": "system", "content": system_prompt},
111
+ # {
112
+ # "role": "user",
113
+ # "content": [
114
+ # {"type": "text", "text": task_prompt},
115
+ # {"type": "image_url", "image_url": {"url": encode_image_to_base64(src_image_display)}},
116
+ # {"type": "image_url", "image_url": {"url": encode_image_to_base64(tgt_image_display)}}
117
+ # ]
118
+ # }
119
+ # ],
120
+ # max_tokens=1000,
121
+ # temperature=0.0,
122
+ # top_p=1.0,
123
+ # n=1,
124
+ # # 3. Correctly formatting the 'response_format' parameter as a dictionary
125
+ # # response_format={"type": "json_object"}
126
+ # )
127
+
128
+ fig = make_preview(src_image_display, tgt_image_display)
129
+ wandb_img = wandb.Image(fig)
130
+ plt.close(fig)
131
+
132
+ response = model.responses.parse(
133
+ model=args.MODEL_NAME,
134
+ input=[
135
+ {"role": "system", "content": f"{system_prompt}"},
136
+ {
137
+ "role": "user",
138
+ "content": [
139
+ {"type": "input_text", "text": f"{task_prompt}"},
140
+ {"type": "input_image", "image_url": encode_image_to_base64(src_image_display)},
141
+ {"type": "input_image", "image_url": encode_image_to_base64(tgt_image_display)}
142
+ ],
143
+ },
144
+ ],
145
+ text_format=VLMJudgeOutput,
146
+ )
147
+
148
+ # print(response.choices[0].message.content)
149
+ output = response.output_parsed
150
+ # print(output)
151
+ print(f"Final answer: {output.final_answer}")
152
+ print(f"Full response: {output.full_response}")
153
+
154
+ results_tb.add_data(
155
+ category,
156
+ src_id,
157
+ tgt_id,
158
+ kpt_id,
159
+ wandb_img,
160
+ output.full_response,
161
+ output.final_answer,
162
+ )
163
+
164
+
165
+ # print(type(src_bbox), type(tgt_bbox))
166
+ # for debugging purposes
167
+ if args.TEST_RUN_SAMPLE > 0 and idx >= args.TEST_RUN_SAMPLE - 1:
168
+ break
169
+
170
+
171
+ wandb.log({"evaluation_results": results_tb})
172
+ run.finish()
173
+ # break
174
+
175
+ def main(args):
176
+
177
+ # pass
178
+
179
+ with open(args.SYSTEM_PROMPT, 'r') as f:
180
+ system_prompt = f.read()
181
+
182
+ with open(args.TASK_PROMPT, 'r') as f:
183
+ task_prompt = f.read()
184
+
185
+ # print(system_prompt)
186
+ # print(task_prompt)
187
+
188
+ # Initialize the OpenAI VLM model
189
+ print("Initializing OpenAI model...")
190
+ # Read the API key from the OPENAI_API_KEY environment variable; do not hard-code secrets.
191
+ model = OpenAI()
192
+ run_batched_evaluation(args, model, system_prompt, task_prompt)
193
+
194
+
195
+
196
+
197
+ if __name__ == "__main__":
198
+ parser = argparse.ArgumentParser(description="Process some images.")
199
+ parser.add_argument("--SYSTEM_PROMPT", type=str, required=True, help="System prompt for the model.")
200
+ parser.add_argument("--TASK_PROMPT", type=str, required=True, help="Text prompt for the model.")
201
+ parser.add_argument("--EVAL_DATASET", type=str, required=True, choices=['pascal', 'spair'], help="Dataset to use for evaluation.")
202
+ # parser.add_argument("--TEST_SAMPLE", type=int, default=10, help
203
+ parser.add_argument("--BBOX_FILE", type=str, required=True, help="Path to the bounding box file.")
204
+
205
+
206
+ # arguments with default values
207
+ parser.add_argument("--ANNO_SIZE", type=int, default=840, help="Size of the annotation.")
208
+ parser.add_argument("--DILATION_SRC", type=int, default=28, help="Dilation for source bounding box.")
209
+ parser.add_argument("--DILATION_TGT", type=int, default=0, help="Dilation for target bounding box.")
210
+ parser.add_argument("--DATA_DIR", type=str, default="./data", help="Path to the dataset directory.")
211
+ parser.add_argument("--EXP_NOTE", type=str, default="gpt_judge", help="Experiment note for WandB logging.")
212
+ parser.add_argument("--TEST_RUN_SAMPLE", type=int, default=0, help="Number of test samples to use (0 for all).")
213
+ parser.add_argument("--MODEL_NAME", type=str, default="gpt-4.1-mini", help="Name of the OpenAI model to use.")
214
+
215
+ args = parser.parse_args()
216
+ main(args)
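For illustration, a minimal sketch of how the yes/no judgments logged above could be aggregated into a per-category score, assuming the W&B results table has been exported to a CSV named judge_results.csv with the same columns; the filename and the export step are assumptions, not part of this script.

# Hypothetical post-processing of the logged VLM judgments.
import pandas as pd

df = pd.read_csv("judge_results.csv")          # assumed export of the W&B results table
df["is_yes"] = df["final_answer"].str.lower().eq("yes")
per_category = df.groupby("category")["is_yes"].mean().rename("yes_rate")
print(per_category)
print(f"overall yes rate: {df['is_yes'].mean():.3f}")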
Code/sc_dit/vlm_judge_bbox_pred_batched.py ADDED
@@ -0,0 +1,208 @@
1
+ import os
2
+ import asyncio
3
+ import json
4
+ import base64
5
+ from io import BytesIO
6
+ from typing import Literal
7
+
8
+ import argparse
9
+ import pandas as pd
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+ from PIL import Image
13
+ from tqdm.asyncio import tqdm_asyncio
14
+ from openai import AsyncClient
15
+ import wandb
16
+ from pydantic import BaseModel, ValidationError
17
+ from tqdm import tqdm
18
+ import instructor
19
+
20
+ # custom imports
21
+ from utils import resize, draw_bbox_point_grid
22
+ from dataset import get_dataset_info
23
+
24
+ # --- Constants & Configuration ---
25
+ # Set a reasonable concurrency limit to avoid API rate limiting
26
+ CONCURRENCY_LIMIT = 4
27
+
28
+ # --- Helper Functions & Pydantic Models ---
29
+
30
+ def encode_image_to_base64(image: Image.Image) -> str:
31
+ """Encodes a PIL image to a base64 data URI."""
32
+ buffered = BytesIO()
33
+ image.save(buffered, format="JPEG")
34
+ img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
35
+ return f"data:image/jpeg;base64,{img_str}"
36
+
37
+ class VLMJudgeOutput(BaseModel):
38
+ """Pydantic model for structured output from the VLM."""
39
+ full_response: str
40
+ final_answer: Literal["yes", "no"]
41
+
42
+ def make_preview(src_display: Image.Image, tgt_display: Image.Image) -> plt.Figure:
43
+ """Return a matplotlib Figure with both source and target images."""
44
+ fig, axes = plt.subplots(1, 2, figsize=(6, 3))
45
+ axes[0].imshow(src_display); axes[0].axis("off"); axes[0].set_title("Source")
46
+ axes[1].imshow(tgt_display); axes[1].axis("off"); axes[1].set_title("Target")
47
+ fig.tight_layout()
48
+ return fig
49
+
50
+ # --- Core Asynchronous Logic ---
51
+
52
+ async def process_single_item(
53
+ client: AsyncClient,
54
+ semaphore: asyncio.Semaphore,
55
+ messages: list,
56
+ model_name: str
57
+ ) -> dict:
58
+ """Acquires semaphore and makes a single async API call to OpenAI."""
59
+ async with semaphore:
60
+ try:
61
+ response = await client.chat.completions.create(
62
+ model=model_name,
63
+ messages=messages,
64
+ max_tokens=1000,
65
+ temperature=0.0,
66
+ top_p=1.0,
67
+ n=1,
68
+ # response_format={"type": "json_object"}
69
+ response_model=VLMJudgeOutput
70
+ )
71
+ return response
72
+ except Exception as e:
73
+ # Return the exception to be handled later
74
+ return e
75
+
76
+
77
+
78
+ async def run_batched_evaluation(args, client: AsyncClient, system_prompt: str, task_prompt: str):
79
+ """Prepares data, runs asynchronous evaluations IN BATCHES, and logs results."""
80
+ run = wandb.init(
81
+ project=args.EVAL_DATASET,
82
+ name=args.EXP_NOTE,
83
+ config=vars(args)
84
+ )
85
+ results_tb = wandb.Table(columns=[
86
+ "category", "src_id", "tgt_id", "kpt_id", "preview",
87
+ "full_response", "final_answer", "error"
88
+ ])
89
+
90
+ df = pd.read_csv(args.BBOX_FILE)
91
+ if args.TEST_RUN_SAMPLE > 0:
92
+ df = df.head(args.TEST_RUN_SAMPLE)
93
+
94
+ data_dir, _, _ = get_dataset_info(args, split='test')
95
+
96
+ # Process the dataframe in smaller batches to control memory usage
97
+ num_batches = (len(df) - 1) // args.BATCH_SIZE + 1
98
+ for i in range(num_batches):
99
+ start_index = i * args.BATCH_SIZE
100
+ end_index = start_index + args.BATCH_SIZE
101
+ batch_df = df.iloc[start_index:end_index]
102
+
103
+ print(f"\n--- Processing Batch {i+1}/{num_batches} (items {start_index}-{end_index-1}) ---")
104
+
105
+ # 1. PREPARE PAYLOADS FOR THE CURRENT BATCH
106
+ payloads = []
107
+ for _, row in batch_df.iterrows():
108
+ category, src_id, tgt_id = row['category'], str(row['src_id']), str(row['tgt_id'])
109
+
110
+ src_img_path = os.path.join(data_dir, 'JPEGImages', category, f"{src_id}.jpg")
111
+ tgt_img_path = os.path.join(data_dir, 'JPEGImages', category, f"{tgt_id}.jpg")
112
+
113
+ src_img = resize(Image.open(src_img_path), args.ANNO_SIZE)
114
+ tgt_img = resize(Image.open(tgt_img_path), args.ANNO_SIZE)
115
+
116
+ src_bbox = eval(row['src_bbox'])
117
+ tgt_bbox = eval(row['tgt_bbox']) if 'tgt_bbox' in row and pd.notna(row['tgt_bbox']) else []
118
+
119
+ src_display = draw_bbox_point_grid(src_img.copy(), bbox=src_bbox, dilation=args.DILATION_SRC)
120
+ tgt_display = draw_bbox_point_grid(tgt_img.copy(), bbox=tgt_bbox if tgt_bbox else None, dilation=args.DILATION_TGT)
121
+
122
+ messages = [
123
+ {"role": "system", "content": system_prompt},
124
+ {"role": "user", "content": [
125
+ {"type": "text", "text": task_prompt},
126
+ {"type": "image_url", "image_url": {"url": encode_image_to_base64(src_display)}},
127
+ {"type": "image_url", "image_url": {"url": encode_image_to_base64(tgt_display)}}
128
+ ]}
129
+ ]
130
+
131
+ fig = make_preview(src_display, tgt_display)
132
+ payloads.append({
133
+ "messages": messages,
134
+ "metadata": row.to_dict(),
135
+ "wandb_img": wandb.Image(fig)
136
+ })
137
+ plt.close(fig)
138
+
139
+ # 2. EXECUTE API CALLS FOR THE CURRENT BATCH
140
+ print(f"Sending {len(payloads)} requests for this batch...")
141
+ semaphore = asyncio.Semaphore(CONCURRENCY_LIMIT)
142
+ tasks = [
143
+ process_single_item(client, semaphore, p["messages"], args.MODEL_NAME)
144
+ for p in payloads
145
+ ]
146
+ # Use the standard asyncio.gather which correctly handles return_exceptions
147
+ api_responses = await asyncio.gather(*tasks, return_exceptions=True)
148
+
149
+ # 3. PROCESS AND LOG RESULTS FOR THE CURRENT BATCH
150
+ # Wrap the results loop in tqdm for a progress bar
151
+
152
+ for payload, parsed_output in tqdm(zip(payloads, api_responses), total=len(payloads), desc="Processing responses"):
153
+ meta = payload['metadata']
154
+
155
+ # Since instructor handles parsing, the response is either a VLMJudgeOutput object or an Exception
156
+ if isinstance(parsed_output, Exception):
157
+ print(f"ERROR processing {meta['src_id']}->{meta['tgt_id']}: {parsed_output}")
158
+ results_tb.add_data(
159
+ meta['category'], meta['src_id'], meta['tgt_id'], int(meta['kpt_id']),
160
+ payload['wandb_img'], None, None, str(parsed_output)
161
+ )
162
+ else:
163
+ # No need for try-except block, 'parsed_output' is already a validated Pydantic object
164
+ results_tb.add_data(
165
+ meta['category'], meta['src_id'], meta['tgt_id'], int(meta['kpt_id']),
166
+ payload['wandb_img'], parsed_output.full_response,
167
+ parsed_output.final_answer, None
168
+ )
169
+
170
+ # Log the table periodically to save progress
171
+ wandb.log({"evaluation_results": results_tb})
172
+
173
+ run.finish()
174
+ print("\nEvaluation complete.")
175
+
176
+ async def main(args):
177
+ """Main function to set up and run the evaluation."""
178
+ with open(args.SYSTEM_PROMPT, 'r') as f:
179
+ system_prompt = f.read()
180
+ with open(args.TASK_PROMPT, 'r') as f:
181
+ task_prompt = f.read()
182
+
183
+ # Use the async client
184
+ # The client automatically reads the OPENAI_API_KEY environment variable
185
+ client = instructor.patch(AsyncClient())  # the key is read from the OPENAI_API_KEY environment variable
186
+ await run_batched_evaluation(args, client, system_prompt, task_prompt)
187
+
188
+ if __name__ == "__main__":
189
+ parser = argparse.ArgumentParser(description="Run batched VLM evaluation asynchronously.")
190
+ parser.add_argument("--SYSTEM_PROMPT", type=str, required=True, help="Path to the system prompt file.")
191
+ parser.add_argument("--TASK_PROMPT", type=str, required=True, help="Path to the task prompt file.")
192
+ parser.add_argument("--EVAL_DATASET", type=str, required=True, choices=['pascal', 'spair'], help="Dataset for evaluation.")
193
+ parser.add_argument("--BBOX_FILE", type=str, required=True, help="Path to the CSV file with bounding box data.")
194
+
195
+ # Arguments with default values
196
+ parser.add_argument("--ANNO_SIZE", type=int, default=840, help="Size to resize images to.")
197
+ parser.add_argument("--DILATION_SRC", type=int, default=28, help="Dilation for the source bounding box overlay.")
198
+ parser.add_argument("--DILATION_TGT", type=int, default=0, help="Dilation for the target bounding box overlay.")
199
+ parser.add_argument("--DATA_DIR", type=str, default="./data", help="Path to the dataset directory.")
200
+ parser.add_argument("--EXP_NOTE", type=str, default="gpt_judge_async", help="Experiment note for WandB logging.")
201
+ parser.add_argument("--TEST_RUN_SAMPLE", type=int, default=0, help="Number of samples to run (0 for all).")
202
+ parser.add_argument("--MODEL_NAME", type=str, default="gpt-4o-mini", help="Name of the OpenAI model to use.")
203
+ parser.add_argument("--BATCH_SIZE", type=int, default=10, help="Number of items to process in each batch.")
204
+
205
+ args = parser.parse_args()
206
+
207
+ # Run the main async function
208
+ asyncio.run(main(args))
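For illustration, a self-contained sketch of the concurrency pattern this script relies on: a semaphore caps the number of in-flight requests while asyncio.gather(..., return_exceptions=True) lets a failed call surface as an exception object instead of cancelling the whole batch. The fake_request coroutine below stands in for the real OpenAI call and is purely hypothetical.

import asyncio
import random

CONCURRENCY_LIMIT = 4

async def fake_request(sem: asyncio.Semaphore, i: int) -> str:
    async with sem:                                   # at most CONCURRENCY_LIMIT run at once
        await asyncio.sleep(random.random() * 0.1)    # simulated network latency
        if i % 7 == 0:
            raise RuntimeError(f"simulated API error on item {i}")
        return f"ok-{i}"

async def run_demo(n: int = 20) -> None:
    sem = asyncio.Semaphore(CONCURRENCY_LIMIT)
    results = await asyncio.gather(*(fake_request(sem, i) for i in range(n)),
                                   return_exceptions=True)
    for i, r in enumerate(results):
        status = "ERROR" if isinstance(r, Exception) else "OK"
        print(i, status, r)

if __name__ == "__main__":
    asyncio.run(run_demo())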
Code/sc_dit/vlm_visualize_single_pred.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
Code/sc_env_torch_113.yaml ADDED
@@ -0,0 +1,232 @@
1
+ name: sc_env_torch_113
2
+ channels:
3
+ - defaults
4
+ - conda-forge
5
+ - nvidia
6
+ - pytorch
7
+ dependencies:
8
+ - _libgcc_mutex=0.1=main
9
+ - _openmp_mutex=5.1=1_gnu
10
+ - asttokens=3.0.0=pyhd8ed1ab_1
11
+ - blas=1.0=mkl
12
+ - brotli-python=1.0.9=py39h6a678d5_9
13
+ - bzip2=1.0.8=h5eee18b_6
14
+ - ca-certificates=2025.4.26=hbd8a1cb_0
15
+ - certifi=2025.4.26=pyhd8ed1ab_0
16
+ - charset-normalizer=3.3.2=pyhd3eb1b0_0
17
+ - comm=0.2.2=pyhd8ed1ab_1
18
+ - cuda-cudart=11.7.99=0
19
+ - cuda-cupti=11.7.101=0
20
+ - cuda-libraries=11.7.1=0
21
+ - cuda-nvrtc=11.7.99=0
22
+ - cuda-nvtx=11.7.91=0
23
+ - cuda-runtime=11.7.1=0
24
+ - cudatoolkit=11.7.0=hd8887f6_10
25
+ - debugpy=1.8.14=py39hf88036b_0
26
+ - decorator=5.2.1=pyhd8ed1ab_0
27
+ - exceptiongroup=1.3.0=pyhd8ed1ab_0
28
+ - executing=2.2.0=pyhd8ed1ab_0
29
+ - ffmpeg=4.3=hf484d3e_0
30
+ - freetype=2.13.3=h4a9f257_0
31
+ - gmp=6.3.0=h6a678d5_0
32
+ - gnutls=3.6.15=he1e5248_0
33
+ - idna=3.7=py39h06a4308_0
34
+ - intel-openmp=2023.1.0=hdb19cb5_46306
35
+ - ipykernel=6.29.5=pyh3099207_0
36
+ - ipython=8.18.1=pyh707e725_3
37
+ - jedi=0.19.2=pyhd8ed1ab_1
38
+ - jpeg=9e=h5eee18b_3
39
+ - jupyter_client=8.6.3=pyhd8ed1ab_1
40
+ - jupyter_core=5.7.2=pyh31011fe_1
41
+ - keyutils=1.6.1=h166bdaf_0
42
+ - krb5=1.21.3=h659f571_0
43
+ - lame=3.100=h7b6447c_0
44
+ - lcms2=2.16=h92b89f2_1
45
+ - ld_impl_linux-64=2.40=h12ee557_0
46
+ - lerc=4.0.0=h6a678d5_0
47
+ - libcublas=11.10.3.66=0
48
+ - libcufft=10.7.2.124=h4fbf590_0
49
+ - libcufile=1.9.1.3=0
50
+ - libcurand=10.3.5.147=0
51
+ - libcusolver=11.4.0.1=0
52
+ - libcusparse=11.7.4.91=0
53
+ - libdeflate=1.22=h5eee18b_0
54
+ - libedit=3.1.20191231=he28a2e2_2
55
+ - libffi=3.4.4=h6a678d5_1
56
+ - libgcc=15.1.0=h767d61c_2
57
+ - libgcc-ng=15.1.0=h69a702a_2
58
+ - libgomp=15.1.0=h767d61c_2
59
+ - libiconv=1.16=h5eee18b_3
60
+ - libidn2=2.3.4=h5eee18b_0
61
+ - libnpp=11.7.4.75=0
62
+ - libnvjpeg=11.8.0.2=0
63
+ - libpng=1.6.39=h5eee18b_0
64
+ - libsodium=1.0.20=h4ab18f5_0
65
+ - libstdcxx=15.1.0=h8f9b012_2
66
+ - libstdcxx-ng=15.1.0=h4852527_2
67
+ - libtasn1=4.19.0=h5eee18b_0
68
+ - libtiff=4.7.0=hde9077f_0
69
+ - libunistring=0.9.10=h27cfd23_0
70
+ - libwebp-base=1.3.2=h5eee18b_1
71
+ - lz4-c=1.9.4=h6a678d5_1
72
+ - matplotlib-inline=0.1.7=pyhd8ed1ab_1
73
+ - mkl=2023.1.0=h213fc3f_46344
74
+ - mkl-service=2.4.0=py39h5eee18b_2
75
+ - mkl_fft=1.3.11=py39h5eee18b_0
76
+ - mkl_random=1.2.8=py39h1128e8f_0
77
+ - ncurses=6.4=h6a678d5_0
78
+ - nest-asyncio=1.6.0=pyhd8ed1ab_1
79
+ - nettle=3.7.3=hbbd107a_1
80
+ - openh264=2.1.1=h4ff587b_0
81
+ - openjpeg=2.5.2=h0d4d230_1
82
+ - openssl=3.5.0=h7b32b05_1
83
+ - packaging=25.0=pyh29332c3_1
84
+ - parso=0.8.4=pyhd8ed1ab_1
85
+ - pexpect=4.9.0=pyhd8ed1ab_1
86
+ - pickleshare=0.7.5=pyhd8ed1ab_1004
87
+ - pip=25.1=pyhc872135_2
88
+ - platformdirs=4.3.8=pyhe01879c_0
89
+ - prompt-toolkit=3.0.51=pyha770c72_0
90
+ - psutil=7.0.0=py39h8cd3c5a_0
91
+ - ptyprocess=0.7.0=pyhd8ed1ab_1
92
+ - pure_eval=0.2.3=pyhd8ed1ab_1
93
+ - pygments=2.19.1=pyhd8ed1ab_0
94
+ - pysocks=1.7.1=py39h06a4308_0
95
+ - python=3.9.21=he870216_1
96
+ - python-dateutil=2.9.0.post0=pyhff2d567_1
97
+ - python_abi=3.9=2_cp39
98
+ - pytorch=1.13.1=py3.9_cuda11.7_cudnn8.5.0_0
99
+ - pytorch-cuda=11.7=h778d358_5
100
+ - pytorch-mutex=1.0=cuda
101
+ - pyzmq=26.4.0=py39h4e4fb57_0
102
+ - readline=8.2=h5eee18b_0
103
+ - requests=2.32.3=py39h06a4308_1
104
+ - setuptools=78.1.1=py39h06a4308_0
105
+ - six=1.17.0=pyhd8ed1ab_0
106
+ - sqlite=3.45.3=h5eee18b_0
107
+ - stack_data=0.6.3=pyhd8ed1ab_1
108
+ - tbb=2021.8.0=hdb19cb5_0
109
+ - tk=8.6.14=h39e8969_0
110
+ - torchvision=0.14.1=py39_cu117
111
+ - tornado=6.5=py39h8cd3c5a_0
112
+ - traitlets=5.14.3=pyhd8ed1ab_1
113
+ - typing_extensions=4.12.2=py39h06a4308_0
114
+ - wcwidth=0.2.13=pyhd8ed1ab_1
115
+ - wheel=0.45.1=py39h06a4308_0
116
+ - xz=5.6.4=h5eee18b_1
117
+ - zeromq=4.3.5=h3b0a872_7
118
+ - zipp=3.21.0=pyhd8ed1ab_1
119
+ - zlib=1.2.13=h5eee18b_1
120
+ - zstd=1.5.6=hc292b87_0
121
+ - pip:
122
+ - absl-py==2.2.2
123
+ - aiohappyeyeballs==2.6.1
124
+ - aiohttp==3.11.18
125
+ - aiosignal==1.3.2
126
+ - albumentations==1.3.0
127
+ - annotated-types==0.7.0
128
+ - antlr4-python3-runtime==4.8
129
+ - appdirs==1.4.4
130
+ - async-timeout==5.0.1
131
+ - attrs==25.3.0
132
+ - beautifulsoup4==4.13.4
133
+ - black==21.4b2
134
+ - boto3==1.38.21
135
+ - botocore==1.38.21
136
+ - click==8.1.8
137
+ - cloudpickle==3.1.1
138
+ - contourpy==1.3.0
139
+ - cycler==0.12.1
140
+ - cython==3.1.1
141
+ - detectron2==0.6
142
+ - diffdist==0.1
143
+ - docker-pycreds==0.4.0
144
+ - einops==0.3.0
145
+ - eval-type-backport==0.2.2
146
+ - filelock==3.18.0
147
+ - fonttools==4.58.0
148
+ - frozenlist==1.6.0
149
+ - fsspec==2025.5.0
150
+ - ftfy==6.3.1
151
+ - future==1.0.0
152
+ - fvcore==0.1.5.post20221221
153
+ - gdown==5.2.0
154
+ - gitdb==4.0.12
155
+ - gitpython==3.1.44
156
+ - grpcio==1.71.0
157
+ - huggingface-hub==0.31.4
158
+ - hydra-core==1.1.1
159
+ - imageio==2.37.0
160
+ - importlib-metadata==8.7.0
161
+ - importlib-resources==6.5.2
162
+ - iopath==0.1.9
163
+ - jmespath==1.0.1
164
+ - joblib==1.5.0
165
+ - kiwisolver==1.4.7
166
+ - kornia==0.6.0
167
+ - lazy-loader==0.4
168
+ - loguru==0.7.3
169
+ - lvis==0.5.3
170
+ - markdown==3.8
171
+ - markupsafe==3.0.2
172
+ - matplotlib==3.9.4
173
+ - multidict==6.4.4
174
+ - mypy-extensions==1.1.0
175
+ - networkx==3.2.1
176
+ - nltk==3.9.1
177
+ - numpy==1.23.5
178
+ - omegaconf==2.1.1
179
+ - open-clip-torch==2.0.2
180
+ - opencv-python==4.6.0.66
181
+ - opencv-python-headless==4.11.0.86
182
+ - pandas==2.2.3
183
+ - panopticapi==0.1
184
+ - pathspec==0.12.1
185
+ - pillow==9.5.0
186
+ - portalocker==3.1.1
187
+ - propcache==0.3.1
188
+ - protobuf==6.31.0
189
+ - pycocotools==2.0.8
190
+ - pydantic==2.11.4
191
+ - pydantic-core==2.33.2
192
+ - pydeprecate==0.3.1
193
+ - pydot==4.0.0
194
+ - pyparsing==3.2.3
195
+ - pyre-extensions==0.0.23
196
+ - pytorch-lightning==1.4.2
197
+ - pytz==2025.2
198
+ - pyyaml==6.0.2
199
+ - qudida==0.0.4
200
+ - regex==2024.11.6
201
+ - s3transfer==0.12.0
202
+ - scikit-image==0.24.0
203
+ - scikit-learn==1.6.1
204
+ - scipy==1.13.1
205
+ - sentry-sdk==2.29.1
206
+ - setproctitle==1.3.6
207
+ - smmap==5.0.2
208
+ - soupsieve==2.7
209
+ - stable-diffusion-sdkit==2.1.3
210
+ - tabulate==0.9.0
211
+ - tensorboard==2.19.0
212
+ - tensorboard-data-server==0.7.2
213
+ - termcolor==3.1.0
214
+ - test-tube==0.7.5
215
+ - threadpoolctl==3.6.0
216
+ - tifffile==2024.8.30
217
+ - timm==0.6.11
218
+ - tokenizers==0.13.3
219
+ - toml==0.10.2
220
+ - torchmetrics==0.6.0
221
+ - tqdm==4.67.1
222
+ - transformers==4.26.1
223
+ - typing-inspect==0.9.0
224
+ - typing-inspection==0.4.1
225
+ - tzdata==2025.2
226
+ - urllib3==1.26.20
227
+ - wandb==0.19.11
228
+ - werkzeug==3.1.3
229
+ - xformers==0.0.16
230
+ - yacs==0.1.8
231
+ - yarl==1.20.0
232
+ prefix: /opt/conda/envs/sc_env_torch1_13
README.md ADDED
@@ -0,0 +1 @@
1
+ # SemCorrVLM_raw