jquenum committed (verified)
Commit 16aa0e5 · 1 Parent(s): d994279

Upload folder using huggingface_hub

src/data_scripts/trainvalsplit.py ADDED
@@ -0,0 +1,141 @@
+"""
+trainvalsplit.py is a script that splits an MS COCO formatted dataset into train and val
+partitions. For sample usage, run from the command line:
+
+Example:
+    python trainvalsplit.py --help
+"""
+
+# Standard Library imports:
+import argparse
+import subprocess
+import sys
+from pathlib import Path
+
+# h4dlib imports:
+# import _import_helper  # pylint: disable=unused-import # noqa: F401
+try:
+    PROJ_ROOT = (
+        subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).strip().decode("utf-8")
+    )
+except subprocess.CalledProcessError:
+    sys.exit("Error: not inside a Git repository; cannot locate h4dlib.")
+
+print("PROJ_ROOT:", PROJ_ROOT)
+h4dlib_path = (Path(PROJ_ROOT) / "src/utilities/h4dlib").resolve()
+print(h4dlib_path)
+assert h4dlib_path.exists()
+if str(h4dlib_path) not in sys.path:
+    sys.path.append(str(h4dlib_path))
+
+
+from h4dlib.data.cocohelpers import CocoClassDistHelper, CocoJsonBuilder, split
+
+# Used to check the results of the split--all classes in both splits
+# should have at least this many annotations:
+_CLASS_COUNT_THRESHOLD = 0
+
+
+def create_split(
+    input_json: Path,
+    output_path: Path,
+    output_json_name: str,
+    seed: int,
+    test_size: float = 0.2,
+) -> CocoClassDistHelper:
+    """
+    Creates train/val split for the coco-formatted dataset defined by input_json.
+
+    params:
+        input_json: full path or Path object to coco-formatted input json file.
+        output_path: full path or Path object to directory where the output json will be saved.
+        output_json_name: format-string for the output file names, with a '{}'
+            placeholder where the split type ("train"/"val") is inserted.
+        seed: random seed passed to split().
+        test_size: fraction of images placed in the val split.
+    """
+    coco = CocoClassDistHelper(input_json)
+    train_img_ids, val_img_ids = split(
+        coco.img_ids, test_size=test_size, random_state=seed
+    )
+    train_counts, train_percents = coco.get_class_dist(train_img_ids)
+    val_counts, val_percents = coco.get_class_dist(val_img_ids)
+
+    # Generate coco-formatted json's for train and val:
+    def generate_coco_json(coco, split_type, img_ids):
+        coco_builder = CocoJsonBuilder(
+            coco.cats,
+            dest_path=output_path,
+            dest_name=output_json_name.format(split_type),
+        )
+        for img_id in img_ids:
+            coco_builder.add_image(coco.imgs[img_id], coco.imgToAnns[img_id])
+        coco_builder.save()
+
+    generate_coco_json(coco, "train", train_img_ids)
+    generate_coco_json(coco, "val", val_img_ids)
+    return coco
+
+
+def verify_output(
+    original_coco: CocoClassDistHelper, output_path: Path, output_json_name: str
+) -> None:
+    """
+    Verify that the output jsons for the train/val split can be loaded, have the
+    correct number of annotations, and that the minimum count for each class meets
+    our threshold.
+    """
+
+    def verify_split_part(output_json_name, split_part):
+        json_path = output_path / output_json_name.format(split_part)
+        print(f"Checking if we can load json via coco api: {json_path}...")
+        coco = CocoClassDistHelper(json_path)
+        counts, _ = coco.get_class_dist()
+        assert min(counts.values()) >= _CLASS_COUNT_THRESHOLD, (
+            f"min class count ({min(counts.values())}) is "
+            + f"lower than threshold of {_CLASS_COUNT_THRESHOLD}"
+        )
+        print(f"{split_part} class counts: ", counts)
+        return coco
+
+    train_coco = verify_split_part(output_json_name, "train")
+    val_coco = verify_split_part(output_json_name, "val")
+    assert len(original_coco.imgs) == len(train_coco.imgs) + len(
+        val_coco.imgs
+    ), "Num images in original data should equal sum of imgs in splits."
+    assert len(original_coco.anns) == len(train_coco.anns) + len(
+        val_coco.anns
+    ), "Num annotations in original data should equal sum of those in splits."
+
+
+def main(args: argparse.Namespace):
+    """
+    Creates train/val split and verifies output.
+
+    params:
+        args: parsed command-line arguments (input_json, output_dir, seed,
+            val_split_size).
+    """
+    input_json = Path(args.input_json).resolve()
+    assert input_json.exists(), str(input_json)
+    assert input_json.is_file(), str(input_json)
+
+    output_path = Path(args.output_dir).resolve()
+    output_path.mkdir(exist_ok=True, parents=True)
+    assert output_path.is_dir(), str(output_path)
+    output_json_name = input_json.stem.replace("_full", "") + "_{}.json"
+    original_coco = create_split(
+        input_json, output_path, output_json_name, args.seed, args.val_split_size
+    )
+    verify_output(original_coco, output_path, output_json_name)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--val_split_size", type=float, default=0.2)
+    parser.add_argument(
+        "--seed",
+        type=int,
+        help="Random seed. Use split_search.py to find a seed that generates a good split",
+    )
+    parser.add_argument("--input_json", type=Path, required=True, help="Input json path")
+    parser.add_argument("--output_dir", type=Path, required=True, help="Directory where the output jsons are saved")
+    args = parser.parse_args()
+    main(args)
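
For reference, a minimal invocation sketch (not part of the committed file). The annotation path, output directory, and seed below are placeholders; in practice the seed would come from split_search.py, as the --seed help text suggests. Importing the script also runs its module-level git lookup, so it must be run from inside the repository.

# Hedged usage sketch for trainvalsplit.py -- paths and seed are placeholders.
# Command-line equivalent:
#   python src/data_scripts/trainvalsplit.py --input_json data/xview_full.json \
#       --output_dir data/splits --seed 42 --val_split_size 0.2
import argparse
from pathlib import Path

import trainvalsplit  # assumes the script's directory is on sys.path

args = argparse.Namespace(
    input_json=Path("data/xview_full.json"),  # placeholder input COCO json
    output_dir=Path("data/splits"),           # placeholder output directory
    seed=42,                                  # arbitrary example seed
    val_split_size=0.2,
)
trainvalsplit.main(args)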
src/utilities/h4dlib/h4dlib/data/__pycache__/cocohelpers.cpython-39.pyc ADDED
Binary file (43.1 kB).
 
src/utilities/h4dlib/h4dlib/data/cocohelpers.py ADDED
@@ -0,0 +1,1568 @@
+"""
+cocohelpers is a module with helper classes and functions related to the MS
+COCO API. Includes helpers for building COCO formatted json, inspecting class
+distribution, and generating a train/val split.
+"""
+
+# Standard Library imports:
+import json
+import random
+from collections import Counter, OrderedDict, defaultdict
+from copy import deepcopy
+from dataclasses import dataclass
+from pathlib import Path
+from shutil import copy
+from typing import Any, Dict, List, Tuple
+
+# 3rd Party imports:
+import numpy as np
+from pycocotools.coco import COCO
+
+__all__ = ["CocoJsonBuilder", "COCOShrinker", "CocoClassDistHelper", "split"]
+
+full_categories = {
+    11: "Fixed-wing Aircraft",
+    12: "Small Aircraft",
+    13: "Cargo Plane",
+    15: "Helicopter",
+    16: "S&R Helicopter",
+    17: "Passenger Vehicle",
+    18: "Small Car",
+    19: "Bus",
+    20: "Pickup Truck",
+    21: "Utility Truck",
+    22: "Ambulance",
+    23: "Truck",
+    24: "Cargo Truck",
+    25: "Truck w/Box",
+    26: "Truck Tractor",
+    27: "Trailer",
+    28: "Truck w/Flatbed",
+    29: "Truck w/Liquid",
+    32: "Crane Truck",
+    33: "Railway Vehicle",
+    34: "Passenger Car",
+    35: "Cargo Car",
+    36: "Flat Car",
+    37: "Tank car",
+    38: "Locomotive",
+    40: "Maritime Vessel",
+    41: "Motorboat",
+    42: "Sailboat",
+    44: "Tugboat",
+    45: "Barge",
+    46: "Crane Vessel",
+    47: "Fishing Vessel",
+    48: "Cruise Ship",
+    49: "Ferry",
+    50: "Yacht",
+    51: "Container Ship",
+    52: "Oil Tanker",
+    53: "Engineering Vehicle",
+    54: "Tower crane",
+    55: "Container Crane",
+    56: "Reach Stacker",
+    57: "Straddle Carrier",
+    58: "Container Handler",
+    59: "Mobile Crane",
+    60: "Dump Truck",
+    61: "Haul Truck",
+    62: "Tractor",
+    63: "Front loader/Bulldozer",
+    64: "Excavator",
+    65: "Cement Mixer",
+    66: "Ground Grader",
+    67: "Scraper",
+    69: "Power Shovel",
+    70: "Bucket-wheel Excavator",
+    71: "Hut/Tent",
+    72: "Shed",
+    73: "Building",
+    74: "Aircraft Hangar",
+    75: "UNK1",
+    76: "Damaged Building",
+    77: "Facility",
+    78: "Stadium",
+    79: "Construction Site",
+    81: "Marina",
+    82: "UNK2",
+    83: "Vehicle Lot",
+    84: "Helipad",
+    86: "Storage Tank",
+    87: "UNK3",
+    89: "Shipping Container Lot",
+    91: "Shipping Container",
+    93: "Pylon",
+    94: "Tower",
+    96: "Water Tower",
+    97: "Wind Turbine",
+    98: "Lighthouse",
+    99: "Cooling Tower",
+    100: "Smokestack",
+}
+
+
+class ReIndex:
+    """
+    A class used to reindex categories.
+    """
+
+    def __init__(self, coco):
+        self.cats = coco.dataset["categories"]
+        self.anns = coco.dataset["annotations"]
+        self.id2name = {cat["id"]: cat["name"] for cat in self.cats}
+        self.id2id = {cat["id"]: i + 1 for i, cat in enumerate(self.cats)}
+
+        self.new_cats = [
+            {
+                "supercategory": cat["supercategory"],
+                "id": self.id2id[cat["id"]],
+                "name": cat["name"],
+            }
+            for cat in self.cats
+        ]
+
+        print("new cats: ", self.new_cats)
+
+        self.new_anns = [
+            {
+                "segmentation": ann["segmentation"],
+                "bbox": ann["bbox"],
+                "area": ann["area"],
+                "id": ann["id"],
+                "image_id": ann["image_id"],
+                "category_id": self.id2id[ann["category_id"]],
+                "iscrowd": 0,  # matters for coco_eval
+            }
+            for ann in self.anns
+        ]
+
+    def coco_has_zero_as_background_id(self):
+        """Return true if category_id=0 is either unused, or used for the background class. Else return false."""
+        cat_id_zero_nonbackground_exists = False
+        for cat in self.cats:
+            if cat["id"] == 0:
+                if cat["name"] not in ["background", "__background__"]:
+                    cat_id_zero_nonbackground_exists = True
+                    break
+        # id:0 isn't used for any categories, so by default we can assume it can be used for the background class:
+        return not cat_id_zero_nonbackground_exists
+
+
+class CocoJsonBuilder(object):
+    """
+    A class used to help build coco-formatted json from scratch.
+    """
+
+    def __init__(
+        self,
+        categories: List[Dict[str, object]],
+        subset_cat_ids: list = [],
+        dest_path="",
+        dest_name="",
+        keep_empty_images=False,
+    ):
+        """
+        Args:
+
+            categories: this can be the COCO.dataset['categories'] property if you
+                are building a COCO json derived from an existing COCO json and don't
+                want to modify the classes. It's a list of dictionary objects. Each dict has
+                three keys: "id": int = category id, "supercategory": str = name of parent
+                category, and "name": str = name of category.
+
+            subset_cat_ids: if non-empty, only annotations with these category ids are
+                kept, and the kept categories are re-indexed starting from 1.
+
+            dest_path: str or pathlib.Path instance, holding the path to the directory where
+                the new COCO formatted annotations file (dest_name) will be saved.
+
+            dest_name: str of the filename where the generated json will be saved to.
+        """
+        self.categories = categories
+        self.subset_cat_ids = subset_cat_ids
+        self.new_categories = []
+        self.reindex_cat_id = {}  # maps from old to new cat id
+        if self.subset_cat_ids:
+            cat_counter = 1  # one-indexing
+            for cat in self.categories:
+                if cat["id"] in self.subset_cat_ids:
+                    new_cat = deepcopy(cat)
+                    new_cat["id"] = cat_counter
+                    self.reindex_cat_id[cat["id"]] = cat_counter
+                    cat_counter += 1
+                    self.new_categories.append(new_cat)
+                else:
+                    print(f"skipping cat_id {cat['id']}")
+            print("New cats length: ", len(self.new_categories))
+        self.keep_empty_images = keep_empty_images
+        self.dest_path = Path(dest_path)
+        self.dest_name = dest_name
+        self.images = []
+        self.annotations: List[Dict[str, Any]] = []
+        self.dest_path.mkdir(parents=True, exist_ok=True)
+        # assert self.dest_path.exists(), f"dest_path: '{self.dest_path}' does not exist"
+        # assert (
+        #     self.dest_path.is_dir()
+        # ), f"dest_path: '{self.dest_path}' is not a directory"
+
+    @staticmethod
+    def class_remap(source_coco: COCO, new_cats: list[dict], class_remap: dict[int, int]):
+        """
+        Remaps the categories and annotations.
+
+        :param new_cats: new categories
+        :type new_cats: list
+        :param class_remap: maps ids from original categories (self.categories) to new categories
+        :type class_remap: dict
+        """
+        source_anns = source_coco.dataset["annotations"]
+        source_imgs = source_coco.dataset["images"]
+        source_cats = source_coco.dataset["categories"]
+
+        child_id_to_parent_id = class_remap
+        print("remap child_id_to_parent_id")
+        print(child_id_to_parent_id)
+
+        child_id_to_parent_name = {
+            cat["id"]: (
+                child_id_to_parent_id[cat["id"]] if cat["id"] in child_id_to_parent_id else None
+            )
+            for cat in source_cats
+        }
+        print("remap child_id_to_parent_name")
+        print(child_id_to_parent_name)
+
+        new_imgs = deepcopy(source_imgs)
+        new_anns = [
+            {
+                "segmentation": ann["segmentation"],
+                "bbox": ann["bbox"],
+                "area": ann["area"],
+                "id": ann["id"],
+                "image_id": ann["image_id"],
+                "category_id": child_id_to_parent_id[ann["category_id"]],
+                "iscrowd": 0,  # matters for coco_eval
+            }
+            for ann in source_anns
+            if ann["category_id"] in list(child_id_to_parent_id.keys())
+        ]
+        print("len source anns: ", len(source_anns))
+        print("len new anns: ", len(new_anns))
+
+        dataset = {"categories": new_cats, "images": new_imgs, "annotations": new_anns}
+        return dataset
+
+    def generate_info(self) -> Dict[str, str]:
+        """returns: dictionary of descriptive info about the dataset."""
+        info_json = {
+            "description": "XView Dataset",
+            "url": "http://xviewdataset.org/",
+            "version": "1.0",
+            "year": 2018,
+            "contributor": "Defense Innovation Unit Experimental (DIUx)",
+            "date_created": "2018/02/22",
+        }
+        return info_json
+
+    def generate_licenses(self) -> List[Dict[str, Any]]:
+        """Returns the json hash for the licensing info."""
+        return [
+            {
+                "url": "http://creativecommons.org/licenses/by-nc-sa/4.0/",
+                "id": 1,
+                "name": "Attribution-NonCommercial-ShareAlike License",
+            }
+        ]
+
+    def add_image(self, img: Dict[str, Any], annotations: List[Dict]) -> None:
+        """
+        Add an image and its annotations to the coco json.
+
+        Args:
+            img: A dictionary of image attributes. This gets added verbatim to the
+                json, so in typical use cases when you are building a coco json from an
+                existing coco json, you would just pull the entire coco.imgs[img_id]
+                object and pass it as the value for this parameter.
+            annotations: annotations of the image to add. List of dictionaries.
+                Each dict is one annotation; it contains all the properties of the
+                annotation that should appear in the coco json. For example, when using
+                this json builder to build JSON's for a train/val split, the
+                annotations can be copied straight from the coco object for the full
+                dataset, and passed into this parameter.
+
+        Returns: None
+        """
+        temp_anns = []
+        for ann in annotations:
+            # if builder was initialized with subset_cat_ids, only the corresponding annotations
+            # are re-indexed and added
+            if self.subset_cat_ids:
+                if ann["category_id"] in self.subset_cat_ids:
+                    new_ann = deepcopy(ann)
+                    new_ann["category_id"] = self.reindex_cat_id[ann["category_id"]]
+                    temp_anns.append(new_ann)
+            else:
+                temp_anns.append(ann)
+
+        if self.subset_cat_ids:
+            if temp_anns or self.keep_empty_images:
+                self.images.append(img)
+                for ann in temp_anns:
+                    self.annotations.append(ann)
+            else:
+                pass  # no image added
+        else:
+            self.images.append(img)
+            for ann in temp_anns:
+                self.annotations.append(ann)
+
+    def get_json(self) -> Dict[str, object]:
+        """Returns the full json for this instance of coco json builder."""
+        root_json = {}
+        if self.new_categories:
+            root_json["categories"] = self.new_categories
+        else:
+            root_json["categories"] = self.categories
+        root_json["info"] = self.generate_info()
+        root_json["licenses"] = self.generate_licenses()
+        root_json["images"] = self.images
+        root_json["annotations"] = self.annotations
+        return root_json
+
+    def save(self) -> Path:
+        """Saves the json to the dest_path/dest_name location."""
+        file_path = self.dest_path / self.dest_name
+        print(f"Writing output to: '{file_path}'")
+        root_json = self.get_json()
+        with open(file_path, "w") as coco_file:
+            coco_file.write(json.dumps(root_json))
+        return file_path
+
+
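
A minimal usage sketch (not part of the committed file): copying the first few images of an existing COCO json into a new file with CocoJsonBuilder. The annotation path and output names are placeholders.

from pycocotools.coco import COCO
from h4dlib.data.cocohelpers import CocoJsonBuilder

coco = COCO("annotations/train_full.json")  # placeholder path
builder = CocoJsonBuilder(
    coco.dataset["categories"], dest_path="out", dest_name="train_first10.json"
)
for img_id in coco.getImgIds()[:10]:
    builder.add_image(coco.imgs[img_id], coco.imgToAnns[img_id])
builder.save()  # writes out/train_first10.json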
+class COCOShrinker:
+    """Shrinker takes an MS COCO formatted dataset and creates a tiny version of it."""
+
+    def __init__(self, dataset_path: Path, keep_empty_images=False) -> None:
+        assert dataset_path.exists(), f"dataset_path: '{dataset_path}' does not exist"
+        assert dataset_path.is_file(), f"dataset_path: '{dataset_path}' is not a file"
+        self.base_path: Path = dataset_path.parent
+        self.dataset_path: Path = dataset_path
+        self.keep_empty_images = keep_empty_images
+
+    def shrink(self, target_filename: str, size: int = 512) -> Path:
+        """
+        Create a toy sized version of the dataset so we can use it just for testing whether
+        code runs, not for real training.
+
+        Args:
+            target_filename: filename to save the tiny dataset to.
+            size: number of items to put into the output. The first <size>
+                elements from the input dataset are placed into the output.
+
+        Returns: Path of the output dataset, which is saved in the same directory
+            where the input .json lives.
+        """
+        # Create subset
+        assert target_filename, "'target_filename' argument must not be empty"
+        dest_path: Path = self.base_path / target_filename
+        print(f"Creating subset of {self.dataset_path}, of size: {size}, at: {dest_path}")
+        coco = COCO(self.dataset_path)
+        builder = CocoJsonBuilder(
+            coco.dataset["categories"], dest_path=dest_path.parent, dest_name=dest_path.name
+        )
+        subset_img_ids = coco.getImgIds()[:size]
+        for img_id in subset_img_ids:
+            anns = coco.imgToAnns[img_id]
+            if anns or self.keep_empty_images:
+                builder.add_image(coco.imgs[img_id], anns)
+        builder.save()
+        return dest_path
+
+
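
A small sketch of the shrinker (not part of the committed file; the annotation path is a placeholder). It writes the tiny json next to the source annotations:

from pathlib import Path
from h4dlib.data.cocohelpers import COCOShrinker

shrinker = COCOShrinker(Path("annotations/train_full.json"))  # placeholder path
tiny_path = shrinker.shrink("train_tiny.json", size=64)       # keeps the first 64 images
print(tiny_path)                                              # annotations/train_tiny.json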
+class COCOSubset:
+    """Subset takes an MS COCO formatted dataset and creates a subset according to COCO parent ids provided as a list."""
+
+    def __init__(self, dataset_path: Path, keep_empty_images=False) -> None:
+        assert dataset_path.exists(), f"dataset_path: '{dataset_path}' does not exist"
+        assert dataset_path.is_file(), f"dataset_path: '{dataset_path}' is not a file"
+        self.base_path: Path = dataset_path.parent
+        self.dataset_path: Path = dataset_path
+        self.keep_empty_images = keep_empty_images
+
+    def shrink(self, target_filename: str, subset_par_ids=[], subset_cat_ids=[], size=512) -> Path:
+        """
+        Create a subset of the dataset, keeping only the images whose parent id is in
+        subset_par_ids (all parents if empty) and, optionally, re-indexing the
+        categories listed in subset_cat_ids.
+
+        Args:
+            target_filename: filename to save the subset dataset to.
+            subset_par_ids: parent image ids to keep; defaults to all parents.
+            subset_cat_ids: category ids to keep (re-indexed from 1); defaults to all.
+            size: currently only used for logging.
+
+        Returns: Path of the output dataset, which is saved in the same directory
+            where the input .json lives.
+        """
+        # Create subset
+        assert target_filename, "'target_filename' argument must not be empty"
+        dest_path: Path = self.base_path / target_filename
+        print(f"Creating subset of {self.dataset_path}, of size: {size}, at: {dest_path}")
+        coco = COCO(self.dataset_path)
+
+        categories = coco.dataset["categories"]
+
+        builder = CocoJsonBuilder(
+            categories,
+            subset_cat_ids,
+            dest_path.parent,
+            dest_path.name,
+            self.keep_empty_images,
+        )
+
+        # subset_img_ids = coco.getImgIds()[:size]
+
+        # create index to map from parent id to list of image ids
+        parent_id_to_img_ids = {}
+        imgs = coco.dataset["images"]
+        parent_ids = set()
+        for img in imgs:
+            parent_ids.add(img["parent_id"])
+        for pid in parent_ids:
+            parent_id_to_img_ids[pid] = []
+        for img in imgs:
+            parent_id_to_img_ids[img["parent_id"]].append(img["id"])
+
+        if not subset_par_ids:
+            subset_par_ids = parent_ids
+
+        for par_id in subset_par_ids:
+            for img_id in parent_id_to_img_ids[par_id]:
+                anns = coco.imgToAnns[img_id]
+                if anns or self.keep_empty_images:
+                    builder.add_image(coco.imgs[img_id], anns)
+        builder.save()
+        return dest_path
+
+
+class COCORedundant:
+    """Creates a version of xview with perfect redundancy by replacing a sampled
+    fraction of xview images with a single empty image (no labels)."""
+
+    def __init__(self, dataset_path: Path) -> None:
+        assert dataset_path.exists(), f"dataset_path: '{dataset_path}' does not exist"
+        assert dataset_path.is_file(), f"dataset_path: '{dataset_path}' is not a file"
+        self.base_path: Path = dataset_path.parent
+        self.dataset_path: Path = dataset_path
+
+    def redundify(
+        self, target_filename: str, redundant_img_fn: str, percent_redundant: float
+    ) -> Path:
+        """
+        Args:
+            target_filename: filename to save the new dataset to.
+            redundant_img_fn: name of the image file that will be used to make redundant copies
+            percent_redundant: percentage of images to make redundant
+
+        Returns: Path of the output dataset.
+        """
+        assert target_filename, "'target_filename' argument must not be empty"
+        dest_path: Path = self.base_path / target_filename
+        print(f"Creating subset of {self.dataset_path} at: {dest_path}")
+        coco = COCO(self.dataset_path)
+        builder = CocoJsonBuilder(
+            coco.dataset["categories"], dest_path=dest_path.parent, dest_name=dest_path.name
+        )
+
+        # get all image ids
+        all_chip_ids = coco.getImgIds()
+
+        num_samples = int(percent_redundant * len(all_chip_ids))
+
+        sampled_chip_ids = random.sample(all_chip_ids, num_samples)
+
+        empty_anns = []
+
+        # make each sampled chip redundant; add to builder
+        for chipid in sampled_chip_ids:
+            cocoimg = coco.imgs[chipid]
+            cocoimg["file_path"] = redundant_img_fn
+            builder.add_image(cocoimg, empty_anns)
+
+        # add the rest of the chips to the builder
+        rest_of_chip_ids = list(set(all_chip_ids) - set(sampled_chip_ids))
+        for chipid in rest_of_chip_ids:
+            builder.add_image(coco.imgs[chipid], coco.imgToAnns[chipid])
+
+        builder.save()
+
+        print(
+            f"Total chips in new dataset: {len(builder.images)} (should match the original size of {len(all_chip_ids)})"
+        )
+        return dest_path
+
+
+class COCOVideoFrames:
+    """Creates a version of xview that mimics sequential video frame redundancy by making perfectly redundant copies of image chips."""
+
+    def __init__(self, dataset_path: Path) -> None:
+        assert dataset_path.exists(), f"dataset_path: '{dataset_path}' does not exist"
+        assert dataset_path.is_file(), f"dataset_path: '{dataset_path}' is not a file"
+        self.base_path: Path = dataset_path.parent
+        self.dataset_path: Path = dataset_path
+
+    def vidify(
+        self, target_filename: str, num_chips: int, num_copies: int, debug: bool = False
+    ) -> Path:
+        """
+        Args:
+            target_filename: filename to save the new dataset to.
+            num_chips: number of chips sampled from each parent image.
+            num_copies: number of redundant copies made of each sampled chip.
+            debug: Setting to 'True' activates helpful print statements.
+
+        Returns: Path of the output dataset.
+        """
+        # Create subset
+        assert target_filename, "'target_filename' argument must not be empty"
+        dest_path: Path = self.base_path / target_filename
+        print(f"Creating subset of {self.dataset_path} at: {dest_path}")
+        coco = COCO(self.dataset_path)
+        builder = CocoJsonBuilder(
+            coco.dataset["categories"], dest_path=dest_path.parent, dest_name=dest_path.name
+        )
+
+        # create index to map from parent id to list of image ids
+        parent_id_to_chipids = {}
+        imgs = coco.dataset["images"]
+        parent_ids = set()
+        for img in imgs:
+            parent_ids.add(img["parent_id"])
+
+        for pid in parent_ids:
+            parent_id_to_chipids[pid] = []
+        for img in imgs:
+            parent_id_to_chipids[img["parent_id"]].append(img["id"])
+
+        # initialize counters
+        chip_counter = 0
+        ann_counter = 0
+
+        # DEBUG COUNTERS
+        pid_ = 2
+        chipid_ = 2
+        i_ = 2
+
+        # for each parent id
+        for pid in parent_ids:
+            this_par_chipids = parent_id_to_chipids[pid]
+
+            # randomly sample num_chips chips
+            random_chipids = random.sample(this_par_chipids, num_chips)
+
+            # for each chip
+            for chipid in random_chipids:
+                img = coco.imgs[chipid]
+
+                # DEBUG
+                if debug:
+                    if pid in list(parent_ids)[:pid_] and chipid in random_chipids[:chipid_]:
+                        print("Original coco image...")
+                        print(img)
+                        print("")
+
+                # for num_copies
+                for i in range(num_copies):
+                    new_cocoim = deepcopy(img)
+                    new_cocoim["id"] = chip_counter
+
+                    # DEBUG
+                    if debug:
+                        if (
+                            pid in list(parent_ids)[:pid_] and chipid in random_chipids[:chipid_]
+                        ) and i in list(range(i_)):
+                            print("New coco image...")
+                            print(new_cocoim)
+                            print("")
+
+                    anns = coco.imgToAnns[chipid]
+
+                    # DEBUG
+                    if debug and anns:
+                        if (
+                            pid in list(parent_ids)[:pid_] and chipid in random_chipids[:chipid_]
+                        ) and i in list(range(i_)):
+                            print("Original annotation (first)...")
+                            print(anns[0])
+                            print("")
+
+                    new_anns = []
+                    for ann in anns:
+                        new_ann = deepcopy(ann)
+                        new_ann["image_id"] = chip_counter
+                        new_ann["id"] = ann_counter
+                        new_anns.append(new_ann)
+                        ann_counter += 1
+
+                    builder.add_image(new_cocoim, new_anns)
+
+                    # DEBUG
+                    if debug and new_anns:
+                        if (
+                            pid in list(parent_ids)[:pid_] and chipid in random_chipids[:chipid_]
+                        ) and i in list(range(i_)):
+                            print("New annotations...")
+                            for ann in new_anns[:3]:
+                                print(ann)
+                            print("")
+
+                    chip_counter += 1
+
+        builder.save()
+
+        print(f"total chips created: {len(builder.images)}")
+        return dest_path
+
+
+class COCOBoxNoise:
+    """Creates a version of xview that induces synthetic noise on the spatial accuracy of the bounding boxes."""
+
+    def __init__(self, dataset_path: Path) -> None:
+        assert dataset_path.exists(), f"dataset_path: '{dataset_path}' does not exist"
+        assert dataset_path.is_file(), f"dataset_path: '{dataset_path}' is not a file"
+        self.base_path: Path = dataset_path.parent
+        self.dataset_path: Path = dataset_path
+
+    def apply_box_shift(self, ann: dict, shift_vec: tuple) -> None:
+        """
+        Args:
+            ann: A single coco annotation
+            shift_vec: An (x,y) tuple where each coord controls the distance
+                to shift the box in each dimension in terms of a factor of box width/height,
+                e.g. (1,0) causes a horizontal shift of '1' box width to the right and zero vertical shift
+
+        Returns: Nothing
+        """
+        x, y, w, h = ann["bbox"][0], ann["bbox"][1], ann["bbox"][2], ann["bbox"][3]
+
+        # Calc shift
+        x_shift = int(shift_vec[0] * w)
+        y_shift = int(shift_vec[1] * h)
+
+        # Apply shift (keep the segmentation polygon in sync with the shifted box)
+        x_min = x + x_shift
+        y_min = y + y_shift
+        x_max = x_min + w
+        y_max = y_min + h
+        ann["bbox"] = [x_min, y_min, w, h]
+        ann["segmentation"] = [[x_min, y_min, x_min, y_max, x_max, y_max, x_max, y_min]]
+
+    def random_shift(self, shift_coeff: float) -> tuple:
+        """
+        Args:
+            shift_coeff: A float that controls the magnitude of the synthetic box shift
+
+        Returns: A tuple that describes the shift in each of the x and y directions
+        """
+
+        shift_x = shift_coeff * random.uniform(-1, 1)
+        shift_y = shift_coeff * random.uniform(-1, 1)
+
+        return (shift_x, shift_y)
+
+    def adjust_if_out_of_bounds(self, ann: dict, img: dict) -> None:
+        """
+        Handles the case when a bounding box annotation breaches the image boundaries
+
+        Args:
+            ann: A single coco annotation
+            img: The coco image corresponding to the annotation
+
+        Returns: Nothing
+        """
+
+        im_w, im_h = img["width"], img["height"]
+
+        x, y, w, h = ann["bbox"][0], ann["bbox"][1], ann["bbox"][2], ann["bbox"][3]
+        x_min = x
+        x_max = x + w
+        y_min = y
+        y_max = y + h
+
+        # check case where:
+
+        # box completely out of bounds
+        if (x_min >= im_w or x_max <= 0) or (y_min >= im_h or y_max <= 0):
+            ann.clear()  # drop boxes that fall entirely outside the image
+            return
+
+        # box breaks left boundary
+        if x_min < 0:
+            x, x_min = 0, 0
+            w = x_max - x_min
+
+        # box breaks top boundary
+        if y_min < 0:
+            y, y_min = 0, 0
+            h = y_max - y_min
+
+        # box breaks right boundary
+        if x_max > im_w:
+            x_max = im_w
+            w = x_max - x_min
+
+        # box breaks bottom boundary
+        if y_max > im_h:
+            y_max = im_h
+            h = y_max - y_min
+
+        ann["bbox"] = [x, y, w, h]
+        ann["segmentation"] = [[x_min, y_min, x_min, y_max, x_max, y_max, x_max, y_min]]
+
+        return
+
+    def box_noisify(
+        self,
+        target_filename: str,
+        box_noise_coeff: float,
+        chip_ratio: float,
+        debug: bool = False,
+    ) -> Path:
+        """
+        Args:
+            target_filename: filename to save the new dataset to.
+            box_noise_coeff: value that controls magnitude of box shift (can be greater than 1, or less than 0).
+            chip_ratio: the ratio of chips within each parent image that receive noise (between 0 and 1)
+            debug: Setting to 'True' activates helpful print statements
+
+        Returns: Path of the output dataset.
+        """
+        # Create subset
+        assert target_filename, "'target_filename' argument must not be empty"
+        dest_path: Path = self.base_path / target_filename
+        print(f"Creating subset of {self.dataset_path} at: {dest_path}")
+        coco = COCO(self.dataset_path)
+        builder = CocoJsonBuilder(
+            coco.dataset["categories"], dest_path=dest_path.parent, dest_name=dest_path.name
+        )
+
+        # create index to map from parent id to list of image ids
+        parent_id_to_chipids = {}
+        imgs = coco.dataset["images"]
+        parent_ids = set()
+        for img in imgs:
+            parent_ids.add(img["parent_id"])
+        for pid in parent_ids:
+            parent_id_to_chipids[pid] = []
+        for img in imgs:
+            parent_id_to_chipids[img["parent_id"]].append(img["id"])
+
+        # DEBUG COUNTERS
+        pid_ = 2
+        chipid_ = 2
+        i_ = 2
+
+        # for each parent id
+        for pid in parent_ids:
+            this_par_chipids = parent_id_to_chipids[pid]
+
+            num_chips = int(chip_ratio * len(this_par_chipids))
+
+            # randomly sample num_chips chips
+            noisy_chipids = random.sample(this_par_chipids, num_chips)
+
+            other_chipids = list(set(this_par_chipids) - set(noisy_chipids))
+
+            # for each chip
+            for chipid in noisy_chipids:
+                img = coco.imgs[chipid]
+
+                # DEBUG
+                if debug:
+                    if pid in list(parent_ids)[:pid_] and chipid in noisy_chipids[:chipid_]:
+                        print("Original coco image...")
+                        print(img)
+                        print("")
+
+                anns = coco.imgToAnns[chipid]
+
+                # DEBUG
+                if debug and anns:
+                    if pid in list(parent_ids)[:pid_] and chipid in noisy_chipids[:chipid_]:
+                        print("Original annotation (first)...")
+                        print(anns[0])
+                        print("")
+
+                new_anns = []
+                for ann in anns:
+                    new_ann = deepcopy(ann)
+
+                    # shift box label
+                    xy_shift = self.random_shift(box_noise_coeff)
+                    self.apply_box_shift(new_ann, xy_shift)
+                    self.adjust_if_out_of_bounds(new_ann, img)
+
+                    if new_ann:  # skip boxes dropped for being fully out of bounds
+                        new_anns.append(new_ann)
+
+                    if debug and new_anns:
+                        if pid in list(parent_ids)[:pid_] and chipid in noisy_chipids[:chipid_]:
+                            if ann["id"] == anns[0]["id"]:
+                                print("XY shift for anns[0]...")
+                                print(xy_shift)
+                                print("")
+
+                builder.add_image(img, new_anns)
+
+                # DEBUG
+                if debug and new_anns:
+                    if pid in list(parent_ids)[:pid_] and chipid in noisy_chipids[:chipid_]:
+                        print("New annotations...")
+                        for ann in new_anns[:3]:
+                            print(ann)
+                        print("")
+
+            for chipid in other_chipids:
+                builder.add_image(coco.imgs[chipid], coco.imgToAnns[chipid])
+
+        builder.save()
+
+        print(f"total chips created: {len(builder.images)}")
+        return dest_path
+
+
+class COCONoisyCleanMerge:
+    """Merges a 'noisy' and a 'clean' MS COCO formatted dataset: images whose ids appear
+    in the provided indexes json keep their noisy annotations, all other images keep the
+    clean ones."""
+
+    def __init__(
+        self, noisy_dataset_path: Path, clean_dataset_path: Path, indexes_path: Path
+    ) -> None:
+        assert (
+            noisy_dataset_path.exists()
+        ), f"noisy_dataset_path: '{noisy_dataset_path}' does not exist"
+        assert (
+            noisy_dataset_path.is_file()
+        ), f"noisy_dataset_path: '{noisy_dataset_path}' is not a file"
+        self.noisy_base_path: Path = noisy_dataset_path.parent
+        self.noisy_dataset_path: Path = noisy_dataset_path
+
+        assert (
+            clean_dataset_path.exists()
+        ), f"clean_dataset_path: '{clean_dataset_path}' does not exist"
+        assert (
+            clean_dataset_path.is_file()
+        ), f"clean_dataset_path: '{clean_dataset_path}' is not a file"
+        self.clean_base_path: Path = clean_dataset_path.parent
+        self.clean_dataset_path: Path = clean_dataset_path
+
+        assert indexes_path.exists(), f"indexes_path: '{indexes_path}' does not exist"
+        assert indexes_path.is_file(), f"indexes_path: '{indexes_path}' is not a file"
+        self.indexes_base_path: Path = indexes_path.parent
+        self.indexes_path: Path = indexes_path
+
+    def load_indexes_from_json(self, index_json_path: Path):
+        with open(index_json_path) as f:
+            loaded_data = json.load(f)
+        print(f"LOADING {len(loaded_data['current_indexes'])} indexes from: {index_json_path}.")
+        current_indexes, unlabelled_indexes = (
+            loaded_data["current_indexes"],
+            loaded_data["unlabelled_indexes"],
+        )
+        return current_indexes, unlabelled_indexes
+
+    # def get_sampled_batch_indices(split_a: float, split_b: float, al_algo) -> typing.Set[int]:
+    #     """
+    #     Given two splits (a, and b), returns the indices that were sampled to move
+    #     from split a to split b. Second return value is the labelled set
+    #     as of the start of split b.
+    #     """
+    #     labelled_a, unlabelled_a = utils.load_indexes(args, split_a, al_algo)
+    #     labelled_b, unlabelled_b = utils.load_indexes(args, split_b, al_algo)
+    #     return set(labelled_b) - set(labelled_a), set(labelled_b)
+
+    def merge_noisy_clean(self, target_filename: str) -> Path:
+        """
+        Merge the noisy and clean datasets: images listed in the indexes json take
+        their annotations from the noisy dataset, every other image takes them from
+        the clean dataset.
+
+        Args:
+            target_filename: filename to save the merged dataset to.
+
+        Returns: Path of the output dataset, which is saved next to the clean
+            dataset json.
+        """
+        assert target_filename, "'target_filename' argument must not be empty"
+        dest_path: Path = self.clean_base_path / target_filename
+        print(f"Creating subset of {self.clean_dataset_path} at: {dest_path}")
+        coco_noisy = COCO(self.noisy_dataset_path)
+        coco_clean = COCO(self.clean_dataset_path)
+        builder = CocoJsonBuilder(
+            coco_noisy.dataset["categories"], dest_path=dest_path.parent, dest_name=dest_path.name
+        )
+
+        # get the initial noisy indexes
+        init_indexes, _ = self.load_indexes_from_json(self.indexes_path)
+
+        clean_imgs = coco_clean.dataset["images"]
+
+        for img in clean_imgs:
+            anns = coco_clean.imgToAnns[img["id"]]
+            if img["id"] in set(init_indexes):
+                # img = coco_noisy.imgs[img["id"]]
+                anns = coco_noisy.imgToAnns[img["id"]]
+            builder.add_image(img, anns)
+        builder.save()
+
+        return dest_path
+
+
+class CocoClassDistHelper(COCO):
+    """
+    A subclass of pycocotools.coco.COCO that adds methods to calculate class
+    distribution.
+    """
+
+    def __init__(
+        self,
+        annotation_file: str = None,
+        create_mapping: bool = False,
+        mapping_csv: str = None,
+        write_to_JSON: bool = None,
+        dict_pass: bool = False,
+    ):
+        # super().__init__(annotation_file, create_mapping, mapping_csv, write_to_JSON, dict_pass)
+        super().__init__(annotation_file)
+        # list of dictionaries. 3 keys each: (supercategory, id, name):
+        self.cats = self.loadCats(self.getCatIds())
+        list.sort(self.cats, key=lambda c: c["id"])
+        # Dictionaries to lookup category and supercategory names from category
+        # id:
+        self.cat_name_lookup = {c["id"]: c["name"] for c in self.cats}
+        self.supercat_name_lookup = {
+            c["id"]: c["supercategory"] if "supercategory" in c else "None" for c in self.cats
+        }
+        # List of integers, image id's:
+        self.img_ids = self.getImgIds()
+        # List of annotation id's:
+        self.ann_ids = self.getAnnIds(imgIds=self.img_ids)
+        self.anns_list = self.loadAnns(self.ann_ids)
+        print(f"num images: {len(self.img_ids)}")
+        # print(f"num annotation id's: {len(self.ann_ids)}")
+        print(f"num annotations: {len(self.anns)}")
+        # print(f"First annotation: {self.anns[0]}")
+        # Create self.img_ann_counts, a dictionary keyed off of img_id. For
+        # each img_id it stores a collections.Counter object that has a count
+        # of how many annotations for each category/class there are for that
+        # img_id
+        self.img_ann_counts = {}
+        for img_id in self.imgToAnns.keys():
+            imgAnnCounter = Counter({cat["name"]: 0 for cat in self.cats})
+            anns = self.imgToAnns[img_id]
+            for ann in anns:
+                imgAnnCounter[self.cat_name_lookup[ann["category_id"]]] += 1
+            self.img_ann_counts[img_id] = imgAnnCounter
+        self.num_cats = len(self.cats)
+        self.cat_img_counts: Dict[int, float] = {
+            c["id"]: float(len(np.unique(self.catToImgs[c["id"]]))) for c in self.cats
+        }
+        self.cat_ann_counts: Dict[int, int] = defaultdict(int)
+        for cat_id in self.cat_name_lookup.keys():
+            self.cat_ann_counts[cat_id] = 0
+        for ann in self.anns.values():
+            self.cat_ann_counts[ann["category_id"]] += 1
+        self.cat_img_counts = OrderedDict(sorted(self.cat_img_counts.items()))
+        self.cat_ann_counts = OrderedDict(sorted(self.cat_ann_counts.items()))
+
+    def get_class_dist(self, img_ids: List[int] = None):
+        """
+        Args:
+            img_ids: List of image id's. If None, distribution is calculated for
+                all image id's in the dataset.
+
+        Returns: A dictionary representing the class distribution (keys are category
+            names, values are counts, i.e. how many annotations have that
+            category/class label), and an np.array of class percentages. Entries are
+            sorted by category name.
+        """
+        cat_counter = Counter({cat["name"]: 0 for cat in self.cats})
+        if img_ids is None:
+            img_ids = self.imgToAnns.keys()
+
+        for img_id in img_ids:
+            if img_id not in self.imgToAnns:
+                continue
+            cat_counter += self.img_ann_counts[img_id]
+        # Counter addition above drops zero counts, so re-insert any missing classes:
+        for cat in self.cats:
+            if cat["name"] not in cat_counter:
+                cat_counter[cat["name"]] = 0
+
+        cat_counter = {k: v for k, v in sorted(cat_counter.items(), key=lambda item: item[0])}
+
+        # Convert to np array where entries correspond to category names sorted asc.:
+        total = float(sum(cat_counter.values()))
+        cat_names = [c["name"] for c in self.cats]
+        cat_percents = np.zeros((self.num_cats))
+        for idx, cat_name in enumerate(sorted(cat_names)):
+            cat_percents[idx] = cat_counter[cat_name] / total
+        assert len(cat_counter) == len(cat_percents), f"{len(cat_counter)} == {len(cat_percents)}"
+
+        return cat_counter, cat_percents
+
+    def get_class_img_counts(self) -> dict[int, Any]:
+        """
+        Returns dictionary whose keys are class_id's and values are the number of images
+        with one or more instances of that class
+        """
+        return self.cat_img_counts
+
+    def get_class_ann_counts(self):
+        """
+        Returns dictionary whose keys are class_id's and values are the number of
+        annotations available for that class
+        """
+        return self.cat_ann_counts
+
+
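
A short sketch of the distribution helper (not part of the committed file; the annotation path is a placeholder):

from h4dlib.data.cocohelpers import CocoClassDistHelper

coco = CocoClassDistHelper("annotations/train_full.json")  # placeholder path
counts, percents = coco.get_class_dist()    # distribution over all images
print(coco.get_class_ann_counts())          # annotations per category id
print(coco.get_class_img_counts())          # images per category id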
+def split(data: List, test_size: float = 0.2, random_state=None) -> Tuple[List[Any], List[Any]]:
+    """
+    Similar to scikit learn, creates train/test splits of the passed in data.
+
+    Args:
+        data: A list or iterable type, of data to split.
+        test_size: value in [0, 1.0] indicating the size of the test split.
+        random_state: an int or RandomState object to seed the numpy randomness.
+
+    Returns: 2-tuple of lists; (train, test), where each item in data has been placed
+        into either the train or test split.
+    """
+    n = len(data)
+    num_test = int(np.ceil(test_size * n))
+    # print(f"n:{n}, num_test:{num_test}")
+    np.random.seed(random_state)
+    test_idx = set(np.random.choice(range(n), num_test))
+    data_test, data_train = list(), list()
+    for idx, datum in enumerate(data):
+        if idx in test_idx:
+            data_test.append(data[idx])
+        else:
+            data_train.append(data[idx])
+    return data_train, data_test
+
+
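
A worked example of split() on a toy list (not part of the committed file). With test_size=0.2 and ten items, ceil(0.2 * 10) = 2 indices are drawn for the test side; because the draw uses numpy's default sampling, repeated indices can make the test split slightly smaller, but every item always lands in exactly one side:

from h4dlib.data.cocohelpers import split

train, test = split(list(range(10)), test_size=0.2, random_state=7)
print(len(train), len(test))                     # typically 8 2
print(sorted(train + test) == list(range(10)))   # True: the two sides partition the data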
+def split2(
+    source_map: Dict,
+    sources: List,
+    test_size: float = 0.2,
+    random_state=None,
+    sample_rate: float = 0.05,
+) -> Tuple[List[Any], List[Any]]:
+    """
+    Similar to scikit learn, creates train/test splits of the passed in data.
+    Assumes that splits need to be sensitive to input source (name prefix). Works by first
+    mapping and splitting data sources with the seed, then sampling randomly within each
+    source with the seed.
+
+    Args:
+        source_map: A dictionary of source prefixes mapped to a list of sorted (for deterministic splits) image file names.
+        sources: A sorted list of source prefixes (for deterministic splits)
+        test_size: value in [0, 1.0] indicating the size of the test split.
+        random_state: an int or RandomState object to seed the numpy randomness.
+        sample_rate: float in [0, 1.0] dictating the fraction of images sampled from each source.
+
+    Returns: 2-tuple of lists; (train, test), where each sampled item has been placed
+        into either the train or test split.
+    """
+
+    num_sources = len(sources)
+    num_test = int(np.ceil(test_size * num_sources))
+
+    np.random.seed(random_state)
+    test_source_idxs = set(np.random.choice(range(num_sources), num_test))
+
+    def sample_from_source(images):
+        num_images = len(images)
+        num_sample = int(np.ceil(sample_rate * num_images))
+        np.random.seed(random_state)
+        sample_image_idx = set(np.random.choice(range(num_images), num_sample))
+        data_test = list()
+        for idx, image in enumerate(images):
+            if idx in sample_image_idx:
+                data_test.append(images[idx])
+        return data_test
+
+    data_test, data_train = list(), list()
+    for idx, datum in enumerate(sources):
+        if idx in test_source_idxs:
+            data_test += sample_from_source(source_map[sources[idx]])
+        else:
+            data_train += sample_from_source(source_map[sources[idx]])
+
+    return data_train, data_test
+
+
+@dataclass
+class bbox:
+    """
+    Data class to store a bounding box annotation instance
+    """
+
+    img_id: int
+    cat_id: int
+    x_center: float
+    y_center: float
+    width: float
+    height: float
+
+
+class Img:
+    """A helper class to store image info and annotations."""
+
+    anns: List[bbox]
+
+    def __init__(self, id: int, filename: str, width: float, height: float) -> None:
+        self.id: int = id
+        self.filename: str = filename
+        self.width: float = width
+        self.height: float = height
+        self.anns = []
+
+    def add_ann(self, ann: bbox) -> None:
+        """Add an annotation to the image"""
+        self.anns.append(ann)
+
+    def get_anns(self) -> List[bbox]:
+        """
+        Gets annotations, possibly filters them in prep for converting to yolo/Darknet
+        format.
+        """
+        return self.anns
+
+    def to_darknet(self, box: bbox) -> bbox:
+        """Convert a BBox from coco to Darknet format"""
+        # COCO bboxes define the topleft corner of the box, but yolo expects the x/y
+        # coords to reference the center of the box. yolo also requires the coordinates
+        # and widths to be scaled by image dims, down to the range [0.0, 1.0]
+        return bbox(
+            self.id,
+            box.cat_id,
+            (box.x_center + (box.width / 2.0)) / self.width,
+            (box.y_center + (box.height / 2.0)) / self.height,
+            box.width / self.width,
+            box.height / self.height,
+        )
+
+    def write_darknet_anns(self, label_file) -> None:
+        """Writes bounding boxes to specified file in yolo/Darknet format"""
+        # It's a bit of a leaky abstraction to have Img handle writing to file, but it's
+        # convenient b/c we have access to img height and width here to scale the bbox
+        # dims. Same goes for .to_darknet()
+        anns = self.get_anns()
+        for box in anns:
+            box = self.to_darknet(box)
+            label_file.write(
+                f"{box.cat_id} {box.x_center} {box.y_center} {box.width} {box.height}\n"
+            )
+
+    def has_anns(self) -> bool:
+        """
+        Returns true if this image instance has at least one bounding box (after any
+        filters are applied)
+        """
+        # TODO: Can add filter to only return true if annotations have non-zero area: I
+        # saw around ~5 or 6 annotations in the v2_train_chipped.json that had zero
+        # area, not sure if those might cause problems for yolo
+        return len(self.anns) > 0
+
+    def get_label_path(self, base_path: Path) -> Path:
+        return base_path / self.filename.replace("jpeg", "txt").replace("jpg", "txt")
+
+    def get_img_path(self, base_path: Path, dataset_name: str, data_split: str) -> Path:
+        return base_path / dataset_name.replace("_tiny", "") / "images" / data_split / self.filename
+
+
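
A worked example of the coco-to-Darknet conversion performed by Img.to_darknet (not part of the committed file). Note that for a COCO-style box the x_center/y_center fields of bbox actually hold the top-left corner; to_darknet converts them to an image-normalized center. For a box [x, y, w, h] = [100, 200, 50, 40] in a 1000x800 image:

from h4dlib.data.cocohelpers import Img, bbox

img = Img(id=0, filename="chip.jpg", width=1000.0, height=800.0)  # placeholder image
coco_box = bbox(img_id=0, cat_id=3, x_center=100, y_center=200, width=50, height=40)
dn = img.to_darknet(coco_box)
# x: (100 + 50/2) / 1000 = 0.125   y: (200 + 40/2) / 800 = 0.275
# w: 50 / 1000 = 0.05              h: 40 / 800 = 0.05
print(dn)  # bbox(img_id=0, cat_id=3, x_center=0.125, y_center=0.275, width=0.05, height=0.05)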
+class CocoToDarknet:
+    """Class that helps convert an MS COCO formatted dataset to yolo/Darknet format"""
+
+    @staticmethod
+    def convert(ann_path: Path, base_path: Path, dataset_name: str, data_split: str) -> None:
+        """Convert specified dataset to Darknet format.
+
+        Details:
+            - Labels are written to base_path/<dataset_name>/labels/<data_split>/*.txt
+            - A file containing the list of category names is written to
+              <base_path>/<dataset_name>.names
+        """
+        coco = COCO(ann_path)
+        images = CocoToDarknet.build_db(coco)
+        # Make paths:
+        labels_path = base_path / dataset_name / "labels" / data_split
+        labels_path.mkdir(parents=True, exist_ok=True)
+        names_path = base_path / f"{dataset_name}.names"
+        image_paths = CocoToDarknet.generate_label_files(
+            images, labels_path, base_path, dataset_name, data_split
+        )
+        CocoToDarknet.generate_image_list(base_path, dataset_name, image_paths, data_split)
+        CocoToDarknet.generate_names(names_path, coco)
+
+    @staticmethod
+    def generate_names(names_path: Path, coco: COCO) -> None:
+        categories = [c["name"] + "\n" for c in coco.dataset["categories"]]
+        with open(names_path, "w") as names_file:
+            names_file.writelines(categories)
+
+    @staticmethod
+    def generate_label_files(
+        images: Dict[int, Img],
+        labels_path: Path,
+        base_path: Path,
+        dataset_name: str,
+        data_split: str,
+    ) -> List[str]:
+        """
+        Generates one .txt file for each image in the coco-formatted dataset. The .txt
+        files contain the annotations in yolo/Darknet format.
+        """
+        # Convert:
+        img_paths = set()
+        for img_id, img in images.items():
+            if img.has_anns():
+                label_path = img.get_label_path(labels_path)
+                with open(label_path, "w") as label_file:
+                    img.write_darknet_anns(label_file)
+                img_path = img.get_img_path(base_path, dataset_name, data_split)
+                assert img_path.exists(), f"Image doesn't exist {img_path}"
+                img_paths.add(str(img_path) + "\n")
+        return list(img_paths)
+
+    @staticmethod
+    def generate_image_list(
+        base_path: Path, dataset_name: str, image_paths: List[str], data_split: str
+    ) -> None:
+        """Generates train.txt, val.txt, etc.: a txt file with the list of image paths."""
+        listing_path = base_path / dataset_name / f"{data_split}.txt"
+        print("Listing path: ", listing_path)
+        with open(listing_path, "w") as listing_file:
+            listing_file.writelines(image_paths)
+
+    @staticmethod
+    def build_db(coco: COCO) -> Dict[int, Img]:
+        """
+        Builds a datastructure of images. All annotations are grouped into their
+        corresponding images to facilitate generating the Darknet formatted metadata.
+
+        Args:
+            coco: a pycocotools.coco COCO instance
+
+        Returns: Dictionary whose keys are image id's, and values are Img instances that
+            are loaded with all the image info and annotations from the coco-formatted
+            json
+        """
+        anns = coco.dataset["annotations"]
+        images: Dict[int, Img] = {}
+        # Build images data structure:
+        for ann in anns:
+            ann = CocoToDarknet.get_ann(ann)
+            if ann.img_id not in images:
+                coco_img = coco.imgs[ann.img_id]  # look the image up by id, not by list position
+                images[ann.img_id] = Img(
+                    ann.img_id,
+                    coco_img["file_name"],
+                    float(coco_img["width"]),
+                    float(coco_img["height"]),
+                )
+            img = images[ann.img_id]
+            img.add_ann(ann)
+        return images
+
+    @staticmethod
+    def get_ann(ann):
+        """
+        Gets a bbox instance from an annotation element pulled from the coco-formatted
+        json
+        """
+        box = ann["bbox"]
+        return bbox(ann["image_id"], ann["category_id"], box[0], box[1], box[2], box[3])
+
+
1303
+ class CocoToComsat:
1304
+ """Class that helps convert an MS COCO formatted dataset to the COMSAT format"""
1305
+
1306
+ @staticmethod
1307
+ def convert(src_images: Path, src_instances: Path, dst_base: Path) -> None:
1308
+ """
1309
+ Convert source coco dataset to Comsat format.
1310
+ """
1311
+ coco = COCO(src_instances)
1312
+
1313
+ # for each image
1314
+ for image in coco.dataset["images"]:
1315
+ # create nested sub directories for this image
1316
+ CocoToComsat.init_nested_dirs(dst_base, image)
1317
+
1318
+ # add image to the "imagery" subfolder
1319
+ CocoToComsat.add_image(src_images, dst_base, image)
1320
+
1321
+ # create "labels" json and add to subfolder
1322
+ CocoToComsat.add_labels(dst_base, image, coco)
1323
+
1324
+ # create "metadata" json and add to subfolder
1325
+ CocoToComsat.add_metadata(dst_base, image)
1326
+
1327
+ @staticmethod
1328
+ def init_nested_dirs(dst_base: Path, image):
1329
+ """
1330
+ Initializes a new set of nested folders.
1331
+ """
1332
+ # Create paths
1333
+ image_id = Path(str(image["id"]))
1334
+ imagery_path = dst_base / image_id / "imagery"
1335
+ labels_path = dst_base / image_id / "labels"
1336
+ metadata_path = dst_base / image_id / "metadata"
1337
+
1338
+ # Make dirs
1339
+ imagery_path.mkdir(parents=True, exist_ok=True)
1340
+ labels_path.mkdir(parents=True, exist_ok=True)
1341
+ metadata_path.mkdir(parents=True, exist_ok=True)
1342
+
1343
+ @staticmethod
1344
+ def add_image(src_images: Path, dst_base: Path, image):
1345
+ """
1346
+ Copies the source image file into this image's "imagery" subfolder.
1347
+ """
1348
+ image_id = Path(str(image["id"]))
1349
+ image_file = Path(image["file_name"])
1350
+ source_path = src_images / image_file
1351
+ imagery_path = dst_base / image_id / "imagery"
1352
+
1353
+ copy(source_path, imagery_path)
1354
+
1355
+ @staticmethod
1356
+ def add_labels(dst_base: Path, image, coco):
1357
+ """
1358
+ Builds a COMSAT GeoJSON FeatureCollection from the image's COCO annotations and writes it to the "labels" subfolder.
1359
+ """
1360
+ comsat_labels = []
1361
+
1362
+ default_comsat_label = {
1363
+ "type": "Feature",
1364
+ "geometry": {
1365
+ "type": "Polygon",
1366
+ "coordinates": [
1367
+ [
1368
+ [39.542, 46.534],
1369
+ [39.542, 46.534],
1370
+ [39.542, 46.534],
1371
+ [39.542, 46.534],
1372
+ ]
1373
+ ],
1374
+ },
1375
+ "properties": {
1376
+ "label": {
1377
+ "name": "Category 1",
1378
+ "ontology_iri": "http://ontology.jhuapl.edu/",
1379
+ },
1380
+ "pixel_coordinates": [[367, 520], [367, 520], [367, 520], [367, 520]],
1381
+ "label_acquisition_date": None,
1382
+ "image_acquisition_date": None,
1383
+ "peer_reviewed": False,
1384
+ },
1385
+ }
1386
+
1387
+ # Get annotations for this image
1388
+ coco_anns = coco.imgToAnns[image["id"]]
1389
+
1390
+ for ann in coco_anns:
1391
+ new_comsat_label = deepcopy(default_comsat_label)
1392
+ comsat_poly_coords = CocoToComsat.coco_to_comsat_poly_coords(ann, image)
1393
+ comsat_pixel_coords = CocoToComsat.coco_to_comsat_pixel_coords(ann)
1394
+ comsat_label_name = CocoToComsat.get_category_name(ann, coco)
1395
+
1396
+ new_comsat_label["geometry"]["coordinates"] = comsat_poly_coords
1397
+ new_comsat_label["properties"]["pixel_coordinates"] = comsat_pixel_coords
1398
+ new_comsat_label["properties"]["label"]["name"] = comsat_label_name
1399
+
1400
+ comsat_labels.append(new_comsat_label)
1401
+
1402
+ root_json = {"type": "FeatureCollection", "features": comsat_labels}
1403
+
1404
+ image_id = str(image["id"])
1405
+ labels_file_name = Path(f"LABELS_{image_id}.json")
1406
+
1407
+ # labels_base = dst_base / image_id / "labels"
1408
+
1409
+ labels_file_path = dst_base / image_id / "labels" / labels_file_name
1410
+
1411
+ # save labels to json
1412
+ with open(labels_file_path, "w") as labels_file:
1413
+ labels_file.write(json.dumps(root_json))
1414
+
1415
+ @staticmethod
1416
+ def coco_to_comsat_pixel_coords(ann):
1417
+ """
1418
+ Reformats a COCO segmentation into COMSAT pixel coordinates.
1419
+
1420
+ - coco poly format is [ [x1, y1, x2, y2, ... ] ]
1421
+ - comsat poly format is [ [ [x1, y1], [x2, y2], ... ] ]
1422
+ """
1423
+ coco_pixel_coords = ann["segmentation"]
1424
+
1425
+ comsat_pixel_coords = []
1426
+
1427
+ for group in coco_pixel_coords:
1428
+ # split the coco pixel coords by even/odd elements and zip
1429
+ comsat_pixel_coords.append([[int(x), int(y)] for x, y in zip(group[::2], group[1::2])])
1430
+
1431
+ return comsat_pixel_coords
1432
+
1433
+ @staticmethod
1434
+ def coco_to_comsat_poly_coords(ann, image):
1435
+ """
1436
+ Reformats a COCO segmentation into COMSAT coordinates expressed as fractions of the image dimensions.
1437
+
1438
+ - coco poly format is [ [x1, y1, x2, y2, ... ] ]
1439
+ - comsat poly format is [ [ [x1, y1], [x2, y2], ... ] ]
1440
+ """
1441
+
1442
+ w = float(image["width"])
1443
+ h = float(image["height"])
1444
+
1445
+ comsat_pixel_coords = CocoToComsat.coco_to_comsat_pixel_coords(ann)
1446
+
1447
+ comsat_poly_coords = []
1448
+
1449
+ for group in comsat_pixel_coords:
1450
+ # divide pixel coords by image dims
1451
+ comsat_poly_coords.append([[x[0] / w, x[1] / h] for x in group])
1452
+
1453
+ return comsat_poly_coords
1454
+
1455
+ @staticmethod
1456
+ def get_category_name(ann, coco):
1457
+ """
1458
+ Returns the category name for the given annotation.
1459
+ """
1460
+
1461
+ cats = coco.loadCats(coco.getCatIds())
1462
+
1463
+ cats.sort(key=lambda c: c["id"])
1464
+
1465
+ # Dictionary to lookup category name from category id:
1466
+ cat_name_lookup = {c["id"]: c["name"] for c in cats}
1467
+
1468
+ return cat_name_lookup[ann["category_id"]]
1469
+
1470
+ @staticmethod
1471
+ def add_metadata(dst_base: Path, image):
1472
+ """
1473
+ Writes a metadata JSON (image id, dimensions, source) into the image's "metadata" subfolder.
1474
+ """
1475
+ root_json = {
1476
+ "image_id": image["id"],
1477
+ "image_width": image["width"],
1478
+ "image_height": image["height"],
1479
+ "image_source": "XVIEW",
1480
+ }
1481
+
1482
+ image_id = str(image["id"])
1483
+ metadata_file_name = f"METADATA_{image_id}.json"
1484
+
1485
+ metadata_file_path = dst_base / image_id / "metadata" / metadata_file_name
1486
+
1487
+ # save labels to json
1488
+ with open(metadata_file_path, "w") as metadata_file:
1489
+ metadata_file.write(json.dumps(root_json))
1490
+
1491
+ @staticmethod
1492
+ def generate_names(names_path: Path, coco: COCO) -> None:
1493
+ categories = [c["name"] + "\n" for c in coco.dataset["categories"]]
1494
+ with open(names_path, "w") as names_file:
1495
+ names_file.writelines(categories)
1496
+
1497
+ @staticmethod
1498
+ def generate_label_files(
1499
+ images: Dict[int, Img],
1500
+ labels_path: Path,
1501
+ base_path: Path,
1502
+ dataset_name: str,
1503
+ data_split: str,
1504
+ ) -> List[str]:
1505
+ """
1506
+ Generates one .txt file for each image in the coco-formatted dataset. The .txt
1507
+ files contain the annotations in yolo/Darknet format.
1508
+ """
1509
+ # Convert:
1510
+ img_paths = set()
1511
+ for img_id, img in images.items():
1512
+ if img.has_anns():
1513
+ label_path = labels_path / img.get_label_path(labels_path)
1514
+ with open(label_path, "w") as label_file:
1515
+ img.write_darknet_anns(label_file)
1516
+ img_path = img.get_img_path(base_path, dataset_name, data_split)
1517
+ assert img_path.exists(), f"Image doesn't exist {img_path}"
1518
+ img_paths.add(str(img_path) + "\n")
1519
+ return list(img_paths)
1520
+
1521
+ @staticmethod
1522
+ def generate_image_list(
1523
+ base_path: Path, dataset_name: str, image_paths: List[str], data_split: str
1524
+ ) -> None:
1525
+ """Generates train.txt, val.txt, etc, txt file with list of image paths."""
1526
+ listing_path = base_path / dataset_name / f"{data_split}.txt"
1527
+ print("Listing path: ", listing_path)
1528
+ with open(listing_path, "w") as listing_file:
1529
+ listing_file.writelines(image_paths)
1530
+
1531
+ @staticmethod
1532
+ def build_db(coco: COCO) -> Dict[int, Img]:
1533
+ """
1534
+ Builds a data structure of images. All annotations are grouped into their
1535
+ corresponding images to facilitate generating the Darknet formatted metadata.
1536
+
1537
+ Args:
1538
+ coco: a pycocotools.coco COCO instance
1539
+
1540
+ Returns: Dictionary whose keys are image ids, and values are Img instances that
1541
+ are loaded with all the image info and annotations from the coco-formatted
1542
+ json
1543
+ """
1544
+ anns = coco.dataset["annotations"]
1545
+ images: Dict[int, Img] = {}
1546
+ # Build images data structure:
1547
+ for i, ann in enumerate(anns):
1548
+ ann = CocoToDarknet.get_ann(ann)
1549
+ if ann.img_id not in images:
1550
+ coco_img = coco.imgs[ann.img_id]  # look up by image id, not list position
1551
+ images[ann.img_id] = Img(
1552
+ ann.img_id,
1553
+ coco_img["file_name"],
1554
+ float(coco_img["width"]),
1555
+ float(coco_img["height"]),
1556
+ )
1557
+ img = images[ann.img_id]
1558
+ img.add_ann(ann)
1559
+ return images
1560
+
1561
+ @staticmethod
1562
+ def get_ann(ann):
1563
+ """
1564
+ Gets a bbox instance from an annotation element pulled from the coco-formatted
1565
+ json
1566
+ """
1567
+ box = ann["bbox"]
1568
+ return bbox(ann["image_id"], ann["category_id"], box[0], box[1], box[2], box[3])
src/xview/category_id_mapping.json ADDED
@@ -0,0 +1,62 @@
1
+ {
2
+ "11": "0",
3
+ "12": "1",
4
+ "13": "2",
5
+ "15": "3",
6
+ "17": "4",
7
+ "18": "5",
8
+ "19": "6",
9
+ "20": "7",
10
+ "21": "8",
11
+ "23": "9",
12
+ "24": "10",
13
+ "25": "11",
14
+ "26": "12",
15
+ "27": "13",
16
+ "28": "14",
17
+ "29": "15",
18
+ "32": "16",
19
+ "33": "17",
20
+ "34": "18",
21
+ "35": "19",
22
+ "36": "20",
23
+ "37": "21",
24
+ "38": "22",
25
+ "40": "23",
26
+ "41": "24",
27
+ "42": "25",
28
+ "44": "26",
29
+ "45": "27",
30
+ "47": "28",
31
+ "49": "29",
32
+ "50": "30",
33
+ "51": "31",
34
+ "52": "32",
35
+ "53": "33",
36
+ "54": "34",
37
+ "55": "35",
38
+ "56": "36",
39
+ "57": "37",
40
+ "59": "38",
41
+ "60": "39",
42
+ "61": "40",
43
+ "62": "41",
44
+ "63": "42",
45
+ "64": "43",
46
+ "65": "44",
47
+ "66": "45",
48
+ "71": "46",
49
+ "72": "47",
50
+ "73": "48",
51
+ "74": "49",
52
+ "76": "50",
53
+ "77": "51",
54
+ "79": "52",
55
+ "83": "53",
56
+ "84": "54",
57
+ "86": "55",
58
+ "89": "56",
59
+ "91": "57",
60
+ "93": "58",
61
+ "94": "59"
62
+ }
src/xview/slice_xview.py ADDED
@@ -0,0 +1,21 @@
1
+ import fire
2
+ from sahi.scripts.slice_coco import slice
3
+
4
+ MAX_WORKERS = 20
5
+ IGNORE_NEGATIVE_SAMPLES = False
6
+
7
+
8
+ def slice_xview(
9
+ image_dir: str, dataset_json_path: str, output_dir: str, slice_size: int, overlap_ratio: float
10
+ ):
11
+ slice(
12
+ image_dir=image_dir,
13
+ dataset_json_path=dataset_json_path,
14
+ output_dir=output_dir,
15
+ slice_size=slice_size,
16
+ overlap_ratio=overlap_ratio,
17
+ )
18
+
19
+
20
+ if __name__ == "__main__":
21
+ fire.Fire(slice_xview)
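slice_xview is a thin wrapper around sahi's slice_coco script, and python-fire exposes its keyword arguments as command-line flags. A hypothetical invocation is sketched below; the paths and slicing parameters are placeholders, and the import assumes the repository root is on PYTHONPATH.

# CLI form (flags map 1:1 to the function arguments):
#   python src/xview/slice_xview.py --image_dir train_images/ \
#       --dataset_json_path coco/train.json --output_dir sliced/ \
#       --slice_size 512 --overlap_ratio 0.2
from src.xview.slice_xview import slice_xview  # hypothetical import path

slice_xview(
    image_dir="train_images/",
    dataset_json_path="coco/train.json",
    output_dir="sliced/",
    slice_size=512,
    overlap_ratio=0.2,
)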
src/xview/xview_class_labels.txt ADDED
@@ -0,0 +1,60 @@
1
+ 11:Fixed-wing Aircraft
2
+ 12:Small Aircraft
3
+ 13:Cargo Plane
4
+ 15:Helicopter
5
+ 17:Passenger Vehicle
6
+ 18:Small Car
7
+ 19:Bus
8
+ 20:Pickup Truck
9
+ 21:Utility Truck
10
+ 23:Truck
11
+ 24:Cargo Truck
12
+ 25:Truck w/Box
13
+ 26:Truck Tractor
14
+ 27:Trailer
15
+ 28:Truck w/Flatbed
16
+ 29:Truck w/Liquid
17
+ 32:Crane Truck
18
+ 33:Railway Vehicle
19
+ 34:Passenger Car
20
+ 35:Cargo Car
21
+ 36:Flat Car
22
+ 37:Tank car
23
+ 38:Locomotive
24
+ 40:Maritime Vessel
25
+ 41:Motorboat
26
+ 42:Sailboat
27
+ 44:Tugboat
28
+ 45:Barge
29
+ 47:Fishing Vessel
30
+ 49:Ferry
31
+ 50:Yacht
32
+ 51:Container Ship
33
+ 52:Oil Tanker
34
+ 53:Engineering Vehicle
35
+ 54:Tower crane
36
+ 55:Container Crane
37
+ 56:Reach Stacker
38
+ 57:Straddle Carrier
39
+ 59:Mobile Crane
40
+ 60:Dump Truck
41
+ 61:Haul Truck
42
+ 62:Scraper/Tractor
43
+ 63:Front loader/Bulldozer
44
+ 64:Excavator
45
+ 65:Cement Mixer
46
+ 66:Ground Grader
47
+ 71:Hut/Tent
48
+ 72:Shed
49
+ 73:Building
50
+ 74:Aircraft Hangar
51
+ 76:Damaged Building
52
+ 77:Facility
53
+ 79:Construction Site
54
+ 83:Vehicle Lot
55
+ 84:Helipad
56
+ 86:Storage Tank
57
+ 89:Shipping container lot
58
+ 91:Shipping Container
59
+ 93:Pylon
60
+ 94:Tower
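These id:name pairs are parsed line by line by xview_to_coco.py below; combined with category_id_mapping.json they give the class names in their remapped, contiguous order. A small sketch follows, assuming the files are read from the repository root as in that script.

import json

with open("src/xview/xview_class_labels.txt", encoding="utf8") as f:
    id_to_name = dict(line.strip().split(":", 1) for line in f if line.strip())

with open("src/xview/category_id_mapping.json") as f:
    remap = json.load(f)  # original xView id (str) -> contiguous id (str)

# Class names ordered by their remapped ids (0..59):
names = [None] * len(remap)
for xview_id, new_id in remap.items():
    names[int(new_id)] = id_to_name[xview_id]

print(names[:3])  # ['Fixed-wing Aircraft', 'Small Aircraft', 'Cargo Plane']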
src/xview/xview_to_coco.py ADDED
@@ -0,0 +1,165 @@
1
+ import random
2
+ from collections import defaultdict
3
+ from pathlib import Path
4
+ from typing import Dict, List
5
+
6
+ import fire
7
+ import numpy as np
8
+ from PIL import Image
9
+ from sahi.utils.coco import Coco, CocoAnnotation, CocoCategory, CocoImage
10
+ from sahi.utils.file import load_json, save_json
11
+ from tqdm import tqdm
12
+
13
+ # fix the seed
14
+ random.seed(13)
15
+
16
+
17
+ def xview_to_coco(
18
+ train_images_dir,
19
+ train_geojson_path,
20
+ output_dir,
21
+ train_split_rate=0.75,
22
+ category_id_remapping=None,
23
+ ):
24
+ """
25
+ Converts xView annotations into COCO format.
26
+
27
+ Args:
28
+ train_images_dir: str
29
+ 'train_images' folder directory
30
+ train_geojson_path: str
31
+ 'xView_train.geojson' file path
32
+ output_dir: str
33
+ Output folder directory
34
+ train_split_rate: float
35
+ Train split ratio
36
+ category_id_remapping: dict
37
+ Used for selecting desired category ids and mapping them.
38
+ If not provided, xView mapping will be used.
39
+ format: str(id) to str(id)
40
+ """
41
+
42
+ # init vars
43
+ category_id_to_name = {}
44
+ with open("src/xview/xview_class_labels.txt", encoding="utf8") as f:
45
+ lines = f.readlines()
46
+ for line in lines:
47
+ category_id = line.split(":")[0]
48
+ category_name = line.split(":")[1].replace("\n", "")
49
+ category_id_to_name[category_id] = category_name
50
+
51
+ if category_id_remapping is None:
52
+ category_id_remapping = load_json("src/xview/category_id_mapping.json")
53
+
54
+
55
+ # init coco object
56
+ coco = Coco()
57
+ # append categories
58
+ for category_id, category_name in category_id_to_name.items():
59
+ if category_id in category_id_remapping.keys():
60
+ remapped_category_id = category_id_remapping[category_id]
61
+ coco.add_category(
62
+ CocoCategory(id=int(remapped_category_id), name=category_name)
63
+ )
64
+
65
+ # parse xview data
66
+ coords, chips, classes, image_name_to_annotation_ind = get_labels(
67
+ train_geojson_path
68
+ )
69
+ image_name_list = get_ordered_image_name_list(image_name_to_annotation_ind)
70
+
71
+ # convert xView data to COCO format
72
+ for image_name in tqdm(image_name_list, "Converting xView data into COCO format"):
73
+ # create coco image object
74
+ width, height = Image.open(Path(train_images_dir) / image_name).size
75
+ coco_image = CocoImage(file_name=image_name, height=height, width=width)
76
+
77
+ annotation_ind_list = image_name_to_annotation_ind[image_name]
78
+
79
+ # iterate over image annotations
80
+ for annotation_ind in annotation_ind_list:
81
+ bbox = coords[annotation_ind].tolist()
82
+ category_id = str(int(classes[annotation_ind].item()))
83
+ coco_bbox = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
84
+ if category_id in category_id_remapping.keys():
85
+ category_name = category_id_to_name[category_id]
86
+ remapped_category_id = category_id_remapping[category_id]
87
+ else:
88
+ continue
89
+ # create coco annotation and append it to coco image
90
+ coco_annotation = CocoAnnotation(
91
+ bbox=coco_bbox,
92
+ category_id=int(remapped_category_id),
93
+ category_name=category_name,
94
+ )
95
+ if coco_annotation.area > 0:
96
+ coco_image.add_annotation(coco_annotation)
97
+ coco.add_image(coco_image)
98
+
99
+ result = coco.split_coco_as_train_val(train_split_rate=train_split_rate)
100
+
101
+ train_json_path = Path(output_dir) / "train.json"
102
+ val_json_path = Path(output_dir) / "val.json"
103
+ save_json(data=result["train_coco"].json, save_path=train_json_path)
104
+ save_json(data=result["val_coco"].json, save_path=val_json_path)
105
+
106
+
107
+ def get_ordered_image_name_list(image_name_to_annotation_ind: Dict):
108
+ image_name_list: List[str] = list(image_name_to_annotation_ind.keys())
109
+
110
+ def get_image_ind(image_name: str):
111
+ return int(image_name.split(".")[0])
112
+
113
+ image_name_list.sort(key=get_image_ind)
114
+
115
+ return image_name_list
116
+
117
+
118
+ def get_labels(fname):
119
+ """
120
+ Gets label data from a geojson label file
121
+ Args:
122
+ fname: file path to an xView geojson label file
123
+ Output:
124
+ Returns three arrays (coords, chips, classes) holding the box coordinates, file names,
125
+ and class ids for each ground truth, plus a dict mapping image names to annotation indices.
126
+ Modified from https://github.com/DIUx-xView.
127
+ """
128
+ data = load_json(fname)
129
+
130
+ coords = np.zeros((len(data["features"]), 4))
131
+ chips = np.zeros((len(data["features"])), dtype="object")
132
+ classes = np.zeros((len(data["features"])))
133
+ image_name_to_annotation_ind = defaultdict(list)
134
+
135
+ for i in tqdm(range(len(data["features"])), "Parsing xView data"):
136
+ if data["features"][i]["properties"]["bounds_imcoords"] != []:
137
+ b_id = data["features"][i]["properties"]["image_id"]
138
+ # https://github.com/DIUx-xView/xView1_baseline/issues/3
139
+ if b_id == "1395.tif":
140
+ continue
141
+ val = np.array(
142
+ [
143
+ int(num)
144
+ for num in data["features"][i]["properties"][
145
+ "bounds_imcoords"
146
+ ].split(",")
147
+ ]
148
+ )
149
+ chips[i] = b_id
150
+ classes[i] = data["features"][i]["properties"]["type_id"]
151
+
152
+ image_name_to_annotation_ind[b_id].append(i)
153
+
154
+ if val.shape[0] != 4:
155
+ print("Issues at %d!" % i)
156
+ else:
157
+ coords[i] = val
158
+ else:
159
+ chips[i] = "None"
160
+
161
+ return coords, chips, classes, image_name_to_annotation_ind
162
+
163
+
164
+ if __name__ == "__main__":
165
+ fire.Fire(xview_to_coco)
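As with slice_xview.py, python-fire turns xview_to_coco's arguments into command-line flags. A hypothetical run is sketched below; the directory names are placeholders, and because the script loads src/xview/xview_class_labels.txt and src/xview/category_id_mapping.json via relative paths, it expects to be launched from the repository root.

#   python src/xview/xview_to_coco.py --train_images_dir train_images/ \
#       --train_geojson_path xView_train.geojson --output_dir coco/ \
#       --train_split_rate 0.75
from src.xview.xview_to_coco import xview_to_coco  # hypothetical import path

xview_to_coco(
    train_images_dir="train_images/",
    train_geojson_path="xView_train.geojson",
    output_dir="coco/",
    train_split_rate=0.75,
)
# Writes coco/train.json and coco/val.json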