def test_eq(self):
    keypoints1 = LabeledKeypoints2D([[1, 1, 2]], category="cat", attributes={"gender": "male"})
    keypoints2 = LabeledKeypoints2D([[1, 1, 2]], category="cat", attributes={"gender": "male"})
    keypoints3 = LabeledKeypoints2D([[1, 2, 2]], category="cat", attributes={"gender": "male"})

    assert keypoints1 == keypoints2
    assert keypoints1 != keypoints3

def test_init(self):
    labeledkeypoints2d = LabeledKeypoints2D(
        [(1, 2)], category="cat", attributes={"gender": "male"}, instance="12345"
    )

    assert labeledkeypoints2d[0] == Keypoint2D(x=1, y=2)
    assert labeledkeypoints2d.category == "cat"
    assert labeledkeypoints2d.attributes == {"gender": "male"}
    assert labeledkeypoints2d.instance == "12345"

def _get_data(image_path: str, annotation: Dict[str, Any]) -> Data:
    data = Data(image_path)
    keypoints = LabeledKeypoints2D()
    # The keypoints are stored as flattened (x, y, v) triplets;
    # out-of-range visibility values fall back to 2.
    for x, y, v in chunked(annotation["keypoints"], 3):
        keypoints.append(Keypoint2D(x, y, v if v in (0, 1, 2) else 2))
    data.label.keypoints2d = [keypoints]
    return data

def LeedsSportsPose(path: str) -> Dataset:
    """`Leeds Sports Pose <http://sam.johnson.io/research/lsp.html>`_ dataset.

    The folder structure should be like::

        <path>
            joints.mat
            images/
                im0001.jpg
                im0002.jpg
                ...

    Arguments:
        path: The root directory of the dataset.

    Raises:
        ModuleImportError: When the module "scipy" can not be found.

    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.

    """
    try:
        from scipy.io import loadmat  # pylint: disable=import-outside-toplevel
    except ModuleNotFoundError as error:
        raise ModuleImportError(module_name=error.name) from error

    root_path = os.path.abspath(os.path.expanduser(path))

    dataset = Dataset(DATASET_NAME)
    dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))
    segment = dataset.create_segment()

    mat = loadmat(os.path.join(root_path, "joints.mat"))
    joints = mat["joints"].T
    image_paths = glob(os.path.join(root_path, "images", "*.jpg"))
    for image_path in image_paths:
        data = Data(image_path)
        data.label.keypoints2d = []
        index = int(os.path.basename(image_path)[2:6]) - 1  # get image index from "im0001.jpg"
        keypoints = LabeledKeypoints2D()
        for keypoint in joints[index]:
            keypoints.append(Keypoint2D(keypoint[0], keypoint[1], int(not keypoint[2])))
        data.label.keypoints2d.append(keypoints)
        segment.append(data)

    return dataset

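# A minimal usage sketch (not part of the loader above): it assumes the Leeds
# Sports Pose archive has been extracted to the hypothetical local path "./LSP".
# The loader creates a single unnamed segment, so the first sample's keypoints
# can be inspected like this:
dataset = LeedsSportsPose("./LSP")
segment = dataset[0]
for keypoint in segment[0].label.keypoints2d[0]:
    print(keypoint.x, keypoint.y, keypoint.v)
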
def test_dumps(self):
    labeledkeypoints2d = LabeledKeypoints2D(
        [(1, 1, 2)], category="cat", attributes={"gender": "male"}, instance="12345"
    )

    assert labeledkeypoints2d.dumps() == {
        "keypoints2d": [
            {"x": 1, "y": 1, "v": 2},
        ],
        "category": "cat",
        "attributes": {"gender": "male"},
        "instance": "12345",
    }

def _get_data(filename: str, beauty_score: str, image_path: str, label_path: str) -> Data:
    stem = os.path.splitext(os.path.basename(filename))[0]
    data = Data(os.path.join(image_path, filename))
    keypoints2d = LabeledKeypoints2D()
    keypoints2d.attributes = {"beauty_score": float(beauty_score)}
    keypoints2d.category = _CATEGORY_NAMES[stem[:2]]
    # Each .pts file packs one leading int followed by 172 floats (86 x/y pairs).
    with open(os.path.join(label_path, f"{stem}.pts"), "rb") as fp:
        points = struct.unpack("i172f", fp.read())
    for x, y in chunked(islice(points, 1, None), 2):
        keypoints2d.append(Keypoint2D(float(x), float(y)))
    data.label.keypoints2d = [keypoints2d]
    return data

def _get_keypoints2d(
    person_keypoints_annotations: Dict[int, Any], image_id: int, categories: Dict[int, str]
) -> List[LabeledKeypoints2D]:
    if image_id not in person_keypoints_annotations:
        return []

    keypoints2d: List[LabeledKeypoints2D] = []
    for annotation in person_keypoints_annotations[image_id]:
        points = chunked(annotation["keypoints"], 3)
        category = categories[annotation["category_id"]]
        keypoints2d.append(LabeledKeypoints2D(points, category=category))
    return keypoints2d

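# A hypothetical call of the helper above, illustrating the expected inputs:
# annotations grouped by image id, COCO-style flattened (x, y, v) keypoints,
# and a category-id-to-name mapping. All values below are made up.
person_keypoints_annotations = {42: [{"keypoints": [100, 120, 2, 105, 118, 1], "category_id": 1}]}
categories = {1: "person"}
labels = _get_keypoints2d(person_keypoints_annotations, 42, categories)
assert labels[0].category == "person"
assert len(labels[0]) == 2
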
def _get_data_part1(root_path: str, animals: Iterable[str]) -> Iterator[Data]:
    try:
        import xmltodict  # pylint: disable=import-outside-toplevel
    except ModuleNotFoundError as error:
        raise ModuleImportError(module_name=error.name) from error

    for animal in animals:
        for image_path in glob(os.path.join(root_path, "keypoint_image_part1", animal, "*.jpg")):
            data = Data(image_path, target_remote_path=f"{animal}/{os.path.basename(image_path)}")

            for annotation_path in glob(
                os.path.join(
                    root_path,
                    "PASCAL2011_animal_annotation",
                    animal,
                    f"{os.path.splitext(os.path.basename(image_path))[0]}_*.xml",
                )
            ):
                with open(annotation_path, encoding="utf-8") as fp:
                    labels: Any = xmltodict.parse(fp.read())

                box2d = labels["annotation"]["visible_bounds"]
                data.label.box2d = [
                    LabeledBox2D.from_xywh(
                        x=float(box2d["@xmin"]),
                        y=float(box2d["@ymin"]),
                        width=float(box2d["@width"]),
                        height=float(box2d["@height"]),
                        category=animal,
                    )
                ]

                keypoints2d: List[Tuple[float, float, int]] = [()] * 20  # type: ignore[list-item]
                for keypoint in labels["annotation"]["keypoints"]["keypoint"]:
                    keypoints2d[_KEYPOINT_TO_INDEX[keypoint["@name"]]] = (
                        float(keypoint["@x"]),
                        float(keypoint["@y"]),
                        int(keypoint["@visible"]),
                    )
                data.label.keypoints2d = [LabeledKeypoints2D(keypoints2d, category=animal)]

            yield data

def _get_label(eye_keypoints_path: str, face_keypoints_path: str) -> List[LabeledKeypoints2D]:
    eye_keypoints = LabeledKeypoints2D(category="EyePosition")
    with open(eye_keypoints_path, "r", encoding="utf-8") as fp:
        fp.readline()  # The first line is like: #LX LY RX RY
        lx, ly, rx, ry = map(int, fp.readline().split())
    eye_keypoints.append(Keypoint2D(lx, ly))
    eye_keypoints.append(Keypoint2D(rx, ry))

    face_keypoints = LabeledKeypoints2D(category="Face")
    with open(face_keypoints_path, "r", encoding="utf-8") as fp:
        # The annotation file is like:
        # 1 version: 1
        # 2 n_points: 20
        # 3 {
        # 4 159.128 108.541
        # ...
        # 24 }
        for line in islice(fp, 3, 23):
            x, y = map(float, line.split())
            face_keypoints.append(Keypoint2D(x, y))

    return [eye_keypoints, face_keypoints]

def _get_data(keypoints_info: List[str], image_path: str, parsing_path: str) -> Data:
    stem = os.path.splitext(keypoints_info[0])[0]
    data = Data(os.path.join(image_path, f"{stem}.jpg"))
    label = data.label
    label.semantic_mask = SemanticMask(os.path.join(parsing_path, f"{stem}.png"))

    keypoints = LabeledKeypoints2D()
    # Non-numeric coordinates mark missing keypoints; store them as (0, 0, 0).
    for x, y, v in chunked(islice(keypoints_info, 1, None), 3):
        keypoints.append(
            Keypoint2D(float(x), float(y), 1 - int(v)) if x.isnumeric() else Keypoint2D(0, 0, 0)
        )
    label.keypoints2d = [keypoints]
    return data

def test_loads(self):
    contents = {
        "keypoints2d": [
            {"x": 1, "y": 1, "v": 2},
        ],
        "category": "cat",
        "attributes": {"gender": "male"},
        "instance": "12345",
    }
    labeledkeypoints2d = LabeledKeypoints2D.loads(contents)

    assert labeledkeypoints2d[0] == Keypoint2D(x=1, y=1, v=2)
    assert labeledkeypoints2d.category == "cat"
    assert labeledkeypoints2d.attributes == {"gender": "male"}
    assert labeledkeypoints2d.instance == "12345"

def _get_data_part2(root_path: str, animals: Iterable[str]) -> Iterator[Data]:
    try:
        import xmltodict  # pylint: disable=import-outside-toplevel
    except ModuleNotFoundError as error:
        raise ModuleImportError(module_name=error.name) from error

    for animal in animals:
        for image_path in glob(os.path.join(root_path, "animalpose_image_part2", animal, "*.jpeg")):
            data = Data(image_path, target_remote_path=f"{animal}/{os.path.basename(image_path)}")
            annotation_path = os.path.join(
                root_path,
                "animalpose_anno2",
                animal,
                f"{os.path.splitext(os.path.basename(image_path))[0]}.xml",
            )

            with open(annotation_path, encoding="utf-8") as fp:
                labels: Any = xmltodict.parse(fp.read())

            box2d = labels["annotation"]["visible_bounds"]
            data.label.box2d = [
                LabeledBox2D.from_xywh(
                    x=float(box2d["@xmin"]),
                    y=float(box2d["@xmax"]),  # xmax means ymin in the annotation
                    width=float(box2d["@width"]),
                    height=float(box2d["@height"]),
                    category=animal,
                )
            ]

            keypoints2d = LabeledKeypoints2D(category=animal)
            for keypoint in labels["annotation"]["keypoints"]["keypoint"]:
                keypoints2d.append(
                    Keypoint2D(float(keypoint["@x"]), float(keypoint["@y"]), int(keypoint["@visible"]))
                )
            data.label.keypoints2d = [keypoints2d]

            yield data

def _get_keypoint2ds(label_path: str) -> Dict[str, LabeledKeypoints2D]:
    all_keypoint2ds = {}
    for file_path in (
        os.path.join(label_path, "loose_landmark_test.csv"),
        os.path.join(label_path, "loose_landmark_train.csv"),
    ):
        # The normal format of each line of the file is
        # NAME_ID,P1X,P1Y,P2X,P2Y,P3X,P3Y,P4X,P4Y,P5X,P5Y
        # "n000001/0001_01",75.81253,110.2077,103.1778,104.6074,...
        # "n000001/0002_01",194.9206,211.5826,278.5339,206.3202,...
        # "n000001/0003_01",80.4145,74.07401,111.7425,75.42367,...
        # ...
        with open(file_path, encoding="utf-8") as fp:
            for row in islice(csv.reader(fp), 1, None):
                name_id = row.pop(0).strip('"')
                all_keypoint2ds[name_id] = LabeledKeypoints2D(chunked(map(float, row), 2))
    return all_keypoint2ds

def _get_data(path: str, annotations: Any, flag: bool) -> Iterator[Tuple[Data, str]]:
    filepath_to_data: Dict[str, Data] = {}

    for annotation in annotations:
        filepath = annotation["filepath"][0]
        keypoints = LabeledKeypoints2D(
            annotation["coords"].T[_VALID_KEYPOINT_INDICES],
            attributes={"poselet_hit_idx": annotation["poselet_hit_idx"].T.tolist()},
        )
        box2d = LabeledBox2D(*annotation["torsobox"][0].tolist())

        if filepath not in filepath_to_data:
            data = Data(os.path.join(path, "images", filepath))
            data.label.keypoints2d = [keypoints]
            data.label.box2d = [box2d]

            attribute = {"currframe": int(annotation["currframe"][0][0])}
            if flag:
                attribute["isunchecked"] = bool(annotation["isunchecked"])
            data.label.classification = Classification(
                category=annotation["moviename"][0], attributes=attribute
            )
            filepath_to_data[filepath] = data

            if annotation["istrain"]:
                segment_name = "train"
            elif annotation["istest"]:
                segment_name = "test"
            else:
                segment_name = "bad"
            yield data, segment_name
        else:
            image_data = filepath_to_data[filepath]
            image_data.label.keypoints2d.append(keypoints)
            image_data.label.box2d.append(box2d)

def HKD(path: str) -> Dataset:
    """`HKD <http://vlm1.uta.edu/~srujana/HandPoseDataset/HK_Dataset.html>`_ dataset.

    The file structure should be like::

        <path>
            AnnotatedData_subject1/
                CropImages/
                    subject1_fingercount_cropframe_2.jpg
                    subject1_fingercount_cropframe_3.jpg
                    ...
                    subject1_fingercount_cropframe_210.jpg
                subject1_fingercount_2D_Annotations_cropped.csv
            AnnotatedData_subject2/
                CropImages/
                    subject2_fingercount_cropframe_2.jpg
                    subject2_fingercount_cropframe_3.jpg
                    ...
                    subject2_fingercount_cropframe_207.jpg
                subject2_fingercount_2D_Annotations_cropped.csv
            AnnotatedData_subject3/
                CropImages/
                    fingerappose_subject3_cropframe_2.jpg
                    fingerappose_subject3_cropframe_3.jpg
                    ...
                    fingerappose_subject3_cropframe_235.jpg
                fingerappose_subject3_2D_Annotations_cropped.csv
            AnnotatedData_subject4/
                CropImages/
                    subject4_cropframe_2.jpg
                    subject4_cropframe_3.jpg
                    ...
                    subject4_cropframe_147.jpg
                subject4_2D_Annotations_cropped.csv

    Arguments:
        path: The root directory of the dataset.

    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.

    """
    root_path = os.path.abspath(os.path.expanduser(path))
    dataset = Dataset(DATASET_NAME)
    dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))

    for segment_name, (csv_name, image_name_template) in _SEGMENT_INFO.items():
        segment = dataset.create_segment(segment_name)
        segment_path = os.path.join(root_path, f"AnnotatedData_{segment_name}")
        csv_path = os.path.join(root_path, segment_path, csv_name)
        with open(csv_path, encoding="utf-8") as fp:
            # The csv files should be like::
            #
            #     subject1_fingercount_2D_Annotations_cropped.csv
            #     2,4.523,28.569,136.8,181.37,154.63,80.348,130.86,57.322,...
            #     3,4.523,32.731,135.31,176.17,147.2,80.348,123.43,65.493,...
            #     4,-2.413,39.668,149.41,164.28,143.47,70.692,137.53,64.75,...
            #     5,-1.026,31.344,138.77,178.4,136.54,78.863,135.06,75.149,...
            #     ...
            for csv_line in csv.reader(fp):
                image_path = os.path.join(
                    segment_path, "CropImages", image_name_template.format(csv_line.pop(0))
                )
                data = Data(image_path)
                data.label.keypoints2d = [LabeledKeypoints2D(chunked(map(float, csv_line), 2))]
                segment.append(data)

    return dataset

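# A minimal usage sketch (not part of the loader above): it assumes the HKD
# data sits at the hypothetical local path "./HKD". The segment names come
# from the _SEGMENT_INFO mapping, so the sketch just iterates over whatever
# segments the loader created and checks the keypoint count of each sample.
dataset = HKD("./HKD")
for segment in dataset:
    for data in segment:
        assert len(data.label.keypoints2d[0]) > 0
    print(segment.name, len(segment))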