Example #1
    def get_obj_verts_faces(self, idx):
        obj = self.objnames[idx]
        trans = self.objtransforms[idx]
        verts = self.split_objects[obj]['verts']
        # Bring the object vertices into camera coordinates
        trans_verts = fhbutils.transform_obj_verts(verts, trans, self.cam_extr)
        objfaces = self.split_objects[obj]['faces']
        if self.override_scale:
            # Center the vertices and inscribe them in a sphere of
            # radius 100 mm (10 cm)
            trans_verts = trans_verts - trans_verts.mean(0)
            trans_verts = 100 * trans_verts / np.linalg.norm(trans_verts, axis=1).max()
        return np.array(trans_verts).astype(
            np.float32), np.array(objfaces).astype(np.int16)
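For context, a minimal sketch of the kind of transform fhbutils.transform_obj_verts is assumed to perform above: lift the vertices to homogeneous coordinates, apply the object pose trans, then the camera extrinsic. The helper name and the unit handling are assumptions taken from the call site; the real fhbutils implementation may differ.

    import numpy as np

    def transform_obj_verts_sketch(verts, trans, cam_extr):
        # Hypothetical stand-in for fhbutils.transform_obj_verts.
        # verts: (N, 3) object vertices, trans: (4, 4) object pose,
        # cam_extr: (4, 4) camera extrinsic, all in consistent units.
        hom_verts = np.concatenate([verts, np.ones((verts.shape[0], 1))], axis=1)
        # Object pose first, then camera extrinsic; drop the homogeneous coordinate.
        return cam_extr.dot(trans.dot(hom_verts.T)).T[:, :3]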
Example #2
    def load_dataset(self):
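        # Build a suffix that encodes the dataset configuration so that each
        # setting gets its own cache file.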
        suffix = ""
        if self.use_objects:
            if self.filter_no_contact:
                suffix = "{}filter_dist_{}".format(suffix, self.filter_thresh)
            else:
                suffix = "{}no_filter".format(suffix)
        if self.split_type == "objects" and self.use_objects:
            suffix = "{}_obj_{}".format(suffix, self.test_object)
        if not self.use_objects and self.split_type == "subjects":
            if self.remove_objects:
                suffix = "{}_hand_without_annot_objs".format(suffix)
            else:
                suffix = "{}_hand_all".format(suffix)
        if self.split_type == "subjects":
            if self.original_subject_split:
                suffix = suffix + "_or_subjects"
            else:
                suffix = suffix + "_my_subjects"
        cache_path = os.path.join(
            self.cache_folder,
            "{}_{}_{}_top{}_filt{}.pkl".format(
                self.split,
                self.mini_factor,
                suffix,
                self.topology,
                self.filter_object,
            ),
        )
        if os.path.exists(cache_path) and self.use_cache:
            with open(cache_path, "rb") as cache_f:
                annotations = pickle.load(cache_f)
            print(
                "Cached information for dataset {} loaded from {}".format(
                    self.name, cache_path
                )
            )

        else:
            subjects_infos = {}
            for subject in self.subjects:
                subject_info_path = os.path.join(
                    self.info_root, "{}_info.txt".format(subject)
                )
                subjects_infos[subject] = {}
                with open(subject_info_path, "r") as subject_f:
                    raw_lines = subject_f.readlines()
                    for line in raw_lines[3:]:
                        line = " ".join(line.split())
                        action, action_idx, length = line.strip().split(" ")
                        subjects_infos[subject][(action, action_idx)] = length
            skel_info = get_skeletons(self.skeleton_root, subjects_infos)

            with open(self.info_split, "r") as annot_f:
                lines_raw = annot_f.readlines()
            train_list, test_list, all_infos = fhbutils.get_action_train_test(
                lines_raw, subjects_infos
            )
            if self.topology is None:
                all_objects = ["juice_bottle", "liquid_soap", "milk", "salt"]
            elif int(self.topology) == 0:
                all_objects = ["juice_bottle", "liquid_soap", "salt"]
            elif int(self.topology) == 1:
                all_objects = ["milk"]
            if self.filter_object:
                all_objects = [self.filter_object]

            if self.use_objects:
                self.fhb_objects = fhbutils.load_objects(
                    object_names=all_objects
                )
                obj_infos = fhbutils.load_object_infos()

            if self.split_type == "action":
                if self.split == "train":
                    sample_list = train_list
                elif self.split == "test":
                    sample_list = test_list
                elif self.split == "all":
                    sample_list = train_list + test_list
                else:
                    raise ValueError(
                        "Split {} not in [train|test|all]".format(self.split)
                    )
            elif self.split_type == "subjects":
                if self.original_subject_split:
                    if self.split == "train":
                        subjects = ["Subject_1", "Subject_3", "Subject_4"]
                    elif self.split == "test":
                        subjects = ["Subject_2", "Subject_5", "Subject_6"]
                    else:
                        raise ValueError(
                            "Split {} not in [train|test]".format(self.split)
                        )
                else:
                    if self.split == "train":
                        subjects = [
                            "Subject_1",
                            "Subject_2",
                            "Subject_3",
                            "Subject_4",
                        ]
                    elif self.split == "val":
                        subjects = ["Subject_5"]
                    elif self.split == "test":
                        subjects = ["Subject_6"]
                    else:
                        raise ValueError(
                            "Split {} not in [train|val|test]".format(
                                self.split
                            )
                        )
                self.subjects = subjects
                print(subjects)
                sample_list = all_infos
            elif self.split_type == "objects":
                if self.use_objects:
                    test_objects = {
                        self.test_object: self.fhb_objects.pop(
                            self.test_object
                        )
                    }
                    train_objects = self.fhb_objects
                    if self.split == "train":
                        self.split_objects = train_objects
                    elif self.split == "test":
                        self.split_objects = test_objects
                    elif self.split == "all":
                        self.split_objects = {**train_objects, **test_objects}
                    else:
                        raise ValueError("Split {} not in [train|test]")
                    print(self.split_objects.keys())
                sample_list = all_infos
            else:
                raise ValueError(
                    "split_type {} not in [action|objects|subjects]".format(
                        self.split_type
                    )
                )
            if self.split_type != "subjects":
                self.subjects = [
                    "Subject_1",
                    "Subject_2",
                    "Subject_3",
                    "Subject_4",
                    "Subject_5",
                    "Subject_6",
                ]
            if self.use_objects and self.split_type != "objects":
                self.split_objects = self.fhb_objects

            image_names = []
            joints2d = []
            joints3d = []
            hand_sides = []
            clips = []
            sample_infos = []
            if self.use_objects:
                objnames = []
                objtransforms = []
            for subject, action_name, seq_idx, frame_idx in sample_list:
                img_path = os.path.join(
                    self.rgb_root,
                    subject,
                    action_name,
                    seq_idx,
                    "color",
                    self.rgb_template.format(frame_idx),
                )
                skel = skel_info[subject][(action_name, seq_idx)][frame_idx]
                skel = skel[self.reorder_idx]

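                # Lift the joints to homogeneous coordinates and map them into
                # the camera frame with the extrinsic matrix.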
                skel_hom = np.concatenate(
                    [skel, np.ones([skel.shape[0], 1])], 1
                )
                skel_camcoords = (
                    self.cam_extr.dot(skel_hom.transpose())
                    .transpose()[:, :3]
                    .astype(np.float32)
                )
                if subject in self.subjects:
                    if self.use_objects:
                        if (
                            subject in obj_infos
                            and (action_name, seq_idx, frame_idx)
                            in obj_infos[subject]
                        ):
                            obj, trans = obj_infos[subject][
                                (action_name, seq_idx, frame_idx)
                            ]
                            if obj in self.split_objects:
                                if self.filter_no_contact:
                                    verts = self.split_objects[obj]["verts"]
                                    trans_verts = fhbutils.transform_obj_verts(
                                        verts, trans, self.cam_extr
                                    )
                                    all_dists = cdist(
                                        trans_verts, skel_camcoords
                                    )
                                    if all_dists.min() > self.filter_thresh:
                                        continue
                                clips.append((subject, action_name, seq_idx))
                                objtransforms.append(trans)
                                objnames.append(obj)
                            else:
                                continue
                        else:
                            # Skip samples without objects if object mode
                            continue
                    else:
                        if self.remove_objects:
                            # Remove samples with object annotations
                            wrong_object = False
                            for obj in all_objects:
                                if obj in action_name:
                                    wrong_object = True
                            if wrong_object:
                                continue
                else:
                    continue

                joints3d.append(skel_camcoords)
                image_names.append(img_path)
                sample_infos.append(
                    {
                        "subject": subject,
                        "action_name": action_name,
                        "seq_idx": seq_idx,
                        "frame_idx": frame_idx,
                    }
                )
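                # Project the camera-space joints with the intrinsics, then
                # apply the perspective divide by depth to get 2D pixels.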
                hom_2d = (
                    np.array(self.cam_intr)
                    .dot(skel_camcoords.transpose())
                    .transpose()
                )
                skel2d = (hom_2d / hom_2d[:, 2:])[:, :2]
                joints2d.append(skel2d.astype(np.float32))
                hand_sides.append("right")

            # Optionally subsample the dataset (mini_factor), then assemble the labels
            if self.mini_factor:
                idxs = list(range(len(image_names)))
                mini_nb = int(len(image_names) * self.mini_factor)
                random.Random(1).shuffle(idxs)
                idxs = idxs[:mini_nb]
                image_names = [image_names[idx] for idx in idxs]
                joints2d = [joints2d[idx] for idx in idxs]
                joints3d = [joints3d[idx] for idx in idxs]
                hand_sides = [hand_sides[idx] for idx in idxs]
                sample_infos = [sample_infos[idx] for idx in idxs]

                if self.use_objects:
                    objnames = [objnames[idx] for idx in idxs]
                    objtransforms = [objtransforms[idx] for idx in idxs]
            annotations = {
                "image_names": image_names,
                "joints2d": joints2d,
                "joints3d": joints3d,
                "hand_sides": hand_sides,
                "sample_infos": sample_infos,
            }
            if self.use_objects:
                annotations["objnames"] = objnames
                annotations["objtransforms"] = objtransforms
                annotations["split_objects"] = self.split_objects
                print("clip_nb: {}".format(len(set(clips))))
            with open(cache_path, "wb") as fid:
                pickle.dump(annotations, fid)
            print(
                "Wrote cache for dataset {} to {}".format(
                    self.name, cache_path
                )
            )

        # Expose the loaded annotations as attributes
        self.image_names = annotations["image_names"]
        self.joints2d = annotations["joints2d"]
        self.joints3d = annotations["joints3d"]
        self.hand_sides = annotations["hand_sides"]
        self.sample_infos = annotations["sample_infos"]
        if self.use_objects:
            self.objnames = annotations["objnames"]
            self.objtransforms = annotations["objtransforms"]
            self.split_objects = annotations["split_objects"]
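For reference, the filter_no_contact branch above keeps a sample only when at least one object vertex lies within filter_thresh of a hand joint. A standalone sketch of that check (the function name and the 5 mm default are illustrative; distances are assumed to be in millimetres, matching the skeleton annotations):

    from scipy.spatial.distance import cdist

    def hand_touches_object(obj_verts, hand_joints, thresh_mm=5.0):
        # obj_verts: (V, 3) object vertices in camera coordinates.
        # hand_joints: (J, 3) hand joints in the same frame and units.
        all_dists = cdist(obj_verts, hand_joints)  # (V, J) pairwise distances
        return all_dists.min() <= thresh_mm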
Example #3
    def load_dataset(self):
        suffix = ''
        if self.use_objects:
            if self.filter_no_contact:
                suffix = '{}filter_dist_{}'.format(suffix, self.filter_thresh)
            else:
                suffix = '{}no_filter'.format(suffix)
        if self.split_type == 'objects' and self.use_objects:
            suffix = '{}_obj_{}'.format(suffix, self.test_object)
        if not self.use_objects and self.split_type == 'subjects':
            if self.remove_objects:
                suffix = '{}_hand_without_annot_objs'.format(suffix)
            else:
                suffix = '{}_hand_all'.format(suffix)
        if self.split_type == 'subjects':
            if self.original_subject_split:
                suffix = suffix + '_or_subjects'
            else:
                suffix = suffix + '_my_subjects'
        cache_path = os.path.join(self.cache_folder, '{}_{}_{}_top{}_filt{}.pkl'.format(
            self.split, self.mini_factor, suffix, self.topology, self.filter_object))
        if os.path.exists(cache_path) and self.use_cache:
            with open(cache_path, 'rb') as cache_f:
                annotations = pickle.load(cache_f)
            print('Cached information for dataset {} loaded from {}'.format(
                self.name, cache_path))

        else:
            subjects_infos = {}
            for subject in self.subjects:
                subject_info_path = os.path.join(self.info_root,
                                                 '{}_info.txt'.format(subject))
                subjects_infos[subject] = {}
                with open(subject_info_path, 'r') as subject_f:
                    raw_lines = subject_f.readlines()
                    for line in raw_lines[3:]:
                        line = ' '.join(line.split())
                        action, action_idx, length = line.strip().split(' ')
                        subjects_infos[subject][(action, action_idx)] = length
            skel_info = get_skeletons(self.skeleton_root, subjects_infos)

            with open(self.info_split, 'r') as annot_f:
                lines_raw = annot_f.readlines()
            train_list, test_list, all_infos = fhbutils.get_action_train_test(
                lines_raw, subjects_infos)
            if self.topology is None:
                all_objects = ['juice_bottle', 'liquid_soap', 'milk', 'salt']
            elif int(self.topology) == 0:
                all_objects = ['juice_bottle', 'liquid_soap', 'salt']
            elif int(self.topology) == 1:
                all_objects = ['milk']
            if self.filter_object:
                all_objects = [self.filter_object]

            if self.use_objects:
                self.fhb_objects = fhbutils.load_objects(
                    object_names=all_objects)
                obj_infos = fhbutils.load_object_infos()

            if self.split_type == 'action':
                if self.split == 'train':
                    sample_list = train_list
                elif self.split == 'test':
                    sample_list = test_list
                elif self.split == 'all':
                    sample_list = train_list + test_list
                else:
                    raise ValueError(
                        'Split {} not valid for fhbhands, should be [train|test|all]'.
                        format(self.split))
            elif self.split_type == 'subjects':
                if self.original_subject_split:
                    if self.split == 'train':
                        subjects = ['Subject_1', 'Subject_3', 'Subject_4']
                    elif self.split == 'test':
                        subjects = ['Subject_2', 'Subject_5', 'Subject_6']
                    else:
                        raise ValueError(
                            'Split {} not valid for fhbhands split_type subjects, should be [train|test]'.
                            format(self.split))
                else:
                    if self.split == 'train':
                        subjects = [
                            'Subject_1', 'Subject_2', 'Subject_3', 'Subject_4'
                        ]
                    elif self.split == 'val':
                        subjects = ['Subject_5']
                    elif self.split == 'test':
                        subjects = ['Subject_6']
                    else:
                        raise ValueError(
                            'Split {} not valid for fhbhands split_type subjects, should be [train|val|test]'.
                            format(self.split))
                self.subjects = subjects
                print(subjects)
                sample_list = all_infos
            elif self.split_type == 'objects':
                if self.use_objects:
                    test_objects = {
                        self.test_object:
                        self.fhb_objects.pop(self.test_object)
                    }
                    train_objects = self.fhb_objects
                    if self.split == 'train':
                        self.split_objects = train_objects
                    elif self.split == 'test':
                        self.split_objects = test_objects
                    elif self.split == 'all':
                        self.split_objects = {**train_objects, **test_objects}
                    else:
                        raise ValueError(
                            'Split {} not valid for fhbhands split_type objects, should be in [train|test|all]'.
                            format(self.split))
                    print(self.split_objects.keys())
                sample_list = all_infos
            else:
                raise ValueError(
                    'split_type should be in [action|objects|subjects], got {}'.
                    format(self.split_type))
            if self.split_type != 'subjects':
                self.subjects = [
                    'Subject_1', 'Subject_2', 'Subject_3', 'Subject_4',
                    'Subject_5', 'Subject_6'
                ]
            if self.use_objects and self.split_type != 'objects':
                self.split_objects = self.fhb_objects

            image_names = []
            joints2d = []
            joints3d = []
            hand_sides = []
            clips = []
            sample_infos = []
            if self.use_objects:
                objnames = []
                objtransforms = []
            for subject, action_name, seq_idx, frame_idx in sample_list:
                img_path = os.path.join(self.rgb_root, subject, action_name,
                                        seq_idx, 'color',
                                        self.rgb_template.format(frame_idx))
                skel = skel_info[subject][(action_name, seq_idx)][frame_idx]
                skel = skel[self.reorder_idx]

                skel_hom = np.concatenate([skel,
                                           np.ones([skel.shape[0], 1])], 1)
                skel_camcoords = self.cam_extr.dot(skel_hom.transpose(
                )).transpose()[:, :3].astype(np.float32)
                if subject in self.subjects:
                    if self.use_objects:
                        if subject in obj_infos and (
                                action_name, seq_idx,
                                frame_idx) in obj_infos[subject]:
                            obj, trans = obj_infos[subject][(action_name,
                                                             seq_idx,
                                                             frame_idx)]
                            if obj in self.split_objects:
                                if self.filter_no_contact:
                                    verts = self.split_objects[obj]['verts']
                                    trans_verts = fhbutils.transform_obj_verts(verts, trans, self.cam_extr)
                                    all_dists = cdist(trans_verts, skel_camcoords)
                                    if all_dists.min() > self.filter_thresh:
                                        continue
                                clips.append((subject, action_name, seq_idx))
                                objtransforms.append(trans)
                                objnames.append(obj)
                            else:
                                continue
                        else:
                            # Skip samples without objects if object mode
                            continue
                    else:
                        if self.remove_objects:
                            # Remove samples with object annotations
                            wrong_object = False
                            for obj in all_objects:
                                if obj in action_name:
                                    wrong_object = True
                            if wrong_object:
                                continue
                else:
                    continue

                joints3d.append(skel_camcoords)
                image_names.append(img_path)
                sample_infos.append({
                    'subject': subject,
                    'action_name': action_name,
                    'seq_idx': seq_idx,
                    'frame_idx': frame_idx
                })
                hom_2d = np.array(self.cam_intr).dot(
                    skel_camcoords.transpose()).transpose()
                skel2d = (hom_2d / hom_2d[:, 2:])[:, :2]
                joints2d.append(skel2d.astype(np.float32))
                hand_sides.append('right')

            # Optionally subsample the dataset (mini_factor), then assemble the labels
            if self.mini_factor:
                idxs = list(range(len(image_names)))
                mini_nb = int(len(image_names) * self.mini_factor)
                random.Random(1).shuffle(idxs)
                idxs = idxs[:mini_nb]
                image_names = [image_names[idx] for idx in idxs]
                joints2d = [joints2d[idx] for idx in idxs]
                joints3d = [joints3d[idx] for idx in idxs]
                hand_sides = [hand_sides[idx] for idx in idxs]
                sample_infos = [sample_infos[idx] for idx in idxs]

                if self.use_objects:
                    objnames = [objnames[idx] for idx in idxs]
                    objtransforms = [objtransforms[idx] for idx in idxs]
            annotations = {
                'image_names': image_names,
                'joints2d': joints2d,
                'joints3d': joints3d,
                'hand_sides': hand_sides,
                'sample_infos': sample_infos,
            }
            if self.use_objects:
                annotations['objnames'] = objnames
                annotations['objtransforms'] = objtransforms
                annotations['split_objects'] = self.split_objects
                print('clip_nb: {}'.format(len(set(clips))))
            with open(cache_path, 'wb') as fid:
                pickle.dump(annotations, fid)
            print('Wrote cache for dataset {} to {}'.format(
                self.name, cache_path))

        # Expose the loaded annotations as attributes
        self.image_names = annotations['image_names']
        self.joints2d = annotations['joints2d']
        self.joints3d = annotations['joints3d']
        self.hand_sides = annotations['hand_sides']
        self.sample_infos = annotations['sample_infos']
        if self.use_objects:
            self.objnames = annotations['objnames']
            self.objtransforms = annotations['objtransforms']
            self.split_objects = annotations['split_objects']
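For illustration, the cache filename produced by the suffix logic above for one hypothetical configuration (the cache folder, threshold and mini_factor values below are made up):

    import os

    # Assumed settings: use_objects=True, filter_no_contact with a 5 mm threshold,
    # split_type='subjects' with the non-original subject split, topology=None,
    # filter_object=None, split='train', mini_factor=0.1.
    suffix = "filter_dist_5" + "_my_subjects"
    cache_path = os.path.join(
        "cache/fhbhands",
        "{}_{}_{}_top{}_filt{}.pkl".format("train", 0.1, suffix, None, None))
    print(cache_path)
    # cache/fhbhands/train_0.1_filter_dist_5_my_subjects_topNone_filtNone.pkl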