Code Example #1
    def load_data(self):
        db = COCO(osp.join(self.data_path,
                           '3DPW_' + self.data_split + '.json'))

        datalist = []
        for aid in db.anns.keys():
            ann = db.anns[aid]

            image_id = ann['image_id']
            img = db.loadImgs(image_id)[0]
            img_width, img_height = img['width'], img['height']
            sequence_name = img['sequence']
            img_name = img['file_name']
            img_path = osp.join(self.data_path, 'imageFiles', sequence_name,
                                img_name)

            # camera intrinsics (focal length, principal point)
            cam_param = {
                k: np.array(v, dtype=np.float32)
                for k, v in img['cam_param'].items()
            }
            # 3D joints in camera space, projected onto the image plane
            joint_cam = np.array(ann['joint_cam'],
                                 dtype=np.float32).reshape(-1, 3)
            joint_img = cam2pixel(joint_cam, cam_param['focal'],
                                  cam_param['princpt'])
            # a joint is valid only if its projection lands inside the image
            joint_valid = ((joint_img[:, 0] >= 0) *
                           (joint_img[:, 0] < img_width) *
                           (joint_img[:, 1] >= 0) *
                           (joint_img[:, 1] < img_height)).astype(np.float32)

            root_cam = joint_cam[self.root_idx]
            root_img = joint_img[self.root_idx]
            root_vis = joint_valid[self.root_idx]

            # skip samples whose bbox is rejected by process_bbox
            bbox = process_bbox(ann['bbox'], img_width, img_height)
            if bbox is None:
                continue
            area = bbox[2] * bbox[3]

            datalist.append({
                'img_path': img_path,
                'img_id': image_id,
                'ann_id': aid,
                'bbox': bbox,
                'area': area,
                'root_img': root_img,
                'root_cam': root_cam,
                'root_vis': root_vis,
                'f': cam_param['focal'],
                'c': cam_param['princpt']
            })

        return datalist
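
The loaders in this section call a cam2pixel helper that is not shown. Below is a minimal sketch under the standard pinhole camera model, returning an (N, 3) array as the call sites in Code Examples #1 and #3 expect; the tuple unpacking in Code Example #2 instead suggests a variant that returns the x, y, z arrays separately. The exact implementation in the source may differ.

    import numpy as np

    def cam2pixel(cam_coord, f, c):
        # pinhole projection: u = X / Z * f_x + c_x, v = Y / Z * f_y + c_y
        x = cam_coord[:, 0] / cam_coord[:, 2] * f[0] + c[0]
        y = cam_coord[:, 1] / cam_coord[:, 2] * f[1] + c[1]
        z = cam_coord[:, 2]  # keep metric depth as the third component
        return np.stack((x, y, z), axis=1)
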
Code Example #2
    def load_data(self):
        subject_list = self.get_subject()
        sampling_ratio = self.get_subsampling_ratio()

        # aggregate annotations from each subject
        db = COCO()
        for subject in subject_list:
            with open(osp.join(self.annot_path,
                               f'Human36M_subject{subject}.json')) as f:
                annot = json.load(f)
            if len(db.dataset) == 0:
                for k, v in annot.items():
                    db.dataset[k] = v
            else:
                for k, v in annot.items():
                    db.dataset[k] += v
        db.createIndex()

        if self.data_split == 'test' and not cfg.use_gt_info:
            print("Get bounding box and root from " + self.human_bbox_root_dir)
            bbox_root_result = {}
            with open(self.human_bbox_root_dir) as f:
                annot = json.load(f)
            for entry in annot:
                bbox_root_result[str(entry['image_id'])] = {
                    'bbox': np.array(entry['bbox']),
                    'root': np.array(entry['root_cam'])
                }
        else:
            print("Get bounding box and root from groundtruth")

        data = []
        for aid in db.anns.keys():
            ann = db.anns[aid]

            image_id = ann['image_id']
            img = db.loadImgs(image_id)[0]

            # check subject and frame_idx
            subject = img['subject']
            frame_idx = img['frame_idx']
            if subject not in subject_list:
                continue
            if frame_idx % sampling_ratio != 0:
                continue

            img_path = osp.join(self.img_dir, img['file_name'])
            img_width, img_height = img['width'], img['height']
            cam_param = img['cam_param']
            R = np.array(cam_param['R'])
            t = np.array(cam_param['t'])
            f = np.array(cam_param['f'])
            c = np.array(cam_param['c'])

            # project world coordinate to cam, image coordinate space
            joint_world = np.array(ann['keypoints_world'])
            joint_world = self.add_thorax(joint_world)
            joint_cam = np.zeros((self.joint_num, 3))
            for j in range(self.joint_num):
                joint_cam[j] = world2cam(joint_world[j], R, t)
            joint_img = np.zeros((self.joint_num, 3))
            joint_img[:, 0], joint_img[:, 1], joint_img[:, 2] = cam2pixel(
                joint_cam, f, c)
            # express each joint's depth relative to the root joint's depth
            joint_img[:, 2] = joint_img[:, 2] - joint_cam[self.root_idx, 2]
            joint_vis = np.ones((self.joint_num, 1))

            if self.data_split == 'test' and not cfg.use_gt_info:
                # the bbox from RootNet is already aspect-ratio preserved and extended
                bbox = bbox_root_result[str(image_id)]['bbox']
                root_cam = bbox_root_result[str(image_id)]['root']
            else:
                bbox = np.array(ann['bbox'])
                root_cam = joint_cam[self.root_idx]

                # aspect ratio preserving bbox
                w = bbox[2]
                h = bbox[3]
                c_x = bbox[0] + w / 2.
                c_y = bbox[1] + h / 2.
                aspect_ratio = cfg.input_shape[1] / cfg.input_shape[0]
                if w > aspect_ratio * h:
                    h = w / aspect_ratio
                elif w < aspect_ratio * h:
                    w = h * aspect_ratio
                bbox[2] = w * 1.25
                bbox[3] = h * 1.25
                bbox[0] = c_x - bbox[2] / 2.
                bbox[1] = c_y - bbox[3] / 2.

            data.append({
                'img_path': img_path,
                'img_id': image_id,
                'bbox': bbox,
                'joint_img': joint_img,  # [org_img_x, org_img_y, depth - root_depth]
                'joint_cam': joint_cam,  # [X, Y, Z] in camera coordinate
                'joint_vis': joint_vis,
                'root_cam': root_cam,  # [X, Y, Z] in camera coordinate
                'f': f,
                'c': c
            })

        return data
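
world2cam is likewise assumed rather than shown. A sketch of the usual rigid world-to-camera transform (X_cam = R @ X_world + t, one common convention for Human3.6M extrinsics), shaped to accept both the single-joint call above and the batched calls in Code Examples #3 and #4; the source implementation may differ:

    import numpy as np

    def world2cam(world_coord, R, t):
        # rigid transform X_cam = R @ X_world + t, applied row-wise
        world_coord = np.atleast_2d(world_coord)    # (N, 3)
        return world_coord @ R.T + t.reshape(1, 3)  # (N, 3)
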
Code Example #3
    def load_data(self):
        print('Load data of H36M Protocol ' + str(self.protocol))

        subject_list = self.get_subject()
        sampling_ratio = self.get_subsampling_ratio()

        # aggregate annotations from each subject
        db = COCO()
        cameras = {}
        joints = {}
        for subject in subject_list:
            # data load
            with open(osp.join(self.annot_path,
                               f'Human36M_subject{subject}_data.json')) as f:
                annot = json.load(f)
            if len(db.dataset) == 0:
                for k, v in annot.items():
                    db.dataset[k] = v
            else:
                for k, v in annot.items():
                    db.dataset[k] += v
            # camera load
            with open(osp.join(self.annot_path,
                               f'Human36M_subject{subject}_camera.json')) as f:
                cameras[str(subject)] = json.load(f)
            # joint coordinate load
            with open(osp.join(self.annot_path,
                               f'Human36M_subject{subject}_joint_3d.json')) as f:
                joints[str(subject)] = json.load(f)
        db.createIndex()

        if self.data_split == 'test' and not cfg.use_gt_info:
            print("Get bounding box and root from " + self.human_bbox_root_dir)
            bbox_root_result = {}
            with open(self.human_bbox_root_dir) as f:
                annot = json.load(f)
            for entry in annot:
                bbox_root_result[str(entry['image_id'])] = {
                    'bbox': np.array(entry['bbox']),
                    'root': np.array(entry['root_cam'])
                }
        else:
            print("Get bounding box and root from groundtruth")

        data = []
        for aid in db.anns.keys():
            ann = db.anns[aid]
            image_id = ann['image_id']
            img = db.loadImgs(image_id)[0]
            img_path = osp.join(self.img_dir, img['file_name'])
            img_width, img_height = img['width'], img['height']

            # check subject and frame_idx
            subject = img['subject']
            frame_idx = img['frame_idx']
            if subject not in subject_list:
                continue
            if frame_idx % sampling_ratio != 0:
                continue

            # camera parameter
            cam_idx = img['cam_idx']
            cam_param = cameras[str(subject)][str(cam_idx)]
            R = np.array(cam_param['R'], dtype=np.float32)
            t = np.array(cam_param['t'], dtype=np.float32)
            f = np.array(cam_param['f'], dtype=np.float32)
            c = np.array(cam_param['c'], dtype=np.float32)

            # project world coordinate to cam, image coordinate space
            action_idx = img['action_idx']
            subaction_idx = img['subaction_idx']
            frame_idx = img['frame_idx']
            joint_world = np.array(
                joints[str(subject)][str(action_idx)][str(subaction_idx)][str(frame_idx)],
                dtype=np.float32)
            joint_world = self.add_thorax(joint_world)
            joint_cam = world2cam(joint_world, R, t)
            joint_img = cam2pixel(joint_cam, f, c)
            # express each joint's depth relative to the root joint's depth
            joint_img[:, 2] = joint_img[:, 2] - joint_cam[self.root_idx, 2]
            joint_vis = np.ones((self.joint_num, 1))

            if self.data_split == 'test' and not cfg.use_gt_info:
                # the bbox from RootNet is already aspect-ratio preserved and extended
                bbox = bbox_root_result[str(image_id)]['bbox']
                root_cam = bbox_root_result[str(image_id)]['root']
            else:
                bbox = process_bbox(np.array(ann['bbox']), img_width,
                                    img_height)
                if bbox is None:
                    continue
                root_cam = joint_cam[self.root_idx]

            data.append({
                'img_path': img_path,
                'img_id': image_id,
                'bbox': bbox,
                'joint_img': joint_img,  # [org_img_x, org_img_y, depth - root_depth]
                'joint_cam': joint_cam,  # [X, Y, Z] in camera coordinate
                'joint_vis': joint_vis,
                'root_cam': root_cam,  # [X, Y, Z] in camera coordinate
                'f': f,
                'c': c
            })

        return data
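
process_bbox is also assumed by Code Examples #1, #3, and #4. A sketch consistent with the inline bbox logic in Code Example #2: clip the box to the image, reject degenerate boxes, then extend it to the network input's aspect ratio and scale it up. The input_shape and scale parameters stand in for the cfg values used above and are illustrative assumptions, not the source's signature.

    import numpy as np

    def process_bbox(bbox, img_width, img_height, input_shape=(256, 256), scale=1.25):
        # clip the box to the image and reject boxes with no visible area
        x, y, w, h = bbox
        x1, y1 = max(0, x), max(0, y)
        x2, y2 = min(img_width - 1, x + w), min(img_height - 1, y + h)
        if x2 <= x1 or y2 <= y1:
            return None
        w, h = x2 - x1, y2 - y1

        # extend to the target aspect ratio around the box center, then scale
        c_x, c_y = x1 + w / 2., y1 + h / 2.
        aspect_ratio = input_shape[1] / input_shape[0]
        if w > aspect_ratio * h:
            h = w / aspect_ratio
        elif w < aspect_ratio * h:
            w = h * aspect_ratio
        w, h = w * scale, h * scale
        return np.array([c_x - w / 2., c_y - h / 2., w, h], dtype=np.float32)
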
Code Example #4
    def load_data(self):
        print('Load data of H36M Protocol ' + str(self.protocol))
        subject_list = self.get_subject()
        sampling_ratio = self.get_subsampling_ratio()

        # aggregate annotations from each subject
        db = COCO()
        cameras = {}
        joints = {}
        for subject in subject_list:
            # data load
            with open(osp.join(self.annot_path,
                               f'Human36M_subject{subject}_data.json')) as f:
                annot = json.load(f)
            if len(db.dataset) == 0:
                for k, v in annot.items():
                    db.dataset[k] = v
            else:
                for k, v in annot.items():
                    db.dataset[k] += v
            # camera load
            with open(osp.join(self.annot_path,
                               f'Human36M_subject{subject}_camera.json')) as f:
                cameras[str(subject)] = json.load(f)
            # joint coordinate load
            with open(osp.join(self.annot_path,
                               f'Human36M_subject{subject}_joint_3d.json')) as f:
                joints[str(subject)] = json.load(f)
        db.createIndex()

        if self.data_split == 'test' and not cfg.use_gt_bbox:
            print("Get bounding box from " + self.human_bbox_dir)
            bbox_result = {}
            with open(self.human_bbox_dir) as f:
                annot = json.load(f)
            for entry in annot:
                bbox_result[str(entry['image_id'])] = np.array(entry['bbox'])
        else:
            print("Get bounding box from groundtruth")

        data = []
        for aid in db.anns.keys():
            ann = db.anns[aid]
            image_id = ann['image_id']
            img = db.loadImgs(image_id)[0]
            img_path = osp.join(self.img_dir, img['file_name'])
            img_width, img_height = img['width'], img['height']

            # check subject and frame_idx
            subject = img['subject']
            frame_idx = img['frame_idx']
            if subject not in subject_list:
                continue
            if frame_idx % sampling_ratio != 0:
                continue

            # camera parameter
            cam_idx = img['cam_idx']
            cam_param = cameras[str(subject)][str(cam_idx)]
            R = np.array(cam_param['R'], dtype=np.float32)
            t = np.array(cam_param['t'], dtype=np.float32)
            f = np.array(cam_param['f'], dtype=np.float32)
            c = np.array(cam_param['c'], dtype=np.float32)

            # project world coordinate to cam, image coordinate space
            action_idx = img['action_idx']
            subaction_idx = img['subaction_idx']
            frame_idx = img['frame_idx']
            root_world = np.array(
                joints[str(subject)][str(action_idx)][str(subaction_idx)][str(frame_idx)],
                dtype=np.float32)[self.root_idx]
            root_cam = world2cam(root_world[None, :], R, t)[0]
            root_img = cam2pixel(root_cam[None, :], f, c)[0]
            joint_vis = np.ones((self.joint_num, 1))
            root_vis = np.array(ann['keypoints_vis'])[self.root_idx, None]

            # bbox load
            if self.data_split == 'test' and not cfg.use_gt_bbox:
                bbox = bbox_result[str(image_id)]
            else:
                bbox = np.array(ann['bbox'])
            bbox = process_bbox(bbox, img_width, img_height)
            if bbox is None:
                continue
            area = bbox[2] * bbox[3]

            data.append({
                'img_path': img_path,
                'img_id': image_id,
                'bbox': bbox,
                'area': area,
                'root_img': root_img,  # [org_img_x, org_img_y, depth]
                'root_cam': root_cam,
                'root_vis': root_vis,
                'f': f,
                'c': c
            })

        return data
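
Each loader returns a plain list of per-sample dicts, so a Dataset can index it directly. A hypothetical consumer to illustrate the shape of the returned data (the PoseDataset wrapper and its field access are illustrative, not from the source):

    from torch.utils.data import Dataset

    class PoseDataset(Dataset):
        def __init__(self, loader):
            # loader is any object exposing load_data() as in the examples above
            self.datalist = loader.load_data()

        def __len__(self):
            return len(self.datalist)

        def __getitem__(self, idx):
            d = self.datalist[idx]
            # a real pipeline would read d['img_path'] and crop with d['bbox'];
            # here we just return the stored metadata
            return d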