Example #1
    def __init__(self,
                 use_cache=False,
                 mini_factor=None,
                 class_name='mobile_phone',
                 use_annots=True,
                 scale_factor=1.2):
        """
        Args:
            filter_no_contact: remove data where hand not in contact with object
            filter_thresh: min distance between hand and object to consider contact (mm)
        """
        super().__init__()
        self.all_queries = [
            BaseQueries.images, BaseQueries.joints3d, BaseQueries.sides,
            BaseQueries.objpoints3d
        ]
        trans_queries = get_trans_queries(self.all_queries)
        self.all_queries.extend(trans_queries)
        self.scale_factor = scale_factor
        self.split = 'all'
        class_dict = {
            'mobile_phone': ['o{}'.format(idx) for idx in range(6, 11)],
            'ball': ['o{}'.format(idx) for idx in range(31, 36)],
            'light_bulb': ['o{}'.format(idx) for idx in range(16, 21)],
            'marker': ['o{}'.format(idx) for idx in range(36, 41)],
            'can': ['o{}'.format(idx) for idx in range(21, 26)],
            'remote_control': ['o{}'.format(idx) for idx in range(46, 51)],
            'cups': ['o{}'.format(idx) for idx in range(41, 46)]
        }
        if class_name not in class_dict:
            raise ValueError('{} should be in {}'.format(
                class_name, class_dict.keys()))
        self.sessions = ['s{}'.format(idx) for idx in range(1, 12)]
        self.class_name = class_name
        self.class_keys = class_dict[class_name]

        # Set cache path
        self.use_cache = use_cache
        self.cache_folder = os.path.join('data', 'cache', 'core50')
        os.makedirs(self.cache_folder, exist_ok=True)
        self.name = 'core50'
        self.mini_factor = mini_factor

        self.root = '/sequoia/data2/dataset/handatasets/Core50'
        self.annot_root = os.path.join(self.root, 'core50_350x350_Annot')
        self.rgb_root = os.path.join(self.root, 'core50_350x350')
        self.depth_root = os.path.join(self.root, 'core50_350x350_DepthMap')
        self.load_dataset()

        print('Got {} samples for class {}'.format(len(self.image_names),
                                                   class_name))

        # get paired links as neighbouring joints
        self.links = [(0, 1)]
        # Values from https://github.com/OpenKinect/libfreenect2/issues/41
        self.depth_intrinsic = [[365.456, 0, 254.878], [0, 365.456, 205.395],
                                [0, 0, 1]]
        self.color_intrinsic = [[1060.707250708333, 0, 956.354471815484],
                                [0, 1058.608326305465, 518.9784429882449],
                                [0, 0, 1]]
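
The depth_intrinsic and color_intrinsic matrices above follow the usual pinhole layout [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]. A minimal sketch of how such a matrix maps camera-space 3D joints to pixel coordinates (plain numpy, not part of the class above; the joints3d array is a hypothetical input):

import numpy as np

# Color intrinsics quoted in the example above (libfreenect2 defaults)
color_intrinsic = np.array([[1060.707250708333, 0, 956.354471815484],
                            [0, 1058.608326305465, 518.9784429882449],
                            [0, 0, 1]])

def project_points(points3d, intrinsic):
    """Project (N, 3) camera-space points to (N, 2) pixel coordinates."""
    hom2d = intrinsic.dot(np.asarray(points3d).T).T  # (N, 3) homogeneous image coords
    return hom2d[:, :2] / hom2d[:, 2:]               # divide by depth

# Hypothetical hand joints roughly half a meter in front of the camera
joints3d = np.random.rand(21, 3) * 0.1 + np.array([0, 0, 0.5])
joints2d = project_points(joints3d, color_intrinsic)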
Example #2
    def __init__(self,
                 split='train',
                 joint_nb=21,
                 scale_factor=2.2,
                 version=1,
                 side='right'):
        """
        Args:
            filter_no_contact: remove data where hand not in contact with object
            filter_thresh: min distance between hand and object to consider contact (mm)
        """
        super().__init__()
        self.all_queries = [
            BaseQueries.images, BaseQueries.joints3d, BaseQueries.sides,
            BaseQueries.objpoints3d
        ]
        trans_queries = get_trans_queries(self.all_queries)
        self.side = side
        self.all_queries.extend(trans_queries)
        self.scale_factor = scale_factor
        self.version = version
        self.root = os.path.join(
            '/sequoia/data2/dataset/handatasets/yanaimages/v{}'.format(
                self.version))

        self.name = 'yanademo_v{}'.format(self.version)
        self.joint_nb = joint_nb
        self.split = split

        self.load_dataset()

        print('Got {} samples for split {}'.format(len(self.image_names),
                                                   self.split))

        # get paired links as neighbouring joints
        self.links = [(0, 1, 2, 3, 4), (0, 5, 6, 7, 8), (0, 9, 10, 11, 12),
                      (0, 13, 14, 15, 16), (0, 17, 18, 19, 20)]

        # Load normalization bone values
        mean_file = 'bones_synthgrasps_root_wrist.pkl'
        with open(
                os.path.join(
                    '/sequoia/data1/yhasson/code/pose_3d/handobjectdatasets/misc',
                    'stats', mean_file), 'rb') as p_f:
            grasp_data = pickle.load(p_f)
        self.mano_means = grasp_data['means']
Example #3
    def __init__(
        self,
        split="train",
        split_type="subjects",
        original_subject_split=True,
        joint_nb=21,
        use_cache=False,
        mini_factor=None,
        use_objects=True,
        remove_objects=None,  # !! overridden for now
        test_object="juice_bottle",
        filter_no_contact=True,
        filter_thresh=10,
        topology=None,
        filter_object=None,
        override_scale=False,
    ):
        """
        Args:
            topology: if 0, juice_bottle, salt and liquid_soap; if 1, milk
        """
        super().__init__()
        self.all_queries = [
            BaseQueries.images,
            BaseQueries.joints2d,
            BaseQueries.joints3d,
            BaseQueries.sides,
            BaseQueries.camintrs,
            BaseQueries.meta,
        ]
        self.use_objects = use_objects
        self.filter_no_contact = filter_no_contact
        self.filter_thresh = filter_thresh
        self.override_scale = override_scale

        if self.use_objects:
            self.all_queries.append(BaseQueries.objverts3d)
            self.all_queries.append(BaseQueries.objpoints2d)
            self.all_queries.append(BaseQueries.objfaces)
        if self.use_objects:  # Overriding
            self.remove_objects = False
        else:
            self.remove_objects = False

        self.topology = topology
        self.test_object = test_object
        trans_queries = get_trans_queries(self.all_queries)
        self.all_queries.extend(trans_queries)
        # Set cache path
        self.use_cache = use_cache
        self.cache_folder = os.path.join("data", "cache", "fhb")
        os.makedirs(self.cache_folder, exist_ok=True)
        self.cam_extr = np.array(
            [
                [0.999988496304, -0.00468848412856, 0.000982563360594, 25.7],
                [0.00469115935266, 0.999985218048, -0.00273845880292, 1.22],
                [-0.000969709653873, 0.00274303671904, 0.99999576807, 3.902],
                [0, 0, 0, 1],
            ]
        )
        self.cam_intr = np.array(
            [
                [1395.749023, 0, 935.732544],
                [0, 1395.749268, 540.681030],
                [0, 0, 1],
            ]
        )

        self.reorder_idx = np.array(
            [
                0,
                1,
                6,
                7,
                8,
                2,
                9,
                10,
                11,
                3,
                12,
                13,
                14,
                4,
                15,
                16,
                17,
                5,
                18,
                19,
                20,
            ]
        )
        self.name = "fhb"
        self.joint_nb = joint_nb
        self.mini_factor = mini_factor
        split_opts = ["action", "objects", "subjects"]
        self.subjects = [
            "Subject_1",
            "Subject_2",
            "Subject_3",
            "Subject_4",
            "Subject_5",
            "Subject_6",
        ]
        if split_type not in split_opts:
            raise ValueError(
                "Split for dataset {} should be in {}, got {}".format(
                    self.name, split_opts, split_type
                )
            )

        self.split_type = split_type
        self.original_subject_split = original_subject_split

        self.root = "./datasymlinks/fhbhands"
        self.info_root = os.path.join(self.root, "Subjects_info")
        self.info_split = os.path.join(
            self.root, "data_split_action_recognition.txt"
        )
        self.rgb_root = os.path.join(self.root, "Video_files_480")
        self.skeleton_root = os.path.join(self.root, "Hand_pose_annotation_v1")
        self.filter_object = filter_object
        # Get file prefixes for images and annotations
        self.split = split
        self.rgb_template = "color_{:04d}.jpeg"
        # Joints are numbered from tip to base, we want opposite
        self.idxs = [
            0,
            4,
            3,
            2,
            1,
            8,
            7,
            6,
            5,
            12,
            11,
            10,
            9,
            16,
            15,
            14,
            13,
            20,
            19,
        ]
        self.load_dataset()

        print(
            "Got {} samples for split {}".format(
                len(self.image_names), self.split
            )
        )

        # get paired links as neighbouring joints
        self.links = [
            (0, 1, 2, 3, 4),
            (0, 5, 6, 7, 8),
            (0, 9, 10, 11, 12),
            (0, 13, 14, 15, 16),
            (0, 17, 18, 19, 20),
        ]
Example #4
    def __init__(self,
                 split='train',
                 root='/sequoia/data2/dataset/handatasets/stereohands',
                 joint_nb=21,
                 use_cache=False,
                 gt_detections=False):
        # Set cache path
        self.split = split
        self.use_cache = use_cache
        self.cache_folder = os.path.join('data', 'cache', 'stereohands')
        os.makedirs(self.cache_folder, exist_ok=True)
        self.gt_detections = gt_detections
        self.root = root
        self.joint_nb = joint_nb
        self.all_queries = [
            BaseQueries.manoidxs, BaseQueries.images, BaseQueries.joints2d,
            BaseQueries.joints3d, BaseQueries.sides
        ]
        trans_queries = get_trans_queries(self.all_queries)
        self.all_queries.extend(trans_queries)
        self.name = 'stereohands'

        self.manoidxs = list(range(1, 21))

        # Get file prefixes for images and annotations
        self.intr = np.array([[822.79041, 0, 318.47345],
                              [0, 822.79041, 250.31296], [0, 0, 1]])
        self.rgb_folder = os.path.join(root, "images")
        self.label_folder = os.path.join(root, "labels")
        self.right_template = 'BB_right_{}.png'
        self.left_template = 'BB_left_{}.png'

        # get paired links as neighbouring joints
        self.links = [(0, 1, 2, 3, 4), (0, 5, 6, 7, 8), (0, 9, 10, 11, 12),
                      (0, 13, 14, 15, 16), (0, 17, 18, 19, 20)]
        if split == 'train':
            self.sequences = [
                'B2Counting', 'B2Random', 'B3Counting', 'B3Random',
                'B4Counting', 'B4Random', 'B5Counting', 'B5Random',
                'B6Counting', 'B6Random'
            ]
        elif split == 'test':
            self.sequences = ['B1Counting', 'B1Random']
        elif split == 'val':
            self.sequences = ['B2Counting', 'B2Random']
        elif split == 'train_val':
            self.sequences = [
                'B3Counting', 'B3Random', 'B4Counting', 'B4Random',
                'B5Counting', 'B5Random', 'B6Counting', 'B6Random'
            ]
        elif split == 'all':
            self.sequences = [
                'B1Counting', 'B1Random', 'B2Counting', 'B2Random',
                'B3Counting', 'B3Random', 'B4Counting', 'B4Random',
                'B5Counting', 'B5Random', 'B6Counting', 'B6Random'
            ]
        else:
            raise ValueError('split {} not in [train|test|val|train_val|all]'.format(split))
        self.split = split
        self.center_path = os.path.join(root, "detections",
                                        'centers_{}.txt'.format(self.split))
        self.scale_path = os.path.join(root, "detections",
                                       'scales_{}.txt'.format(self.split))
        self.bbox_path = os.path.join(root, "detections",
                                      'bboxes_{}.txt'.format(self.split))
        self.load_dataset()

        # Load normalization bone values
        mean_file = 'bones_synthgrasps_root_palm.pkl'
        with open(
                os.path.join(
                    '/sequoia/data1/yhasson/code/pose_3d/handobjectdatasets/misc',
                    'stats', mean_file), 'rb') as p_f:
            grasp_data = pickle.load(p_f)
        self.mano_means = grasp_data['means']
Example #5
    def __init__(
        self,
        split="train",
        root=None,
        joint_nb=21,
        mini_factor=None,
        use_cache=False,
        root_palm=False,
        mode="obj",
        segment=False,
        override_scale=False,
        use_external_points=True,
        apply_obj_transform=True,
        segmented_depth=True,
        shapenet_root="datasymlinks/ShapeNetCore.v2",
        obman_root="datasymlinks/obman",
    ):
        # Set cache path
        self.split = split
        obman_root = os.path.join(obman_root, split)
        self.override_scale = override_scale  # Use fixed scale
        self.root_palm = root_palm
        self.mode = mode
        self.segment = segment
        self.apply_obj_transform = apply_obj_transform
        self.segmented_depth = segmented_depth

        self.use_external_points = use_external_points
        if mode == "all" and not self.override_scale:
            self.all_queries = [
                BaseQueries.images,
                BaseQueries.joints2d,
                BaseQueries.joints3d,
                BaseQueries.sides,
                BaseQueries.segms,
                BaseQueries.verts3d,
                BaseQueries.hand_pcas,
                BaseQueries.hand_poses,
                BaseQueries.camintrs,
                BaseQueries.depth,
            ]
            if use_external_points:
                self.all_queries.append(BaseQueries.objpoints3d)
            else:
                self.all_queries.append(BaseQueries.objverts3d)
                self.all_queries.append(BaseQueries.objfaces)
            self.rgb_folder = os.path.join(obman_root, "rgb")
        elif mode == "obj" or (self.mode == "all" and self.override_scale):
            self.all_queries = [BaseQueries.images, BaseQueries.camintrs]
            if use_external_points:
                self.all_queries.append(BaseQueries.objpoints3d)
            else:
                self.all_queries.append(BaseQueries.objpoints3d)
                self.all_queries.append(BaseQueries.objverts3d)
                self.all_queries.append(BaseQueries.objfaces)
            if mode == "obj":
                self.rgb_folder = os.path.join(obman_root, "rgb_obj")
            else:
                self.rgb_folder = os.path.join(obman_root, "rgb")
        elif mode == "hand":
            self.all_queries = [
                BaseQueries.images,
                BaseQueries.joints2d,
                BaseQueries.joints3d,
                BaseQueries.sides,
                BaseQueries.segms,
                BaseQueries.verts3d,
                BaseQueries.hand_pcas,
                BaseQueries.hand_poses,
                BaseQueries.camintrs,
                BaseQueries.depth,
            ]
            self.rgb_folder = os.path.join(obman_root, "rgb_hand")
        else:
            raise ValueError(
                "Mode should be in [all|obj|hand], got {}".format(mode))

        trans_queries = get_trans_queries(self.all_queries)
        self.all_queries.extend(trans_queries)

        # Cache information
        self.use_cache = use_cache
        self.name = "obman"
        self.cache_folder = os.path.join("data", "cache", self.name)
        os.makedirs(self.cache_folder, exist_ok=True)
        self.mini_factor = mini_factor
        self.cam_intr = np.array([[480.0, 0.0, 128.0], [0.0, 480.0, 128.0],
                                  [0.0, 0.0, 1.0]]).astype(np.float32)

        self.cam_extr = np.array([
            [1.0, 0.0, 0.0, 0.0],
            [0.0, -1.0, 0.0, 0.0],
            [0.0, 0.0, -1.0, 0.0],
        ]).astype(np.float32)

        self.joint_nb = joint_nb
        self.segm_folder = os.path.join(obman_root, "segm")

        self.prefix_template = "{:08d}"
        self.meta_folder = os.path.join(obman_root, "meta")
        self.coord2d_folder = os.path.join(obman_root, "coords2d")

        # Define links on skeleton
        self.links = [
            (0, 1, 2, 3, 4),
            (0, 5, 6, 7, 8),
            (0, 9, 10, 11, 12),
            (0, 13, 14, 15, 16),
            (0, 17, 18, 19, 20),
        ]

        # Object info
        self.shapenet_template = os.path.join(
            shapenet_root, "{}/{}/models/model_normalized.pkl")
        self.load_dataset()
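
The prefix_template and shapenet_template strings above are only filled in when individual samples are read. A hedged sketch of that pattern (the sample index, the .jpg extension and the ShapeNet synset/model ids below are illustrative assumptions, not values taken from the loader):

import os

prefix_template = "{:08d}"
rgb_folder = os.path.join("datasymlinks", "obman", "train", "rgb")
shapenet_template = os.path.join("datasymlinks/ShapeNetCore.v2",
                                 "{}/{}/models/model_normalized.pkl")

prefix = prefix_template.format(0)                      # "00000000", hypothetical sample index
image_path = os.path.join(rgb_folder, prefix + ".jpg")  # assumed image extension
obj_path = shapenet_template.format("02876657", "some_model_hash")  # hypothetical ids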
Example #6
    def __init__(
            self,
            split='train',
            split_type='subjects',
            original_subject_split=True,
            joint_nb=21,
            use_cache=False,
            mini_factor=None,
            use_objects=True,
            remove_objects=None,  # !! overridden for now
            test_object='juice_bottle',
            filter_no_contact=True,
            filter_thresh=10,
            topology=None,
            filter_object=None,
            override_scale=False):
        """
        Args:
            topology: if 0, juice_bottle, salt and liquid_soap; if 1, milk
        """
        super().__init__()
        self.all_queries = [
            BaseQueries.images, BaseQueries.joints2d, BaseQueries.joints3d,
            BaseQueries.sides, BaseQueries.camintrs, BaseQueries.meta
        ]
        self.use_objects = use_objects
        self.filter_no_contact = filter_no_contact
        self.filter_thresh = filter_thresh
        self.override_scale = override_scale

        if self.use_objects:
            self.all_queries.append(BaseQueries.objverts3d)
            self.all_queries.append(BaseQueries.objpoints2d)
            self.all_queries.append(BaseQueries.objfaces)
        if self.use_objects:  ## Overriding
            self.remove_objects = False
        else:
            self.remove_objects = False

        self.topology = topology
        self.test_object = test_object
        trans_queries = get_trans_queries(self.all_queries)
        self.all_queries.extend(trans_queries)
        # Set cache path
        self.use_cache = use_cache
        self.cache_folder = os.path.join('data', 'cache', 'fhb')
        os.makedirs(self.cache_folder, exist_ok=True)
        self.cam_extr = np.array(
            [[0.999988496304, -0.00468848412856, 0.000982563360594, 25.7],
             [0.00469115935266, 0.999985218048, -0.00273845880292, 1.22],
             [-0.000969709653873, 0.00274303671904, 0.99999576807,
              3.902], [0, 0, 0, 1]])
        self.cam_intr = np.array([[1395.749023, 0, 935.732544],
                                  [0, 1395.749268, 540.681030], [0, 0, 1]])

        self.reorder_idx = np.array([
            0, 1, 6, 7, 8, 2, 9, 10, 11, 3, 12, 13, 14, 4, 15, 16, 17, 5, 18,
            19, 20
        ])
        self.name = 'fhb'
        self.joint_nb = joint_nb
        self.mini_factor = mini_factor
        split_opts = ['action', 'objects', 'subjects']
        self.subjects = [
            'Subject_1', 'Subject_2', 'Subject_3', 'Subject_4', 'Subject_5',
            'Subject_6'
        ]
        if split_type not in split_opts:
            raise ValueError('Split for dataset {} should be in {}, got {}'.
                             format(self.name, split_opts, split_type))

        self.split_type = split_type
        self.original_subject_split = original_subject_split

        self.root = '/sequoia/data2/dataset/handatasets/fhb'
        self.info_root = os.path.join(self.root, 'Subjects_info')
        self.info_split = os.path.join(self.root,
                                       'data_split_action_recognition.txt')
        self.rgb_root = os.path.join(self.root, 'process_yana', 'videos_480')
        self.skeleton_root = os.path.join(self.root, 'Hand_pose_annotation_v1')
        self.filter_object = filter_object
        # Get file prefixes for images and annotations
        self.split = split
        self.rgb_template = 'color_{:04d}.jpeg'
        # Joints are numbered from tip to base, we want opposite
        self.idxs = [
            0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19
        ]
        self.load_dataset()

        print('Got {} samples for split {}'.format(
            len(self.image_names), self.split))

        # get paired links as neighbouring joints
        self.links = [(0, 1, 2, 3, 4), (0, 5, 6, 7, 8), (0, 9, 10, 11, 12),
                      (0, 13, 14, 15, 16), (0, 17, 18, 19, 20)]

        # Load normalization bone values
        mean_file = 'bones_synthgrasps_root_wrist.pkl'
        with open(
                os.path.join(
                    '/sequoia/data1/yhasson/code/pose_3d/handobjectdatasets/misc',
                    'stats', mean_file), 'rb') as p_f:
            grasp_data = pickle.load(p_f)
        self.mano_means = grasp_data['means']
Example #7
    def __init__(
        self,
        split="train",
        root="/sequoia/data2/dataset/handatasets/stereohands",
        joint_nb=21,
        use_cache=False,
        gt_detections=False,
    ):
        # Set cache path
        self.split = split
        self.use_cache = use_cache
        self.cache_folder = os.path.join("data", "cache", "stereohands")
        os.makedirs(self.cache_folder, exist_ok=True)
        self.gt_detections = gt_detections
        self.root = root
        self.joint_nb = joint_nb
        self.all_queries = [
            BaseQueries.manoidxs,
            BaseQueries.images,
            BaseQueries.joints2d,
            BaseQueries.joints3d,
            BaseQueries.sides,
        ]
        trans_queries = get_trans_queries(self.all_queries)
        self.all_queries.extend(trans_queries)
        self.name = "stereohands"

        self.manoidxs = list(range(1, 21))

        # Get file prefixes for images and annotations
        self.intr = np.array([[822.79041, 0, 318.47345],
                              [0, 822.79041, 250.31296], [0, 0, 1]])
        self.rgb_folder = os.path.join(root, "images")
        self.label_folder = os.path.join(root, "labels")
        self.right_template = "BB_right_{}.png"
        self.left_template = "BB_left_{}.png"

        # get paired links as neighbouring joints
        self.links = [
            (0, 1, 2, 3, 4),
            (0, 5, 6, 7, 8),
            (0, 9, 10, 11, 12),
            (0, 13, 14, 15, 16),
            (0, 17, 18, 19, 20),
        ]
        if split == "train":
            self.sequences = [
                "B2Counting",
                "B2Random",
                "B3Counting",
                "B3Random",
                "B4Counting",
                "B4Random",
                "B5Counting",
                "B5Random",
                "B6Counting",
                "B6Random",
            ]
        elif split == "test":
            self.sequences = ["B1Counting", "B1Random"]
        elif split == "val":
            self.sequences = ["B2Counting", "B2Random"]
        elif split == "train_val":
            self.sequences = [
                "B3Counting",
                "B3Random",
                "B4Counting",
                "B4Random",
                "B5Counting",
                "B5Random",
                "B6Counting",
                "B6Random",
            ]
        elif split == "all":
            self.sequences = [
                "B1Counting",
                "B1Random",
                "B2Counting",
                "B2Random",
                "B3Counting",
                "B3Random",
                "B4Counting",
                "B4Random",
                "B5Counting",
                "B5Random",
                "B6Counting",
                "B6Random",
            ]
        else:
            raise ValueError("split {} not in [train|test|val|train_val|all]")
        self.split = split
        self.center_path = os.path.join(root, "detections",
                                        "centers_{}.txt".format(self.split))
        self.scale_path = os.path.join(root, "detections",
                                       "scales_{}.txt".format(self.split))
        self.bbox_path = os.path.join(root, "detections",
                                      "bboxes_{}.txt".format(self.split))
        self.load_dataset()