Example #1
    def __init__(self, config):
        CameraInterface.__init__(self, config)
        self.bvh_tree = None

        self.rotations = []
        self.translations = []

        self.var_rot, self.var_translation = 0.0, 0.0
        self.check_pose_novelty_rot = self.config.get_bool(
            "check_pose_novelty_rot", False)
        self.check_pose_novelty_translation = self.config.get_bool(
            "check_pose_novelty_translation", False)

        self.min_var_diff_rot = self.config.get_float("min_var_diff_rot",
                                                      sys.float_info.min)
        if self.min_var_diff_rot == -1.0:
            self.min_var_diff_rot = sys.float_info.min

        self.min_var_diff_translation = self.config.get_float(
            "min_var_diff_translation", sys.float_info.min)
        if self.min_var_diff_translation == -1.0:
            self.min_var_diff_translation = sys.float_info.min

        self.cam_pose_collection = ItemCollection(
            self._sample_cam_poses,
            self.config.get_raw_dict("default_cam_param", {}))
        self.validated_poses = 0
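
The constructor above only sets up the bookkeeping for the pose-novelty checks (the rotations/translations lists, the running variances and the min_var_diff_* thresholds); the check itself is not shown. A minimal sketch of how such a variance-based novelty test could look, assuming a hypothetical helper _is_pose_novel that is not part of the original module:

import numpy as np

def _is_pose_novel(previous, new_value, prev_var, min_var_diff):
    """Accept a pose component only if it increases the variance enough.

    Hypothetical helper: `previous` is e.g. the list of accepted rotations,
    `prev_var` the variance so far, `min_var_diff` the required increase.
    """
    if not previous:
        # The first pose has nothing to be compared against; accept it.
        return True, 0.0
    candidate = np.array(previous + [new_value])
    new_var = float(np.mean(np.var(candidate, axis=0)))
    return (new_var - prev_var) >= min_var_diff, new_var

A sampler could call this once for rotations (with self.var_rot and self.min_var_diff_rot) and once for translations, and only keep poses for which both calls return True.
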
Example #2
    def __init__(self, config):
        CameraInterface.__init__(self, config)
        # A dict specifying the length of parameters that require more than
        # one argument. If not specified, 1 is assumed.
        self.number_of_arguments_per_parameter = {
            "location": 3,
            "rotation/value": 3,
            "cam2world_matrix": 16
        }
        self.cam_pose_collection = ItemCollection(
            self._add_cam_pose,
            self.config.get_raw_dict("default_cam_param", {}))
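
The number_of_arguments_per_parameter dict tells the underlying config parser how many values make up each camera parameter when poses are given as flat argument lists. Purely as an illustration (the exact layout of default_cam_param and of the pose entries depends on the surrounding config system), a matching pose entry could look like this:

# Illustrative only: the key names follow the dict above, the surrounding
# structure is an assumption about how camera poses are configured.
cam_pose = {
    "location": [0.0, -2.5, 1.8],        # 3 values
    "rotation/value": [1.25, 0.0, 0.0],  # 3 values, e.g. Euler angles
    # or a full homogeneous transform given as 16 flat values:
    # "cam2world_matrix": [1, 0, 0, 0,
    #                      0, 1, 0, 0,
    #                      0, 0, 1, 0,
    #                      0, 0, 0, 1],
}
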
Example #3
    def run(self):
        # bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = self.camera_info['width']
        bpy.context.scene.render.resolution_y = self.camera_info['height']

        # Collect camera and camera object
        cam_ob = bpy.context.scene.camera
        cam = cam_ob.data
        cam['loaded_intrinsics'] = np.array(
            [[self.camera_info['fx'], 0, self.camera_info['cx']],
             [0, self.camera_info['fy'], self.camera_info['cy']], [0, 0, 1]])
        cam['loaded_resolution'] = (self.camera_info['width'],
                                    self.camera_info['height'])

        config = Config({})
        camera_module = CameraInterface(config)
        camera_module._set_cam_intrinsics(cam, config)

        loaded_objects = []

        # only load all/selected objects here, use other modules for setting poses
        # e.g. camera.CameraSampler / object.ObjectPoseSampler
        obj_ids = list(range(self.obj_df.shape[0]))
        # if sampling is enabled
        if self.sample_objects:
            loaded_ids = {}
            loaded_amount = 0
            if self.obj_instances_limit != -1 and \
                    len(obj_ids) * self.obj_instances_limit < self.num_of_objs_to_sample:
                raise RuntimeError(
                    "{}'s {} split contains {} objects, but {} objects were requested to be "
                    "sampled with an instances limit of {}. Raise the limit or decrease the "
                    "requested number of objects.".format(
                        self.bop_dataset_path, self.split, len(obj_ids),
                        self.num_of_objs_to_sample, self.obj_instances_limit))
            while loaded_amount != self.num_of_objs_to_sample:
                obj_id = random.choice(obj_ids)
                if obj_id not in loaded_ids:
                    loaded_ids[obj_id] = 0
                # no limit, or the limit is not yet reached for this particular object
                if self.obj_instances_limit == -1 or \
                        loaded_ids[obj_id] < self.obj_instances_limit:
                    cur_obj = self._load_mesh(obj_id, scale=self.scale)
                    loaded_ids[obj_id] += 1
                    loaded_amount += 1
                    loaded_objects.append(cur_obj)
        else:
            for obj_id in obj_ids:
                cur_obj = self._load_mesh(obj_id, scale=self.scale)
                loaded_objects.append(cur_obj)
        self._set_properties(loaded_objects)
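
The sampling branch above draws object ids with replacement while respecting a per-object instance limit. The same logic can be sketched as a standalone function (the names here are hypothetical and only serve to illustrate the loop):

import random

def sample_with_limit(obj_ids, num_to_sample, instances_limit=-1):
    """Draw `num_to_sample` ids, each at most `instances_limit` times (-1 = unlimited)."""
    if instances_limit != -1 and len(obj_ids) * instances_limit < num_to_sample:
        raise RuntimeError("Not enough objects available for the requested sample size.")
    counts, sampled = {}, []
    while len(sampled) < num_to_sample:
        obj_id = random.choice(obj_ids)
        if instances_limit == -1 or counts.get(obj_id, 0) < instances_limit:
            counts[obj_id] = counts.get(obj_id, 0) + 1
            sampled.append(obj_id)
    return sampled
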
Example #4
    def run(self):
        """ Load BOP data """

        datasets_path = os.path.dirname(self.bop_dataset_path)
        dataset = os.path.basename(self.bop_dataset_path)

        print("bob: {}, dataset_path: {}".format(self.bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=self.model_type if self.model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path,
            dataset,
            cam_type=self.cam_type if self.cam_type else None)

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=self.split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    self.split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = cam_p['im_size'][0]
        bpy.context.scene.render.resolution_y = cam_p['im_size'][1]

        # Collect camera and camera object
        cam_ob = bpy.context.scene.camera
        cam = cam_ob.data

        cam['loaded_intrinsics'] = cam_p['K']
        cam['loaded_resolution'] = split_p['im_size'][0], split_p['im_size'][1]

        # TLESS exception because images are cropped
        if self.bop_dataset_name in ['tless']:
            cam['loaded_intrinsics'][2] = split_p['im_size'][0] / 2
            cam['loaded_intrinsics'][5] = split_p['im_size'][1] / 2

        config = Config({})
        camera_module = CameraInterface(config)
        camera_module._set_cam_intrinsics(cam, config)

        loaded_objects = []

        # only load all/selected objects here, use other modules for setting poses
        # e.g. camera.CameraSampler / object.ObjectPoseSampler
        if self.scene_id == -1:
            obj_ids = self.obj_ids if self.obj_ids else model_p['obj_ids']
            # if sampling is enabled
            if self.sample_objects:
                loaded_ids = {}
                loaded_amount = 0
                if self.obj_instances_limit != -1 and \
                        len(obj_ids) * self.obj_instances_limit < self.num_of_objs_to_sample:
                    raise RuntimeError(
                        "{}'s {} split contains {} objects, but {} objects were requested to be "
                        "sampled with an instances limit of {}. Raise the limit or decrease the "
                        "requested number of objects.".format(
                            self.bop_dataset_path, self.split, len(obj_ids),
                            self.num_of_objs_to_sample, self.obj_instances_limit))
                while loaded_amount != self.num_of_objs_to_sample:
                    random_id = choice(obj_ids)
                    if random_id not in loaded_ids:
                        loaded_ids[random_id] = 0
                    # no limit, or the limit is not yet reached for this particular object
                    if self.obj_instances_limit == -1 or \
                            loaded_ids[random_id] < self.obj_instances_limit:
                        cur_obj = self._load_mesh(random_id,
                                                  model_p,
                                                  scale=self.scale)
                        loaded_ids[random_id] += 1
                        loaded_amount += 1
                        loaded_objects.append(cur_obj)
                    else:
                        print(
                            "ID {} was already loaded {} times (limit: {}). {} objects "
                            "loaded so far, {} requested.".format(
                                random_id, loaded_ids[random_id],
                                self.obj_instances_limit, loaded_amount,
                                self.num_of_objs_to_sample))
            else:
                for obj_id in obj_ids:
                    cur_obj = self._load_mesh(obj_id,
                                              model_p,
                                              scale=self.scale)
                    loaded_objects.append(cur_obj)
            self._set_properties(loaded_objects)

        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(
                **{'scene_id': self.scene_id}))
            sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(
                **{'scene_id': self.scene_id}))
            for i, (cam_id, insts) in enumerate(sc_gt.items()):
                cam_K, cam_H_m2c_ref = self._get_ref_cam_extrinsics_intrinsics(
                    sc_camera, cam_id, insts, self.scale)

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    cur_objs = []
                    # load scene objects and set their poses
                    for inst in insts:
                        cur_objs.append(
                            self._load_mesh(inst['obj_id'],
                                            model_p,
                                            scale=self.scale))
                        self.set_object_pose(cur_objs[-1], inst, self.scale)

                cam_H_c2w = self._compute_camera_to_world_trafo(
                    cam_H_m2w_ref, cam_H_m2c_ref)
                # Set camera intrinsics and extrinsics
                config = Config({
                    "cam2world_matrix": list(cam_H_c2w.flatten()),
                    "cam_K": list(cam_K.flatten())
                })
                camera_module._set_cam_intrinsics(cam, config)
                camera_module._set_cam_extrinsics(cam_ob, config)

                # Store new cam pose as next frame
                frame_id = bpy.context.scene.frame_end
                # Copy object poses to next key frame (to be sure)
                for cur_obj in cur_objs:
                    self._insert_key_frames(cur_obj, frame_id)
                camera_module._insert_key_frames(cam, cam_ob, frame_id)
                bpy.context.scene.frame_end = frame_id + 1
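
_compute_camera_to_world_trafo combines the reference model-to-world transform (the first camera defines the world frame) with the current model-to-camera transform. Under those conventions the camera-to-world matrix is cam_H_m2w_ref @ inv(cam_H_m2c); a minimal sketch follows (the actual helper may additionally convert between the OpenCV and Blender camera coordinate conventions, which is not shown here):

import numpy as np

def compute_camera_to_world(cam_H_m2w_ref, cam_H_m2c):
    """Illustrative version of the camera-to-world computation.

    cam_H_m2w_ref: 4x4 model-to-world transform taken from the first camera.
    cam_H_m2c:     4x4 model-to-camera transform of the current camera.
    """
    # camera -> model (inverse of model -> camera), then model -> world
    return cam_H_m2w_ref @ np.linalg.inv(cam_H_m2c)
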