Example #1
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  if FLAGS.scene_ids is not None and FLAGS.targets_filename is not None:
    raise ValueError(
      'At most one of scene_ids and targets_filename can be specified.')

  # Load dataset parameters.
  dp_split = dataset_params.get_split_params(
    config.BOP_PATH, FLAGS.dataset, FLAGS.split, FLAGS.split_type)

  output_suffix = None

  if FLAGS.targets_filename:
    output_suffix = 'targets'
    test_targets = inout.load_json(
      os.path.join(config.BOP_PATH, FLAGS.dataset, FLAGS.targets_filename))
    example_list = []
    for trg in test_targets:
      example = {'scene_id': trg['scene_id'], 'im_id': trg['im_id']}
      if example not in example_list:
        example_list.append(example)

  else:
    if FLAGS.scene_ids is None:
      FLAGS.scene_ids = dataset_params.get_present_scene_ids(dp_split)
    else:
      FLAGS.scene_ids = list(map(int, FLAGS.scene_ids))
      output_suffix = 'scenes-' + '-'.join(map(str, FLAGS.scene_ids))

    tf.logging.info('Collecting examples...')
    example_list = []
    for scene_id in FLAGS.scene_ids:
      scene_gt_fpath = dp_split['scene_gt_tpath'].format(scene_id=scene_id)
      im_ids = inout.load_scene_gt(scene_gt_fpath).keys()
      for im_id in sorted(im_ids):
        example_list.append({'scene_id': scene_id, 'im_id': im_id})

  tf.logging.info('Collected {} examples.'.format(len(example_list)))
  assert len(example_list) > 0, 'No examples were collected.'

  split_name = FLAGS.split
  if FLAGS.split_type is not None:
    split_name += '-' + FLAGS.split_type

  if output_suffix is not None:
    output_suffix = '_' + output_suffix
  else:
    output_suffix = ''

  output_fname = '{}_{}{}_examples.txt'.format(
    FLAGS.dataset, split_name, output_suffix)
  output_fpath = os.path.join(FLAGS.output_dir, output_fname)

  tf.logging.info('Saving the list to: {}'.format(output_fpath))
  if not os.path.exists(FLAGS.output_dir):
    os.makedirs(FLAGS.output_dir)
  tfrecord.save_example_list(output_fpath, example_list)
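
For reference, tfrecord.save_example_list is a project-local helper that is not shown here. A minimal stand-in with the behavior the surrounding code implies (one scene_id/im_id pair per line; the exact on-disk format is an assumption) could look like:

# Hypothetical sketch; the real tfrecord.save_example_list may differ.
def save_example_list(path, example_list):
    with open(path, 'w') as f:
        for ex in example_list:
            # One example per line, e.g. "000048 000123" (assumed layout).
            f.write('{:06d} {:06d}\n'.format(ex['scene_id'], ex['im_id']))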
Example #2
    def __init__(self, name):
        """Constructor.

        Args:
          name: Dataset name. E.g., 's0_test'.
        """
        self._name = name

        self._dataset = get_dataset(self._name)

        self._setup = self._name.split('_')[0]
        self._split = self._name.split('_')[1]

        self._out_dir = os.path.join(os.path.dirname(__file__), "..",
                                     "results")
        self._bop_dir = os.path.join(self._dataset.data_dir, "bop")

        self._p = {
            'errors': [
                {
                    'n_top': -1,
                    'type': 'vsd',
                    'vsd_delta': 15,
                    'vsd_taus': list(np.arange(0.05, 0.51, 0.05)),
                    'correct_th': [[th] for th in np.arange(0.05, 0.51, 0.05)]
                },
                {
                    'n_top': -1,
                    'type': 'mssd',
                    'correct_th': [[th] for th in np.arange(0.05, 0.51, 0.05)]
                },
                {
                    'n_top': -1,
                    'type': 'mspd',
                    'correct_th': [[th] for th in np.arange(5, 51, 5)]
                },
            ],
            'visib_gt_min': -1,
        }

        dp_split = dataset_params.get_split_params(self._bop_dir, self._setup,
                                                   self._split)
        dp_model = dataset_params.get_model_params(self._bop_dir,
                                                   self._setup,
                                                   model_type='eval')
        self._scene_ids = dp_split['scene_ids']
        self._obj_ids = dp_model['obj_ids']

        self._grasp_id = defaultdict(lambda: {})
        for i in range(len(self._dataset)):
            sample = self._dataset[i]
            scene_id, im_id = self._dataset.get_bop_id_from_idx(i)
            obj_id = sample['ycb_ids'][sample['ycb_grasp_ind']]
            self._grasp_id[scene_id][im_id] = obj_id
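
After construction, _grasp_id maps scene id -> image id -> id of the grasped object. A minimal lookup sketch, assuming the surrounding class is named Evaluator (the name is not visible in this excerpt):

# Hypothetical usage; the class name and ids are placeholders.
evaluator = Evaluator('s0_test')
# The outer defaultdict returns {} for unseen scenes; a missing im_id
# would raise KeyError, so .get() is used for the inner lookup.
obj_id = evaluator._grasp_id[0].get(0)
print('grasped object id:', obj_id)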
Example #3
    ests_counter = 0
    time_start = time.time()

    # Parse info about the method and the dataset from the filename.
    result_name = os.path.splitext(os.path.basename(result_filename))[0]
    result_info = result_name.split('_')
    method = str(result_info[0])
    dataset_info = result_info[1].split('-')
    dataset = str(dataset_info[0])
    split = str(dataset_info[1])
    split_type = str(dataset_info[2]) if len(dataset_info) > 2 else None
    split_type_str = ' - ' + split_type if split_type is not None else ''

    # Load dataset parameters.
    dp_split = dataset_params.get_split_params(p['datasets_path'], dataset,
                                               split, split_type)

    model_type = 'eval'
    dp_model = dataset_params.get_model_params(p['datasets_path'], dataset,
                                               model_type)

    # Load object models.
    models = {}
    if p['error_type'] in ['ad', 'add', 'adi', 'mssd', 'mspd', 'proj']:
        misc.log('Loading object models...')
        for obj_id in dp_model['obj_ids']:
            models[obj_id] = inout.load_ply(
                dp_model['model_tpath'].format(obj_id=obj_id))

    # Load models info.
    models_info = None
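
The parsing above assumes result filenames follow METHOD_DATASET-SPLIT[-SPLITTYPE].ext. A standalone sketch of the same logic on a made-up filename (assuming the method name itself contains no underscore):

import os

result_filename = '/tmp/cosypose_tless-test-primesense.csv'  # hypothetical
result_name = os.path.splitext(os.path.basename(result_filename))[0]
method, dataset_info = result_name.split('_')
dataset, split, *rest = dataset_info.split('-')
split_type = rest[0] if rest else None
print(method, dataset, split, split_type)  # cosypose tless test primesense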
Example #4
    'dataset': 'lm',

    # Dataset split. Options: 'train', 'val', 'test'.
    'dataset_split': 'test',

    # Dataset split type. None = default. See dataset_params.py for options.
    'dataset_split_type': None,

    # Folder containing the BOP datasets.
    'datasets_path': config.datasets_path,
}
################################################################################

# Load dataset parameters.
dp_split = dataset_params.get_split_params(p['datasets_path'], p['dataset'],
                                           p['dataset_split'],
                                           p['dataset_split_type'])

scene_ids = dp_split['scene_ids']
dists = []
azimuths = []
elevs = []
visib_fracts = []
ims_count = 0
for scene_id in scene_ids:
    misc.log('Processing - dataset: {} ({}, {}), scene: {}'.format(
        p['dataset'], p['dataset_split'], p['dataset_split_type'], scene_id))

    # Load GT poses.
    scene_gt = inout.load_scene_gt(
        dp_split['scene_gt_tpath'].format(scene_id=scene_id))
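
Beyond this excerpt, the loop collects per-image distances, azimuths, elevations and visible fractions. As a sketch of how azimuth/elevation can be derived from a GT pose (this exact formula is an assumption; the collection code itself is cut off here):

import numpy as np

def camera_azimuth_elev(R, t):
    # Camera center in model coordinates: c = -R^T t.
    cam_pos = -R.T @ np.asarray(t).reshape(3)
    x, y, z = cam_pos
    azimuth = np.arctan2(y, x)
    elev = np.arctan2(z, np.hypot(x, y))
    return azimuth, elev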
Example #5
    def load(bop_dataset_path: str,
             temp_dir: str,
             sys_paths: list,
             model_type: str = "",
             cam_type: str = "",
             split: str = "test",
             scene_id: int = -1,
             obj_ids: list = [],
             sample_objects: bool = False,
             num_of_objs_to_sample: int = None,
             obj_instances_limit: int = -1,
             move_origin_to_x_y_plane: bool = False,
             source_frame: list = ["X", "-Y", "-Z"],
             mm2m: bool = False) -> List[MeshObject]:
        """ Loads the 3D models of any BOP dataset and allows replicating BOP scenes

        - Interfaces with the bop_toolkit, allows loading of train, val and test splits
        - Relative camera poses are loaded/computed with respect to a reference model
        - Sets real camera intrinsics

        :param bop_dataset_path: Full path to a specific bop dataset e.g. /home/user/bop/tless.
        :param temp_dir: A temp directory which is used for writing the temporary .ply file.
        :param sys_paths: System paths to append.
        :param model_type: Optionally, specify the type of BOP model. Available: 'reconst', 'cad' or 'eval'.
        :param cam_type: Camera type. If not defined, dataset-specific default camera type is used.
        :param split: Optionally, test or val split depending on BOP dataset.
        :param scene_id: Optionally, specify BOP dataset scene to synthetically replicate. Default: -1 (no scene is replicated,
                         only BOP Objects are loaded).
        :param obj_ids: List of object ids to load. Default: [] (all objects from the given BOP dataset if scene_id is not
                        specified).
        :param sample_objects: Toggles object sampling from the specified dataset.
        :param num_of_objs_to_sample: Amount of objects to sample from the specified dataset. If this amount is bigger
                                      than the number of objects the dataset contains, all objects are loaded.
        :param obj_instances_limit: Limits the amount of object copies when sampling. Default: -1 (no limit).
        :param move_origin_to_x_y_plane: Move the center of the object to the lower side of the object; this will not work when used in combination with
                                         pose estimation tasks! This is designed for the use-case where BOP objects are used as filler objects in
                                         the background.
        :param source_frame: Can be used if the given positions and rotations are specified in frames different from the blender
                            frame. Has to be a list of three strings. Example: ['X', '-Z', 'Y']: Point (1,2,3) will be transformed
                            to (1, -3, 2). Available: ['X', 'Y', 'Z', '-X', '-Y', '-Z'].
        :param mm2m: Specify whether to convert poses and models to meters.
        :return: The list of loaded mesh objects.
        """

        for sys_path in sys_paths:
            if 'bop_toolkit' in sys_path:
                sys.path.append(sys_path)

        scale = 0.001 if mm2m else 1
        bop_dataset_name = os.path.basename(bop_dataset_path)
        has_external_texture = bop_dataset_name in ["ycbv", "ruapc"]
        if obj_ids or sample_objects:
            allow_duplication = True
        else:
            allow_duplication = False

        datasets_path = os.path.dirname(bop_dataset_path)
        dataset = os.path.basename(bop_dataset_path)

        print("bop: {}, dataset_path: {}".format(bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=model_type if model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path, dataset, cam_type=cam_type if cam_type else None)

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = cam_p['im_size'][0]
        bpy.context.scene.render.resolution_y = cam_p['im_size'][1]

        loaded_objects = []

        # only load all/selected objects here, use other modules for setting poses
        # e.g. camera.CameraSampler / object.ObjectPoseSampler
        if scene_id == -1:

            # TLESS exception because images are cropped
            if bop_dataset_name in ['tless']:
                cam_p['K'][0, 2] = split_p['im_size'][0] / 2
                cam_p['K'][1, 2] = split_p['im_size'][1] / 2

            # set camera intrinsics
            CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'],
                                                       split_p['im_size'][0],
                                                       split_p['im_size'][1])

            obj_ids = obj_ids if obj_ids else model_p['obj_ids']
            # if sampling is enabled
            if sample_objects:
                loaded_ids = {}
                loaded_amount = 0
                if obj_instances_limit != -1 and len(
                        obj_ids) * obj_instances_limit < num_of_objs_to_sample:
                    raise RuntimeError(
                        "{}'s {} split contains {} objects, but {} objects were requested to sample with "
                        "an instances limit of {}. Raise the limit amount or decrease the requested "
                        "amount of objects.".format(bop_dataset_path, split,
                                                    len(obj_ids),
                                                    num_of_objs_to_sample,
                                                    obj_instances_limit))
                while loaded_amount != num_of_objs_to_sample:
                    random_id = choice(obj_ids)
                    if random_id not in loaded_ids.keys():
                        loaded_ids.update({random_id: 0})
                    # if there is no limit or if there is one, but it is not reached for this particular object
                    if obj_instances_limit == -1 or loaded_ids[
                            random_id] < obj_instances_limit:
                        cur_obj = BopLoader._load_mesh(random_id, model_p,
                                                       bop_dataset_name,
                                                       has_external_texture,
                                                       temp_dir,
                                                       allow_duplication,
                                                       scale)
                        loaded_ids[random_id] += 1
                        loaded_amount += 1
                        loaded_objects.append(cur_obj)
                    else:
                        print(
                            "ID {} was loaded {} times with a limit of {}. Loaded {} objects so far, "
                            "{} requested.".format(random_id,
                                                     loaded_ids[random_id],
                                                     obj_instances_limit,
                                                     loaded_amount,
                                                     num_of_objs_to_sample))
            else:
                for obj_id in obj_ids:
                    cur_obj = BopLoader._load_mesh(obj_id, model_p,
                                                   bop_dataset_name,
                                                   has_external_texture,
                                                   temp_dir, allow_duplication,
                                                   scale)
                    loaded_objects.append(cur_obj)

        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(
                split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
            sc_camera = inout.load_json(
                split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))
            for i, (cam_id, insts) in enumerate(sc_gt.items()):
                cam_K, cam_H_m2c_ref = BopLoader._get_ref_cam_extrinsics_intrinsics(
                    sc_camera, cam_id, insts, scale)

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    cur_objs = []
                    # load scene objects and set their poses
                    for inst in insts:
                        cur_objs.append(
                            BopLoader._load_mesh(inst['obj_id'], model_p,
                                                 bop_dataset_name,
                                                 has_external_texture,
                                                 temp_dir, allow_duplication,
                                                 scale))
                        BopLoader.set_object_pose(cur_objs[-1], inst, scale)

                cam_H_c2w = BopLoader._compute_camera_to_world_trafo(
                    cam_H_m2w_ref, cam_H_m2c_ref, source_frame)
                # set camera intrinsics
                CameraUtility.set_intrinsics_from_K_matrix(
                    cam_K, split_p['im_size'][0], split_p['im_size'][1])

                # set camera extrinsics as next frame
                frame_id = CameraUtility.add_camera_pose(cam_H_c2w)

                # Add key frame for camera shift, as it changes from frame to frame in the tless replication
                cam = bpy.context.scene.camera.data
                cam.keyframe_insert(data_path='shift_x', frame=frame_id)
                cam.keyframe_insert(data_path='shift_y', frame=frame_id)

                # Copy object poses to key frame (to be sure)
                for cur_obj in cur_objs:
                    BopLoader._insert_key_frames(cur_obj, frame_id)

        # move the origin of the object to the world origin and on top of the X-Y plane
        # makes it easier to place them later on, this does not change the `.location`
        # This is only useful if the BOP objects are not used in a pose estimation scenario.
        if move_origin_to_x_y_plane:
            for obj in loaded_objects:
                obj.move_origin_to_bottom_mean_point()

        return loaded_objects
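
A possible invocation, using only parameters from the signature above (all paths are placeholders):

# Hypothetical call; adjust paths to your setup.
objs = BopLoader.load(
    bop_dataset_path='/home/user/bop/tless',
    temp_dir='/tmp/bop_temp',
    sys_paths=['/home/user/bop_toolkit'],  # must contain bop_toolkit
    split='test',
    scene_id=1,   # replicate scene 1 instead of loading bare models
    mm2m=True)    # BOP poses/models are in millimeters; convert to meters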
Example #6
    # Dataset split type. Options: 'synt', 'real', None = default. See dataset_params.py for options.
    'dataset_split_type': None,

    # Folder containing the BOP datasets.
    'datasets_path': config.datasets_path,
}
################################################################################

datasets_path = p['datasets_path']
dataset_name = p['dataset']
split = p['dataset_split']
split_type = p['dataset_split_type']

dp_split = dataset_params.get_split_params(datasets_path,
                                           dataset_name,
                                           split,
                                           split_type=split_type)
dp_model = dataset_params.get_model_params(datasets_path, dataset_name)

complete_split = split
if dp_split['split_type'] is not None:
    complete_split += '_' + dp_split['split_type']

CATEGORIES = [{
    'id': obj_id,
    'name': str(obj_id),
    'supercategory': dataset_name
} for obj_id in dp_model['obj_ids']]
INFO = {
    "description": dataset_name + '_' + split,
    "url": "https://github.com/thodan/bop_toolkit",
Example #7
    def run(self):
        """ Load BOP data """

        bop_dataset_path = self.config.get_string("bop_dataset_path")
        scene_id = self.config.get_int("scene_id", -1)
        obj_ids = self.config.get_list("obj_ids", [])
        split = self.config.get_string("split", "test")
        model_type = self.config.get_string("model_type", "")
        cam_type = self.config.get_string("cam_type", "")
        scale = 0.001 if self.config.get_bool("mm2m", False) else 1
        datasets_path = os.path.dirname(bop_dataset_path)
        dataset = os.path.basename(bop_dataset_path)

        print("bop: {}, dataset_path: {}".format(bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=model_type if model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path, dataset, cam_type=cam_type if cam_type else None)
        bpy.data.scenes["Scene"]["num_labels"] = len(model_p['obj_ids'])

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = self.config.get_int(
            "resolution_x", split_p['im_size'][0])
        bpy.context.scene.render.resolution_y = self.config.get_int(
            "resolution_y", split_p['im_size'][1])

        # Collect camera and camera object
        cam_ob = bpy.context.scene.camera
        cam = cam_ob.data
        cam['loaded_resolution'] = bpy.context.scene.render.resolution_x, bpy.context.scene.render.resolution_y
        # Load default intrinsics from camera.json.
        cam['loaded_intrinsics'] = cam_p['K']

        config = Config({})
        camera_module = CameraModule(config)
        camera_module._set_cam_intrinsics(cam, config)

        # Only load all/selected objects here; use other modules for setting
        # poses, e.g. camera.CameraSampler / object.ObjectPoseSampler.
        if scene_id == -1:
            obj_ids = obj_ids if obj_ids else model_p['obj_ids']
            for obj_id in obj_ids:
                self._load_mesh(obj_id, model_p, scale=scale)
        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(
                split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
            sc_camera = inout.load_json(
                split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))

            for i, (cam_id, insts) in enumerate(sc_gt.items()):

                cam_K, cam_H_m2c_ref = self._get_ref_cam_extrinsics_intrinsics(
                    sc_camera, cam_id, insts, scale)

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    # load scene objects
                    for inst in insts:
                        cur_obj = self._load_mesh(inst['obj_id'], model_p)
                        self.set_object_pose(cur_obj, inst, scale)

                cam_H_c2w = self._compute_camera_to_world_trafo(
                    cam_H_m2w_ref, cam_H_m2c_ref)

                # Set camera intrinsics and extrinsics.
                config = Config({
                    "cam2world_matrix": list(cam_H_c2w.flatten()),
                    "camK": list(cam_K.flatten())
                })
                camera_module._set_cam_intrinsics(cam, config)
                camera_module._set_cam_extrinsics(cam_ob, config)

                # Store new cam pose as next frame
                frame_id = bpy.context.scene.frame_end
                camera_module._insert_key_frames(cam, cam_ob, frame_id)
                bpy.context.scene.frame_end = frame_id + 1
Example #8
  os.path.join('{out_path}', 'scene_gt', '{obj_id:06d}_scene_gt.json')
out_views_vis_tpath =\
  os.path.join('{out_path}', 'views_radius', '{obj_id:06d}_views_radius={radius}.ply')

# Load colors.
colors_path = os.path.join(
  os.path.dirname(inout.__file__), 'colors.json')
colors = inout.load_json(colors_path)
################################################################################


out_path = out_tpath.format(dataset=dataset)
misc.ensure_dir(out_path)

# Load dataset parameters.
dp_split_test = dataset_params.get_split_params(datasets_path, dataset, 'test')
dp_model = dataset_params.get_model_params(datasets_path, dataset, model_type)
dp_camera = dataset_params.get_camera_params(datasets_path, dataset, cam_type)

if not obj_ids:
  obj_ids = dp_model['obj_ids']

# Image size and K for the RGB image (potentially with SSAA).
im_size_rgb = [int(round(x * float(ssaa_fact))) for x in dp_camera['im_size']]
K_rgb = dp_camera['K'] * ssaa_fact

# Intrinsic parameters for RGB rendering.
fx_rgb, fy_rgb, cx_rgb, cy_rgb =\
  K_rgb[0, 0], K_rgb[1, 1], K_rgb[0, 2], K_rgb[1, 2]

# Intrinsic parameters for depth rendering.
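
To make the SSAA scaling concrete: the image size and the full K matrix are multiplied by the same factor, and since only fx, fy, cx, cy are read afterwards, scaling the homogeneous row of K is harmless. A quick numeric check with made-up intrinsics:

import numpy as np

ssaa_fact = 4  # hypothetical supersampling factor
im_size = [640, 480]
K = np.array([[572.4, 0.0, 325.3],
              [0.0, 573.6, 242.0],
              [0.0, 0.0, 1.0]])

im_size_rgb = [int(round(x * float(ssaa_fact))) for x in im_size]  # [2560, 1920]
K_rgb = K * ssaa_fact
fx_rgb, fy_rgb, cx_rgb, cy_rgb = K_rgb[0, 0], K_rgb[1, 1], K_rgb[0, 2], K_rgb[1, 2]
print(fx_rgb, cx_rgb)  # 2289.6 1301.2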
Example #9
    def run(self):
        """ Load BOP data """

        datasets_path = os.path.dirname(self.bop_dataset_path)
        dataset = os.path.basename(self.bop_dataset_path)

        print("bop: {}, dataset_path: {}".format(self.bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=self.model_type if self.model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path,
            dataset,
            cam_type=self.cam_type if self.cam_type else None)
        bpy.data.scenes["Scene"]["num_labels"] = len(model_p['obj_ids'])

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=self.split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    self.split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = split_p['im_size'][0]
        bpy.context.scene.render.resolution_y = split_p['im_size'][1]

        # Collect camera and camera object
        cam_ob = bpy.context.scene.camera
        cam = cam_ob.data
        cam['loaded_resolution'] = split_p['im_size'][0], split_p['im_size'][1]
        # load default intrinsics from camera.json
        cam['loaded_intrinsics'] = cam_p['K']

        config = Config({})
        camera_module = CameraModule(config)
        camera_module._set_cam_intrinsics(cam, config)

        loaded_objects = []

        # only load all/selected objects here, use other modules for setting poses
        # e.g. camera.CameraSampler / object.ObjectPoseSampler
        if self.scene_id == -1:
            obj_ids = self.obj_ids if self.obj_ids else model_p['obj_ids']
            # if sampling is enabled
            if self.sample_objects:
                loaded_ids = {}
                loaded_amount = 0
                if self.obj_instances_limit != -1 and len(
                        obj_ids
                ) * self.obj_instances_limit < self.num_of_objs_to_sample:
                    raise RuntimeError(
                        "{}'s {} split contains {} objects, but {} objects were requested to sample with "
                        "an instances limit of {}. Raise the limit amount or decrease the requested "
                        "amount of objects.".format(self.bop_dataset_path,
                                                    self.split, len(obj_ids),
                                                    self.num_of_objs_to_sample,
                                                    self.obj_instances_limit))
                while loaded_amount != self.num_of_objs_to_sample:
                    random_id = choice(obj_ids)
                    if random_id not in loaded_ids.keys():
                        loaded_ids.update({random_id: 0})
                    # if there is no limit or if there is one, but it is not reached for this particular object
                    if self.obj_instances_limit == -1 or loaded_ids[
                            random_id] < self.obj_instances_limit:
                        cur_obj = self._load_mesh(random_id,
                                                  model_p,
                                                  scale=self.scale)
                        loaded_ids[random_id] += 1
                        loaded_amount += 1
                        loaded_objects.append(cur_obj)
                    else:
                        print(
                            "ID {} was loaded {} times with a limit of {}. Loaded {} objects so far, "
                            "{} requested.".format(
                                random_id, loaded_ids[random_id],
                                self.obj_instances_limit, loaded_amount,
                                self.num_of_objs_to_sample))
            else:
                for obj_id in obj_ids:
                    cur_obj = self._load_mesh(obj_id,
                                              model_p,
                                              scale=self.scale)
                    loaded_objects.append(cur_obj)
            self._set_properties(loaded_objects)

        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(
                **{'scene_id': self.scene_id}))
            sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(
                **{'scene_id': self.scene_id}))
            for i, (cam_id, insts) in enumerate(sc_gt.items()):
                cam_K, cam_H_m2c_ref = self._get_ref_cam_extrinsics_intrinsics(
                    sc_camera, cam_id, insts, self.scale)

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    cur_objs = []
                    # load scene objects and set their poses
                    for inst in insts:
                        cur_objs.append(
                            self._load_mesh(inst['obj_id'],
                                            model_p,
                                            scale=self.scale))
                        self.set_object_pose(cur_objs[-1], inst, self.scale)

                cam_H_c2w = self._compute_camera_to_world_trafo(
                    cam_H_m2w_ref, cam_H_m2c_ref)
                # Set camera intrinsics and extrinsics.
                config = Config({
                    "cam2world_matrix": list(cam_H_c2w.flatten()),
                    "cam_K": list(cam_K.flatten())
                })
                camera_module._set_cam_intrinsics(cam, config)
                camera_module._set_cam_extrinsics(cam_ob, config)

                # Store new cam pose as next frame
                frame_id = bpy.context.scene.frame_end
                # Copy object poses to next key frame (to be sure)
                for cur_obj in cur_objs:
                    self._insert_key_frames(cur_obj, frame_id)
                camera_module._insert_key_frames(cam, cam_ob, frame_id)
                bpy.context.scene.frame_end = frame_id + 1
Example #10
    def run(self):
        bop_dataset_path = self.config.get_string("bop_dataset_path")
        scene_id = self.config.get_int("scene_id")
        split = self.config.get_string("split", "test")
        model_type = self.config.get_string("model_type", "")
        mm2m = 0.001 if self.config.get_bool("mm2m") else 1

        datasets_path = os.path.dirname(bop_dataset_path)
        dataset = os.path.basename(bop_dataset_path)
        print("bop: {}, dataset_path: {}".format(bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=model_type if model_type else None)
        camera_p = dataset_params.get_camera_params(datasets_path, dataset)

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    split, dataset))

        sc_gt = inout.load_scene_gt(
            split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
        sc_camera = inout.load_json(
            split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))

        bpy.context.scene.render.resolution_x = self.config.get_int(
            "resolution_x", split_p['im_size'][0])
        bpy.context.scene.render.resolution_y = self.config.get_int(
            "resolution_y", split_p['im_size'][1])
        #bpy.context.scene.render.pixel_aspect_x = self.config.get_float("pixel_aspect_x", 1) #split_p['im_size'][0] / split_p['im_size'][1])

        cm = CameraModule(self.config)

        for i, (cam_id, insts) in enumerate(sc_gt.items()):

            cam_K = np.array(sc_camera[str(cam_id)]['cam_K']).reshape(3, 3)

            cam_H_m2c_ref = np.eye(4)
            cam_H_m2c_ref[:3, :3] = np.array(insts[0]['cam_R_m2c']).reshape(
                3, 3)
            cam_H_m2c_ref[:3, 3] = np.array(
                insts[0]['cam_t_m2c']).reshape(3) * mm2m

            if i == 0:
                # define world = first camera
                cam_H_m2w_ref = cam_H_m2c_ref.copy()

                for inst in insts:

                    bpy.ops.import_mesh.ply(
                        filepath=model_p['model_tpath'].format(
                            **{'obj_id': inst['obj_id']}))

                    cam_H_m2c = np.eye(4)
                    cam_H_m2c[:3, :3] = np.array(inst['cam_R_m2c']).reshape(
                        3, 3)
                    cam_H_m2c[:3, 3] = np.array(
                        inst['cam_t_m2c']).reshape(3) * mm2m

                    # world = camera @ i=0
                    cam_H_m2w = cam_H_m2c
                    print('-----------------------------')
                    print("Model: {}".format(cam_H_m2w))
                    print('-----------------------------')

                    cur_obj = bpy.context.selected_objects[-1]
                    cur_obj.matrix_world = Matrix(cam_H_m2w)
                    cur_obj.scale = Vector((mm2m, mm2m, mm2m))

                    mat = self._load_materials(cur_obj)
                    self._link_col_node(mat)

            cam_H_c2w = np.dot(cam_H_m2w_ref, np.linalg.inv(cam_H_m2c_ref))

            print('-----------------------------')
            print("Cam: {}".format(cam_H_c2w))
            print('-----------------------------')

            config = {"location": [0, 0, 0], "rotation": [0, 0, 0]}
            cm._add_cam_pose(Config(config), Matrix(cam_H_c2w), cam_K)
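
The extrinsics computation rests on defining the world frame as the first camera: since cam_H_m2w_ref is set to cam_H_m2c_ref at i == 0, cam_H_c2w = cam_H_m2w_ref @ inv(cam_H_m2c_ref) is the identity for the first frame and expresses all later cameras relative to it. A quick sanity check with a made-up reference pose:

import numpy as np

cam_H_m2c_ref = np.eye(4)               # hypothetical reference pose
cam_H_m2c_ref[:3, 3] = [0.1, 0.0, 0.5]

cam_H_m2w_ref = cam_H_m2c_ref.copy()    # world := first camera

cam_H_c2w = cam_H_m2w_ref @ np.linalg.inv(cam_H_m2c_ref)
assert np.allclose(cam_H_c2w, np.eye(4))  # first camera sits at the world origin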
Example #11
    def run(self):
        """ Load BOP data """

        datasets_path = os.path.dirname(self.bop_dataset_path)
        dataset = os.path.basename(self.bop_dataset_path)

        print("bop: {}, dataset_path: {}".format(self.bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=self.model_type if self.model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path,
            dataset,
            cam_type=self.cam_type if self.cam_type else None)

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=self.split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    self.split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = cam_p['im_size'][0]
        bpy.context.scene.render.resolution_y = cam_p['im_size'][1]

        loaded_objects = []

        # only load all/selected objects here, use other modules for setting poses
        # e.g. camera.CameraSampler / object.ObjectPoseSampler
        if self.scene_id == -1:

            # TLESS exception because images are cropped
            if self.bop_dataset_name in ['tless']:
                cam_p['K'][0, 2] = split_p['im_size'][0] / 2
                cam_p['K'][1, 2] = split_p['im_size'][1] / 2

            # set camera intrinsics
            CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'],
                                                       split_p['im_size'][0],
                                                       split_p['im_size'][1])

            obj_ids = self.obj_ids if self.obj_ids else model_p['obj_ids']
            # if sampling is enabled
            if self.sample_objects:
                loaded_ids = {}
                loaded_amount = 0
                if self.obj_instances_limit != -1 and len(
                        obj_ids
                ) * self.obj_instances_limit < self.num_of_objs_to_sample:
                    raise RuntimeError(
                        "{}'s {} split contains {} objects, but {} objects were requested to sample with "
                        "an instances limit of {}. Raise the limit amount or decrease the requested "
                        "amount of objects.".format(self.bop_dataset_path,
                                                    self.split, len(obj_ids),
                                                    self.num_of_objs_to_sample,
                                                    self.obj_instances_limit))
                while loaded_amount != self.num_of_objs_to_sample:
                    random_id = choice(obj_ids)
                    if random_id not in loaded_ids.keys():
                        loaded_ids.update({random_id: 0})
                    # if there is no limit or if there is one, but it is not reached for this particular object
                    if self.obj_instances_limit == -1 or loaded_ids[
                            random_id] < self.obj_instances_limit:
                        cur_obj = self._load_mesh(random_id,
                                                  model_p,
                                                  scale=self.scale)
                        loaded_ids[random_id] += 1
                        loaded_amount += 1
                        loaded_objects.append(cur_obj)
                    else:
                        print(
                            "ID {} was loaded {} times with a limit of {}. Loaded {} objects so far, "
                            "{} requested.".format(
                                random_id, loaded_ids[random_id],
                                self.obj_instances_limit, loaded_amount,
                                self.num_of_objs_to_sample))
            else:
                for obj_id in obj_ids:
                    cur_obj = self._load_mesh(obj_id,
                                              model_p,
                                              scale=self.scale)
                    loaded_objects.append(cur_obj)
            self._set_properties(loaded_objects)

        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(
                **{'scene_id': self.scene_id}))
            sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(
                **{'scene_id': self.scene_id}))
            for i, (cam_id, insts) in enumerate(sc_gt.items()):
                cam_K, cam_H_m2c_ref = self._get_ref_cam_extrinsics_intrinsics(
                    sc_camera, cam_id, insts, self.scale)

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    cur_objs = []
                    # load scene objects and set their poses
                    for inst in insts:
                        cur_objs.append(
                            self._load_mesh(inst['obj_id'],
                                            model_p,
                                            scale=self.scale))
                        self.set_object_pose(cur_objs[-1], inst, self.scale)

                cam_H_c2w = self._compute_camera_to_world_trafo(
                    cam_H_m2w_ref, cam_H_m2c_ref)
                # set camera intrinsics
                CameraUtility.set_intrinsics_from_K_matrix(
                    cam_K, split_p['im_size'][0], split_p['im_size'][1])

                # set camera extrinsics as next frame
                frame_id = CameraUtility.add_camera_pose(cam_H_c2w)

                # Add key frame for camera shift, as it changes from frame to frame in the tless replication
                cam = bpy.context.scene.camera.data
                cam.keyframe_insert(data_path='shift_x', frame=frame_id)
                cam.keyframe_insert(data_path='shift_y', frame=frame_id)

                # Copy object poses to key frame (to be sure)
                for cur_obj in cur_objs:
                    self._insert_key_frames(cur_obj, frame_id)

        # move the origin of the object to the world origin and on top of the X-Y plane
        # makes it easier to place them later on, this does not change the `.location`
        # This is only useful if the BOP objects are not used in a pose estimation scenario.
        move_to_origin = self.config.get_bool("move_origin_to_x_y_plane",
                                              False)
        if move_to_origin:
            LoaderInterface.move_obj_origin_to_bottom_mean_point(
                loaded_objects)
Example #12
                  'w')  # gt file for training keras-retinanet
l_writer = csv.writer(label_file, delimiter=",")

csvfile = open(os.path.join(bop_dir, "retinanet_gt.csv"),
               'w')  # gt file for training keras-retinanet
writer = csv.writer(csvfile, delimiter=",")

for m_idx, m_id in enumerate(model_ids):
    l_writer.writerow([m_id, m_idx])
label_file.close()

if dataset != 'ycbv':
    print("Loading...", dataset)
    im_width, im_height = cam_param_global['im_size']

    test_params = dataset_params.get_split_params(bop_dir, dataset, "test")
    if test_params['depth_range'] is not None:
        mean_depth = np.mean(test_params['depth_range']) / 1000
        max_depth = test_params['depth_range'][1] / 1000
    elif dataset == "itodd":
        mean_depth = (0.601 + 1.102) / 2
        max_depth = 1.102
    else:
        mean_depth = 1  # default depth = 1
        max_depth = 3

    # Build map from class_id (used in the detection pipeline) to actual obj_id (in GT):
    # a detection with label i+1 means a detection of model model_ids[i].
    crop_dir = bop_dir + "/train_crop"
    cropmask_dir = bop_dir + "/train_cropmask"
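
The class-id map described in the comment above is not shown in this excerpt; a minimal sketch of how it could be built from model_ids:

# Hypothetical: detector class i+1 corresponds to model_ids[i].
class_to_obj_id = {i + 1: m_id for i, m_id in enumerate(model_ids)}
# e.g. a detection with label 3 refers to BOP object model_ids[2]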
Example #13
    def run(self):

        bop_dataset_path = self.config.get_string("bop_dataset_path")
        scene_id = self.config.get_int("scene_id", -1)
        obj_ids = self.config.get_list("obj_ids", [])
        split = self.config.get_string("split", "test")
        model_type = self.config.get_string("model_type", "")
        cam_type = self.config.get_string("cam_type", "")
        mm2m = 0.001 if self.config.get_bool("mm2m", False) else 1
        datasets_path = os.path.dirname(bop_dataset_path)
        dataset = os.path.basename(bop_dataset_path)

        print("bop: {}, dataset_path: {}".format(bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=model_type if model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path, dataset, cam_type=cam_type if cam_type else None)
        bpy.data.scenes["Scene"]["num_labels"] = len(model_p['obj_ids'])

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = self.config.get_int(
            "resolution_x", split_p['im_size'][0])
        bpy.context.scene.render.resolution_y = self.config.get_int(
            "resolution_y", split_p['im_size'][1])

        # Collect camera and camera object
        cam_ob = bpy.context.scene.camera
        cam = cam_ob.data
        cam['loaded_resolution'] = bpy.context.scene.render.resolution_x, bpy.context.scene.render.resolution_y
        # Load default intrinsics from camera.json.
        cam['loaded_intrinsics'] = cam_p['K']

        # Only load all/selected objects here; use other modules for setting
        # poses, e.g. camera.CameraSampler / object.ObjectPoseSampler.
        if scene_id == -1:
            obj_ids = obj_ids if obj_ids else model_p['obj_ids']
            for obj_id in obj_ids:
                self._load_mesh(obj_id, model_p, mm2m=mm2m)
        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(
                split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
            sc_camera = inout.load_json(
                split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))

            cm = CameraModule(self.config)

            for i, (cam_id, insts) in enumerate(sc_gt.items()):

                cam_K = np.array(sc_camera[str(cam_id)]['cam_K']).reshape(3, 3)

                cam_H_m2c_ref = np.eye(4)
                cam_H_m2c_ref[:3, :3] = np.array(
                    insts[0]['cam_R_m2c']).reshape(3, 3)
                cam_H_m2c_ref[:3, 3] = np.array(
                    insts[0]['cam_t_m2c']).reshape(3) * mm2m

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    for inst in insts:
                        cur_obj = self._load_mesh(inst['obj_id'],
                                                  model_p,
                                                  mm2m=mm2m)

                        cam_H_m2c = np.eye(4)
                        cam_H_m2c[:3, :3] = np.array(
                            inst['cam_R_m2c']).reshape(3, 3)
                        cam_H_m2c[:3, 3] = np.array(
                            inst['cam_t_m2c']).reshape(3) * mm2m

                        # world = camera @ i=0
                        cam_H_m2w = cam_H_m2c
                        print('-----------------------------')
                        print("Model: {}".format(cam_H_m2w))
                        print('-----------------------------')

                        cur_obj.matrix_world = Matrix(cam_H_m2w)

                cam_H_c2w = np.dot(cam_H_m2w_ref, np.linalg.inv(cam_H_m2c_ref))

                print('-----------------------------')
                print("Cam: {}".format(cam_H_c2w))
                print('-----------------------------')

                config = {"location": [0, 0, 0], "rotation": [0, 0, 0]}
                cm._add_cam_pose(Config(config), Matrix(cam_H_c2w), cam_K)
Example #14
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    # Load the list of examples.
    examples_path = os.path.join(config.TF_DATA_PATH, 'example_lists',
                                 FLAGS.examples_filename)
    tf.logging.info(
        'Loading a list of examples from: {}'.format(examples_path))
    examples_list = tfrecord.load_example_list(examples_path)

    # Load dataset parameters.
    dp_split = dataset_params.get_split_params(config.BOP_PATH, FLAGS.dataset,
                                               FLAGS.split, FLAGS.split_type)

    # Pre-load camera parameters and ground-truth annotations.
    scene_gt = {}
    scene_gt_info = {}
    scene_camera = {}
    scene_ids = {e['scene_id'] for e in examples_list}
    for scene_id in scene_ids:

        scene_camera[scene_id] = inout.load_scene_camera(
            dp_split['scene_camera_tpath'].format(scene_id=scene_id))

        if FLAGS.add_gt:
            scene_gt[scene_id] = inout.load_scene_gt(
                dp_split['scene_gt_tpath'].format(scene_id=scene_id))
            scene_gt_info[scene_id] = inout.load_json(
                dp_split['scene_gt_info_tpath'].format(scene_id=scene_id),
                keys_to_int=True)

    # Check the name of the file with examples.
    examples_end = '_examples.txt'
    if not FLAGS.examples_filename.endswith(examples_end):
        raise ValueError(
            'Name of the file with examples must end with {}.'.format(
                examples_end))

    # Prepare writer of the TFRecord file.
    output_name = FLAGS.examples_filename.split(examples_end)[0]
    output_path = os.path.join(FLAGS.output_dir, output_name + '.tfrecord')
    writer = tf.python_io.TFRecordWriter(output_path)
    tf.logging.info('File to be created: {}'.format(output_path))

    # Optionally shuffle the examples.
    if FLAGS.shuffle:
        random.shuffle(examples_list)

    # Write the examples to the TFRecord file.
    w_start_t = time.time()

    create_tf_example_partial = partial(create_tf_example,
                                        dp_split=dp_split,
                                        scene_camera=scene_camera,
                                        scene_gt=scene_gt,
                                        scene_gt_info=scene_gt_info)

    for example_id, example in enumerate(examples_list):
        if example_id % 50 == 0:
            tf.logging.info('Processing example {}/{}'.format(
                example_id + 1, len(examples_list)))

        tf_example, _ = create_tf_example_partial(example)
        writer.write(tf_example)

    # Close the writer.
    writer.close()

    w_total_t = time.time() - w_start_t
    tf.logging.info('Writing took {} s.'.format(w_total_t))
Example #15
# PARAMETERS.
################################################################################
p = {
    # See dataset_params.py for options.
    'dataset': 'lm',

    # Type of input object models.
    'model_type': 'eval',

    # Folder containing the BOP datasets.
    'datasets_path': config.datasets_path,
}
################################################################################

# Load dataset parameters.
dp_split = dataset_params.get_split_params(p['datasets_path'], p['dataset'],
                                           'train')

dp_model = dataset_params.get_model_params(p['datasets_path'], p['dataset'],
                                           p['model_type'])

models_info = {}
for obj_id in dp_model['obj_ids']:
    misc.log('Processing model of object {}...'.format(obj_id))

    model = inout.load_ply(dp_model['model_tpath'].format(obj_id=obj_id))

    # Calculate 3D bounding box.
    # Materialize the values (in Python 3, map() returns an iterator, and
    # subtracting a map object from an ndarray fails).
    ref_pt = list(map(float, model['pts'].min(axis=0).flatten()))
    size = list(map(float, (model['pts'].max(axis=0) - ref_pt).flatten()))

    # Calculate the diameter.
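
The snippet cuts off at the diameter step. In BOP, an object's diameter is the maximum distance between any two model vertices (bop_toolkit provides misc.calc_pts_diameter for this); a self-contained sketch:

import numpy as np

def calc_diameter(pts):
    # Pairwise distances via broadcasting; O(n^2) memory, so chunk or
    # subsample the vertices for very large models.
    diffs = pts[None, :, :] - pts[:, None, :]
    return float(np.sqrt((diffs ** 2).sum(axis=-1)).max())

# diameter = calc_diameter(model['pts'])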