Example #1
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  if FLAGS.scene_ids is not None and FLAGS.targets_filename is not None:
    raise ValueError(
      'At most one of scene_ids and targets_filename can be specified.')

  # Load dataset parameters.
  dp_split = dataset_params.get_split_params(
    config.BOP_PATH, FLAGS.dataset, FLAGS.split, FLAGS.split_type)

  output_suffix = None

  if FLAGS.targets_filename:
    output_suffix = 'targets'
    test_targets = inout.load_json(
      os.path.join(config.BOP_PATH, FLAGS.dataset, FLAGS.targets_filename))
    example_list = []
    for trg in test_targets:
      example = {'scene_id': trg['scene_id'], 'im_id': trg['im_id']}
      if example not in example_list:
        example_list.append(example)

  else:
    if FLAGS.scene_ids is None:
      FLAGS.scene_ids = dataset_params.get_present_scene_ids(dp_split)
    else:
      FLAGS.scene_ids = list(map(int, FLAGS.scene_ids))
      output_suffix = 'scenes-' + '-'.join(map(str, FLAGS.scene_ids))

    tf.logging.info('Collecting examples...')
    example_list = []
    for scene_id in FLAGS.scene_ids:
      scene_gt_fpath = dp_split['scene_gt_tpath'].format(scene_id=scene_id)
      im_ids = inout.load_scene_gt(scene_gt_fpath).keys()
      for im_id in sorted(im_ids):
        example_list.append({'scene_id': scene_id, 'im_id': im_id})

  tf.logging.info('Collected {} examples.'.format(len(example_list)))
  assert len(example_list) > 0

  split_name = FLAGS.split
  if FLAGS.split_type is not None:
    split_name += '-' + FLAGS.split_type

  if output_suffix is not None:
    output_suffix = '_' + output_suffix
  else:
    output_suffix = ''

  output_fname = '{}_{}{}_examples.txt'.format(
    FLAGS.dataset, split_name, output_suffix)
  output_fpath = os.path.join(FLAGS.output_dir, output_fname)

  tf.logging.info('Saving the list to: {}'.format(output_fpath))
  if not os.path.exists(FLAGS.output_dir):
    os.makedirs(FLAGS.output_dir)
  tfrecord.save_example_list(output_fpath, example_list)
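The script assumes module-level FLAGS; a minimal sketch of the flag definitions it relies on (flag names come from the code above, while types, defaults and help strings are assumptions):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'ycbv', 'Name of the BOP dataset.')
tf.app.flags.DEFINE_string('split', 'test', 'Name of the dataset split.')
tf.app.flags.DEFINE_string('split_type', None, 'Type of the split (optional).')
tf.app.flags.DEFINE_list('scene_ids', None, 'Scene IDs (mapped to int above).')
tf.app.flags.DEFINE_string('targets_filename', None,
                           'BOP targets JSON, e.g. test_targets_bop19.json.')
tf.app.flags.DEFINE_string('output_dir', '/tmp', 'Where to save the list.')

if __name__ == '__main__':
  tf.app.run()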
Example #2
def get_target_list(target_path):
    # Group BOP targets by (scene_id, im_id): one entry per image, collecting
    # the object ids and instance counts that belong to that image.
    targets = inout.load_json(target_path)
    prev_imid = -1
    prev_sid = -1
    obj_ids = []
    inst_counts = []
    target_list = []
    for tgt in targets:
        im_id = tgt['im_id']
        inst_count = tgt['inst_count']
        obj_id = tgt['obj_id']
        scene_id = tgt['scene_id']
        if prev_imid != im_id or prev_sid != scene_id:
            if prev_imid != -1:
                target_list.append([prev_sid, prev_imid, obj_ids, inst_counts])
            obj_ids = [obj_id]
            inst_counts = [inst_count]
        else:
            obj_ids.append(obj_id)
            inst_counts.append(inst_count)
        prev_imid = im_id
        prev_sid = scene_id
    if prev_imid != -1:  # append the last image; guards against empty targets
        target_list.append([prev_sid, prev_imid, obj_ids, inst_counts])
    return target_list
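A usage sketch for the function above, assuming a BOP-style targets file (each entry carries scene_id, im_id, obj_id and inst_count, exactly the keys read in the loop); the path is a placeholder:

target_list = get_target_list("/path/to/ycbv/test_targets_bop19.json")
for scene_id, im_id, obj_ids, inst_counts in target_list:
    # one entry per (scene, image) pair, with all object ids and instance
    # counts of that image grouped together
    print(scene_id, im_id, obj_ids, inst_counts)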
Example #3
    def __init__(self, cfg):
        self.cfg = cfg
        self.rgb_topic = cfg['rgb_topic']
        self.depth_topic = cfg['depth_topic']
        self.camK = np.array(cfg['cam_K']).reshape(3, 3)
        self.im_width = int(cfg['im_width'])
        self.im_height = int(cfg['im_height'])
        self.inlier_th = float(cfg['inlier_th'])
        self.ransac_th = float(cfg['ransac_th'])
        self.pub_before_icp = False
        self.graph = tf_backend.Graph()
        self.icp = int(cfg['icp']) == 1
        self.model_params = inout.load_json(cfg['norm_factor_fn'])
        self.detection_labels = cfg['obj_labels']  # labels of corresponding detections
        n_objs = int(cfg['n_objs'])
        self.target_objs = cfg['target_obj_name']
        self.colors = np.random.randint(0, 255, (n_objs, 3))

        with self.graph.as_default():
            if (detect_type == "rcnn"):
                #Load mask r_cnn
                '''
                standard estimation parameter for Mask R-CNN (identical for all dataset)
                '''
                self.config = BopInferenceConfig(dataset="ros",
                                                 num_classes=n_objs + 1,
                                                 im_width=self.im_width,
                                                 im_height=self.im_height)
                self.config.DETECTION_MIN_CONFIDENCE = 0.3
                self.config.DETECTION_MAX_INSTANCES = 30
                self.config.DETECTION_NMS_THRESHOLD = 0.5

                self.detection_model = modellib.MaskRCNN(mode="inference",
                                                         config=self.config,
                                                         model_dir="/")
                self.detection_model.load_weights(
                    cfg['path_to_detection_weights'], by_name=True)

            self.obj_models = []
            self.obj_bboxes = []

            self.obj_pix2pose = []
            pix2pose_dir = cfg['path_to_pix2pose_weights']
            th_outlier = cfg['outlier_th']
            self.model_scale = cfg['model_scale']
            for t_id, target_obj in enumerate(self.target_objs):
                weight_fn = os.path.join(
                    pix2pose_dir, "{:02d}/inference.hdf5".format(target_obj))
                print("Load pix2pose weights from ", weight_fn)
                model_param = self.model_params['{}'.format(target_obj)]
                obj_param = bop_io.get_model_params(model_param)
                recog_temp = recog.pix2pose(weight_fn,
                                            camK=self.camK,
                                            res_x=self.im_width,
                                            res_y=self.im_height,
                                            obj_param=obj_param,
                                            th_ransac=self.ransac_th,
                                            th_outlier=th_outlier,
                                            th_inlier=self.inlier_th)
                self.obj_pix2pose.append(recog_temp)
                ply_fn = os.path.join(self.cfg['model_dir'],
                                      self.cfg['ply_files'][t_id])
                if (self.icp):
                    #for pyrender rendering
                    obj_model = trimesh.load_mesh(ply_fn)
                    obj_model.vertices = obj_model.vertices * self.model_scale
                    mesh = pyrender.Mesh.from_trimesh(obj_model)
                    self.obj_models.append(mesh)
                    self.obj_bboxes.append(
                        self.get_3d_box_points(obj_model.vertices))

                else:
                    obj_model = inout.load_ply(ply_fn)
                    self.obj_bboxes.append(
                        self.get_3d_box_points(obj_model['pts']))

            # Initialize the ROS node and the publishers/subscribers once,
            # after all per-object models are loaded (the original placed this
            # inside the loop above, which would re-init the node per object).
            rospy.init_node('pix2pose', anonymous=True)
            self.detect_pub = rospy.Publisher("/pix2pose/detected_object",
                                              ros_image)

            #self.pose_pub = rospy.Publisher("/pix2pose/object_pose", Pose)
            self.pose_pub = rospy.Publisher("/pix2pose/object_pose",
                                            ros_image)
            self.have_depth = False

            if self.icp:
                self.sub_depth = rospy.Subscriber(self.depth_topic,
                                                  ros_image,
                                                  self.callback_depth,
                                                  queue_size=1)
                if self.pub_before_icp:
                    self.pose_pub_noicp = rospy.Publisher(
                        "/pix2pose/object_pose_noicp", ros_image)

        self.depth_img = np.zeros((self.im_height, self.im_width))
        self.sub = rospy.Subscriber(self.rgb_topic,
                                    ros_image,
                                    self.callback,
                                    queue_size=1)
Example #4
import os
import sys

import cv2
from matplotlib import pyplot as plt

#selection of detection pipelines
import keras
import tensorflow as tf_backend

ROOT_DIR = os.path.abspath(".")
sys.path.append(ROOT_DIR)  # To find local version of the library
sys.path.append("./bop_toolkit")

from bop_toolkit_lib import inout
from tools import bop_io

cfg_path_detection = "ros_kinetic/ros_config.json"
cfg = inout.load_json(cfg_path_detection)

detect_type = cfg['detection_pipeline']
if detect_type == 'rcnn':
    detection_dir = cfg['path_to_detection_pipeline']
    sys.path.append(detection_dir)
    from mrcnn.config import Config
    from mrcnn import utils
    import mrcnn.model as modellib
    from tools.mask_rcnn_util import BopInferenceConfig

#"/hsrb/head_rgbd_sensor/rgb/image_rect_color",

icp = int(cfg['icp']) == 1
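For reference, a sketch of the ros_config.json layout these snippets read. The key names are collected from Examples #3 and #4; every value below is a placeholder, not a tested configuration:

example_cfg = {
    "detection_pipeline": "rcnn",
    "path_to_detection_pipeline": "/path/to/Mask_RCNN",
    "path_to_detection_weights": "/path/to/mask_rcnn_weights.h5",
    "path_to_pix2pose_weights": "/path/to/pix2pose_weights",
    "rgb_topic": "/camera/rgb/image_raw",
    "depth_topic": "/camera/depth/image_raw",
    "cam_K": [572.4, 0.0, 325.3, 0.0, 573.6, 242.0, 0.0, 0.0, 1.0],
    "im_width": 640,
    "im_height": 480,
    "inlier_th": 0.1,
    "ransac_th": 0.01,
    "outlier_th": 0.2,
    "norm_factor_fn": "/path/to/norm_factor.json",
    "obj_labels": [1, 2],       # labels of corresponding detections
    "n_objs": 2,
    "target_obj_name": [1, 2],  # formatted as "{:02d}" in Example #3
    "model_dir": "/path/to/models",
    "ply_files": ["obj_000001.ply", "obj_000002.ply"],
    "model_scale": 0.001,
    "icp": 1,
}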
Example #5
import os
import sys

import numpy as np

import bop_toolkit_lib.inout as inout
import bop_toolkit_lib.misc as misc
from bop_toolkit_lib.renderer_py import RendererPython
import bop_toolkit_lib.pose_error as error
sys.path.append(".")
sys.path.append("./src")
import src.sampling as sampling
import src.plausibility as plausibility


YCBV_PATH = "/PATH_TO/BOP19/ycbv"
assert YCBV_PATH != "/PATH_TO/BOP19/ycbv"  # set to your model directory

# load model meta data
DATA_PATH = os.path.join(os.path.abspath(os.getcwd()), "data")
models_info = inout.load_json(os.path.join(YCBV_PATH, "models_eval/models_info.json"), keys_to_int=True)
models_meta = inout.load_json(os.path.join(DATA_PATH, "models_meta.json"), keys_to_int=True)
obj_roff = np.array([models_meta[obj_id]['R_to_canonical'] for obj_id in range(1, 22)]).reshape(21, 3, 3)
obj_toff = np.array([models_meta[obj_id]['t_to_canonical'] for obj_id in range(1, 22)]).reshape(21, 3)


def init(target):
    """
    Initialize scene for given [target] object to evaluate pose-error functions. Prepare rendering and evaluation.
    """

    renderer = RendererPython(640, 480, bg_color=(1.0, 1.0, 1.0, 1.0), shading='flat')

    # base scene (static support)
    ground_volume = resource.get(f"{DATA_PATH}/cube.ply").convert('VolumeGrid')
    renderer.add_object(0, f"{DATA_PATH}/cube.ply")
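After add_object, a view can be rendered with render_object, following bop_toolkit's renderer interface; a minimal sketch in which the pose and intrinsics are placeholders:

R = np.eye(3)                          # placeholder model-to-camera rotation
t = np.array([[0.0], [0.0], [500.0]])  # placeholder translation (mm)
fx, fy, cx, cy = 572.4, 573.6, 325.3, 242.0  # placeholder intrinsics
rendering = renderer.render_object(0, R, t, fx, fy, cx, cy)
# assuming the default 'rgb+depth' mode of RendererPython:
rgb, depth = rendering['rgb'], rendering['depth']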
Example #6
import os
import sys

import numpy as np

gpu_id = sys.argv[1]
if gpu_id == '-1':
    gpu_id = ''
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
import tensorflow as tf
from bop_toolkit_lib import inout
from tools import bop_io
from pix2pose_util import data_io as dataio
from pix2pose_model import ae_model as ae
from pix2pose_model import recognition as recog
from pix2pose_util.common_util import get_bbox_from_mask


cfg_fn = sys.argv[2]
cfg = inout.load_json(cfg_fn)
detect_type = cfg['detection_pipeline']
if detect_type == 'rcnn':
    detection_dir = cfg['path_to_detection_pipeline']
    sys.path.append(detection_dir)
    from mrcnn.config import Config
    from mrcnn import utils
    import mrcnn.model as modellib
    from tools.mask_rcnn_util import BopInferenceConfig
    def get_rcnn_detection(image_t, model):
        image_t_resized, window, scale, padding, crop = utils.resize_image(
                        np.copy(image_t),
                        min_dim=config.IMAGE_MIN_DIM,
                        min_scale=config.IMAGE_MIN_SCALE,
                        max_dim=config.IMAGE_MAX_DIM,
                        mode=config.IMAGE_RESIZE_MODE)
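        # Plausible continuation (an assumption, not the original code): run
        # matterport-style Mask R-CNN on the resized image; boxes would still
        # need mapping back to the original image via `window` and `scale`.
        results = model.detect([image_t_resized], verbose=0)
        r = results[0]
        return r['rois'], r['class_ids'], r['scores'], r['masks']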
Example #7
dists = []
azimuths = []
elevs = []
visib_fracts = []
ims_count = 0
for scene_id in scene_ids:
    misc.log('Processing - dataset: {} ({}, {}), scene: {}'.format(
        p['dataset'], p['dataset_split'], p['dataset_split_type'], scene_id))

    # Load GT poses.
    scene_gt = inout.load_scene_gt(
        dp_split['scene_gt_tpath'].format(scene_id=scene_id))

    # Load info about the GT poses.
    scene_gt_info = inout.load_json(
        dp_split['scene_gt_info_tpath'].format(scene_id=scene_id),
        keys_to_int=True)

    ims_count += len(scene_gt)

    for im_id in scene_gt.keys():
        for gt_id, im_gt in enumerate(scene_gt[im_id]):

            # Object distance.
            dist = np.linalg.norm(im_gt['cam_t_m2c'])
            dists.append(dist)

            # Camera origin in the model coordinate system.
            cam_orig_m = -np.linalg.inv(im_gt['cam_R_m2c']).dot(
                im_gt['cam_t_m2c'])
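            # Plausible continuation (a sketch, assuming `import math` at the
            # top of the script; not necessarily the original code): fill the
            # azimuths/elevs lists accumulated above.
            x, y, z = cam_orig_m.flatten()
            azimuth = math.atan2(y, x)
            if azimuth < 0:
                azimuth += 2.0 * math.pi  # map to [0, 2*pi)
            azimuths.append(180.0 * azimuth / math.pi)

            elev = math.atan2(z, math.sqrt(x * x + y * y))  # in [-pi/2, pi/2]
            elevs.append(180.0 * elev / math.pi)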
Example #8
def get_dataset(cfg, dataset, train=True, incl_param=False, eval=False, eval_model=False):
    # Return serialized dataset information.
    bop_dir = cfg['dataset_dir']
    if eval_model:
        postfix_model = '_eval'
    else:
        postfix_model = ''
    if dataset == 'lmo':
        bop_dataset_dir = os.path.join(bop_dir, "lmo")
        test_dir = bop_dataset_dir + "/test"
        train_dir = bop_dataset_dir + "/train"
        model_dir = bop_dataset_dir + "/models" + postfix_model
        model_scale = 0.001
    elif dataset == 'ruapc':
        bop_dataset_dir = os.path.join(bop_dir, "ruapc")
        test_dir = bop_dataset_dir + "/test"
        train_dir = bop_dataset_dir + "/train"
        model_dir = bop_dataset_dir + "/models" + postfix_model
        model_scale = 0.001
    elif dataset == 'hb':
        bop_dataset_dir = os.path.join(bop_dir, "hb")
        test_dir = bop_dataset_dir + "/test"
        train_dir = bop_dataset_dir + "/train"
        model_dir = bop_dataset_dir + "/models" + postfix_model
        model_scale = 0.0001
    elif dataset == 'icbin':
        bop_dataset_dir = os.path.join(bop_dir, "icbin")
        test_dir = bop_dataset_dir + "/test"
        train_dir = bop_dataset_dir + "/train"
        model_dir = bop_dataset_dir + "/models" + postfix_model
        model_scale = 0.001
    elif dataset == 'itodd':
        bop_dataset_dir = os.path.join(bop_dir, "itodd")
        test_dir = bop_dataset_dir + "/test"
        train_dir = bop_dataset_dir + "/train"
        model_dir = bop_dataset_dir + "/models" + postfix_model
        model_scale = 0.001
    elif dataset == 'tudl':
        bop_dataset_dir = os.path.join(bop_dir, "tudl")
        test_dir = bop_dataset_dir + "/test"
        train_dir = bop_dataset_dir + "/train_real"
        model_dir = bop_dataset_dir + "/models" + postfix_model
        model_scale = 0.001
    elif dataset == 'tless':
        bop_dataset_dir = os.path.join(bop_dir, "tless")
        test_dir = bop_dataset_dir + "/test_primesense"
        train_dir = bop_dataset_dir + "/train_primesense"
        if not train and not eval_model:
            model_dir = bop_dataset_dir + "/models_reconst"  # use this only for vis
        elif eval_model:
            model_dir = bop_dataset_dir + "/models_eval"
        else:
            model_dir = bop_dataset_dir + "/models_cad"
        model_scale = 0.001
    elif dataset == 'ycbv':
        bop_dataset_dir = os.path.join(bop_dir, "ycbv")
        test_dir = bop_dataset_dir + "/test"
        train_dir = bop_dataset_dir + "/train"
        model_dir = bop_dataset_dir + "/models" + postfix_model
        model_scale = 0.001
    elif dataset == 'lm':
        bop_dataset_dir = os.path.join(bop_dir, "lm")
        test_dir = bop_dataset_dir + "/test"
        train_dir = bop_dataset_dir + "/train"
        model_dir = bop_dataset_dir + "/models" + postfix_model
        # NOTE: the original source overrode model_dir here with a hard-coded
        # local path ("/home/kiru/media/hdd_linux/.../model_eval"); removed,
        # since it only works on the original author's machine.
        model_scale = 0.001
    
    model_info = inout.load_json(os.path.join(model_dir,"models_info.json"))
    if(dataset=='ycbv'):
        cam_param_global = inout.load_cam_params(os.path.join(bop_dataset_dir,"camera_uw.json"))
    else:
        cam_param_global = inout.load_cam_params(os.path.join(bop_dataset_dir,"camera.json"))
    
    im_size=np.array(cam_param_global['im_size'])[::-1]
    
    model_plys=[]
    rgb_files=[]
    depth_files=[]
    mask_files=[]
    gts=[]
    params=[]
    model_ids = []
    for model_id in model_info.keys():
        ply_fn = os.path.join(model_dir, "obj_{:06d}.ply".format(int(model_id)))
        if os.path.exists(ply_fn):
            model_ids.append(int(model_id))  # add the model id only if the .ply file exists

    model_ids = np.sort(np.array(model_ids))
    for model_id in model_ids:
        ply_fn = os.path.join(model_dir, "obj_{:06d}.ply".format(int(model_id)))
        model_plys.append(ply_fn)
        print(model_id, ply_fn)
    print("If some models are not listed above, make sure the corresponding .ply files exist.")
    if train:
        target_dir = train_dir
        if os.path.exists(target_dir):
            for scene_dir in os.listdir(target_dir):  # loop over a sequence
                current_dir = target_dir + "/" + scene_dir
                if os.path.exists(os.path.join(current_dir,"scene_camera.json")):
                    scene_params = inout.load_scene_camera(os.path.join(current_dir,"scene_camera.json"))            
                    scene_gt_fn = os.path.join(current_dir,"scene_gt.json")
                    has_gt=False
                    if os.path.exists(scene_gt_fn):
                        scene_gts = inout.load_scene_gt(scene_gt_fn)
                        has_gt=True
                    for img_id in sorted(scene_params.keys()):
                        im_id = int(img_id)
                        if(dataset=="itodd" and not(train)):
                            rgb_fn = os.path.join(current_dir+"/gray","{:06d}.tif".format(im_id))
                        else:
                            rgb_fn = os.path.join(current_dir+"/rgb","{:06d}.png".format(im_id))
                        depth_fn = os.path.join(current_dir+"/depth","{:06d}.png".format(im_id))
                        if(train):
                            if(dataset=='hb' or dataset=='itodd' or dataset=='ycbv'):
                                mask_fn = os.path.join(current_dir+"/mask","{:06d}.png".format(im_id))
                            else:
                                mask_fn = os.path.join(current_dir+"/mask","{:06d}_000000.png".format(im_id))
                            mask_files.append(mask_fn)
                        rgb_files.append(rgb_fn)
                        depth_files.append(depth_fn)
                        if has_gt:
                            gts.append(scene_gts[im_id])
                        params.append(scene_params[im_id])
    else:
        target_dir = test_dir
                 
    if(incl_param):
        return bop_dataset_dir,target_dir,model_plys,model_info,model_ids,rgb_files,depth_files,mask_files,gts,cam_param_global,params
    else:
        return bop_dataset_dir,target_dir,model_plys,model_info,model_ids,rgb_files,depth_files,mask_files,gts,cam_param_global
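A usage sketch for get_dataset, mirroring the call in Example #12 (cfg must provide 'dataset_dir'; the unpacking follows the incl_param=True return order above):

bop_dir, target_dir, model_plys, model_info, model_ids, rgb_files, \
    depth_files, mask_files, gts, cam_param_global, params = \
    get_dataset(cfg, 'ycbv', train=True, incl_param=True)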
Example #9
dp_model = dataset_params.get_model_params(p['datasets_path'], p['dataset'],
                                           model_type)
dp_camera = dataset_params.get_camera_params(p['datasets_path'], p['dataset'])

K = dp_camera['K']
fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]

# Create a renderer.
width, height = dp_camera['im_size']
ren = renderer.create_renderer(width,
                               height,
                               p['renderer_type'],
                               mode='rgb',
                               shading='flat')

# Load meta info about the models (including symmetries).
models_info = inout.load_json(dp_model['models_info_path'], keys_to_int=True)

for obj_id in dp_model['obj_ids']:

    # Load object model.
    misc.log('Loading 3D model of object {}...'.format(obj_id))
    model_path = dp_model['model_tpath'].format(obj_id=obj_id)
    ren.add_object(obj_id, model_path)

    poses = misc.get_symmetry_transformations(models_info[obj_id],
                                              p['max_sym_disc_step'])

    for pose_id, pose in enumerate(poses):

        for view_id, view in enumerate(p['views']):
Example #10
    def run(self):
        bop_dataset_path = self.config.get_string("bop_dataset_path")
        scene_id = self.config.get_int("scene_id")
        split = self.config.get_string("split", "test")
        model_type = self.config.get_string("model_type", "")
        mm2m = 0.001 if self.config.get_bool("mm2m") else 1

        datasets_path = os.path.dirname(bop_dataset_path)
        dataset = os.path.basename(bop_dataset_path)
        print("bob: {}, dataset_path: {}".format(bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=model_type if model_type else None)
        camera_p = dataset_params.get_camera_params(datasets_path, dataset)

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    split, dataset))

        sc_gt = inout.load_scene_gt(
            split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
        sc_camera = inout.load_json(
            split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))

        bpy.context.scene.render.resolution_x = self.config.get_int(
            "resolution_x", split_p['im_size'][0])
        bpy.context.scene.render.resolution_y = self.config.get_int(
            "resolution_y", split_p['im_size'][1])
        #bpy.context.scene.render.pixel_aspect_x = self.config.get_float("pixel_aspect_x", 1) #split_p['im_size'][0] / split_p['im_size'][1])

        cm = CameraModule(self.config)

        for i, (cam_id, insts) in enumerate(sc_gt.items()):

            cam_K = np.array(sc_camera[str(cam_id)]['cam_K']).reshape(3, 3)

            cam_H_m2c_ref = np.eye(4)
            cam_H_m2c_ref[:3, :3] = np.array(insts[0]['cam_R_m2c']).reshape(
                3, 3)
            cam_H_m2c_ref[:3, 3] = np.array(
                insts[0]['cam_t_m2c']).reshape(3) * mm2m

            if i == 0:
                # define world = first camera
                cam_H_m2w_ref = cam_H_m2c_ref.copy()

                for inst in insts:

                    bpy.ops.import_mesh.ply(
                        filepath=model_p['model_tpath'].format(
                            **{'obj_id': inst['obj_id']}))

                    cam_H_m2c = np.eye(4)
                    cam_H_m2c[:3, :3] = np.array(inst['cam_R_m2c']).reshape(
                        3, 3)
                    cam_H_m2c[:3, 3] = np.array(
                        inst['cam_t_m2c']).reshape(3) * mm2m

                    # world = camera @ i=0
                    cam_H_m2w = cam_H_m2c
                    print('-----------------------------')
                    print("Model: {}".format(cam_H_m2w))
                    print('-----------------------------')

                    cur_obj = bpy.context.selected_objects[-1]
                    cur_obj.matrix_world = Matrix(cam_H_m2w)
                    cur_obj.scale = Vector((mm2m, mm2m, mm2m))

                    mat = self._load_materials(cur_obj)
                    self._link_col_node(mat)

            cam_H_c2w = np.dot(cam_H_m2w_ref, np.linalg.inv(cam_H_m2c_ref))

            print('-----------------------------')
            print("Cam: {}".format(cam_H_c2w))
            print('-----------------------------')

            config = {"location": [0, 0, 0], "rotation": list([0, 0, 0])}
            cm._add_cam_pose(Config(config), Matrix(cam_H_c2w), cam_K)
Example #11
    def run(self):
        """ Load BOP data """

        datasets_path = os.path.dirname(self.bop_dataset_path)
        dataset = os.path.basename(self.bop_dataset_path)

        print("bob: {}, dataset_path: {}".format(self.bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=self.model_type if self.model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path,
            dataset,
            cam_type=self.cam_type if self.cam_type else None)

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=self.split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    self.split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = cam_p['im_size'][0]
        bpy.context.scene.render.resolution_y = cam_p['im_size'][1]

        loaded_objects = []

        # only load all/selected objects here, use other modules for setting poses
        # e.g. camera.CameraSampler / object.ObjectPoseSampler
        if self.scene_id == -1:

            # TLESS exception because images are cropped
            if self.bop_dataset_name in ['tless']:
                cam_p['K'][0, 2] = split_p['im_size'][0] / 2
                cam_p['K'][1, 2] = split_p['im_size'][1] / 2

            # set camera intrinsics
            CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'],
                                                       split_p['im_size'][0],
                                                       split_p['im_size'][1])

            obj_ids = self.obj_ids if self.obj_ids else model_p['obj_ids']
            # if sampling is enabled
            if self.sample_objects:
                loaded_ids = {}
                loaded_amount = 0
                if self.obj_instances_limit != -1 and len(
                        obj_ids
                ) * self.obj_instances_limit < self.num_of_objs_to_sample:
                    raise RuntimeError(
                        "{}'s {} split contains {} objects, but {} objects were requested to sample with "
                        "an instance limit of {}. Raise the limit or decrease the requested "
                        "number of objects.".format(self.bop_dataset_path,
                                                    self.split, len(obj_ids),
                                                    self.num_of_objs_to_sample,
                                                    self.obj_instances_limit))
                while loaded_amount != self.num_of_objs_to_sample:
                    random_id = choice(obj_ids)
                    if random_id not in loaded_ids.keys():
                        loaded_ids.update({random_id: 0})
                    # if there is no limit or if there is one, but it is not reached for this particular object
                    if self.obj_instances_limit == -1 or loaded_ids[
                            random_id] < self.obj_instances_limit:
                        cur_obj = self._load_mesh(random_id,
                                                  model_p,
                                                  scale=self.scale)
                        loaded_ids[random_id] += 1
                        loaded_amount += 1
                        loaded_objects.append(cur_obj)
                    else:
                        print(
                            "ID {} was loaded {} times with a limit of {}. Total loaded amount is {} while {} are "
                            "being requested.".format(
                                random_id, loaded_ids[random_id],
                                self.obj_instances_limit, loaded_amount,
                                self.num_of_objs_to_sample))
            else:
                for obj_id in obj_ids:
                    cur_obj = self._load_mesh(obj_id,
                                              model_p,
                                              scale=self.scale)
                    loaded_objects.append(cur_obj)
            self._set_properties(loaded_objects)

        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(
                **{'scene_id': self.scene_id}))
            sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(
                **{'scene_id': self.scene_id}))
            for i, (cam_id, insts) in enumerate(sc_gt.items()):
                cam_K, cam_H_m2c_ref = self._get_ref_cam_extrinsics_intrinsics(
                    sc_camera, cam_id, insts, self.scale)

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    cur_objs = []
                    # load scene objects and set their poses
                    for inst in insts:
                        cur_objs.append(
                            self._load_mesh(inst['obj_id'],
                                            model_p,
                                            scale=self.scale))
                        self.set_object_pose(cur_objs[-1], inst, self.scale)

                cam_H_c2w = self._compute_camera_to_world_trafo(
                    cam_H_m2w_ref, cam_H_m2c_ref)
                # set camera intrinsics
                CameraUtility.set_intrinsics_from_K_matrix(
                    cam_K, split_p['im_size'][0], split_p['im_size'][1])

                # set camera extrinsics as next frame
                frame_id = CameraUtility.add_camera_pose(cam_H_c2w)

                # Add key frame for camera shift, as it changes from frame to frame in the tless replication
                cam = bpy.context.scene.camera.data
                cam.keyframe_insert(data_path='shift_x', frame=frame_id)
                cam.keyframe_insert(data_path='shift_y', frame=frame_id)

                # Copy object poses to key frame (to be sure)
                for cur_obj in cur_objs:
                    self._insert_key_frames(cur_obj, frame_id)

        # move the origin of the object to the world origin and on top of the X-Y plane
        # makes it easier to place them later on, this does not change the `.location`
        # This is only useful if the BOP objects are not used in a pose estimation scenario.
        move_to_origin = self.config.get_bool("move_origin_to_x_y_plane",
                                              False)
        if move_to_origin:
            LoaderInterface.move_obj_origin_to_bottom_mean_point(
                loaded_objects)
Example #12
        else:
            new_data = data
        np.save(xyz_fn, new_data)


augment_inplane = 30
if len(sys.argv) < 3:
    print(
        "Rendering 3D coordinate images using a converted ply file; the format of the "
        "6D pose challenge (http://cmp.felk.cvut.cz/sixd/challenge_2017/) can be used."
    )
    print(
        "python3 tools/2_2_render_pix2pose_training.py [cfg_fn] [dataset_name]"
    )
else:
    cfg_fn = sys.argv[1]  #"cfg/cfg_bop2019.json"
    cfg = inout.load_json(cfg_fn)

    dataset = sys.argv[2]
    bop_dir,source_dir,model_plys,model_info,model_ids,rgb_files,\
        depth_files,mask_files,gts,cam_param_global,scene_cam =\
             bop_io.get_dataset(cfg,dataset,incl_param=True)

    xyz_target_dir = bop_dir + "/train_xyz"
    im_width, im_height = cam_param_global['im_size']
    cam_K = cam_param_global['K']
    # check that the training image size matches the global camera parameters
    rgb_fn = rgb_files[0]
    img_temp = inout.load_im(rgb_fn)
    if img_temp.shape[0] != im_height or img_temp.shape[1] != im_width:
        print("The size of the training images differs from the test images.")
        im_height = img_temp.shape[0]
Example #13
    def run(self):

        bop_dataset_path = self.config.get_string("bop_dataset_path")
        scene_id = self.config.get_int("scene_id", -1)
        obj_ids = self.config.get_list("obj_ids", [])
        split = self.config.get_string("split", "test")
        model_type = self.config.get_string("model_type", "")
        cam_type = self.config.get_string("cam_type", "")
        mm2m = 0.001 if self.config.get_bool("mm2m", False) else 1
        datasets_path = os.path.dirname(bop_dataset_path)
        dataset = os.path.basename(bop_dataset_path)

        print("bob: {}, dataset_path: {}".format(bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=model_type if model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path, dataset, cam_type=cam_type if cam_type else None)
        bpy.data.scenes["Scene"]["num_labels"] = len(model_p['obj_ids'])

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = self.config.get_int(
            "resolution_x", split_p['im_size'][0])
        bpy.context.scene.render.resolution_y = self.config.get_int(
            "resolution_y", split_p['im_size'][1])

        # Collect camera and camera object
        cam_ob = bpy.context.scene.camera
        cam = cam_ob.data
        cam['loaded_resolution'] = bpy.context.scene.render.resolution_x, bpy.context.scene.render.resolution_y
        cam['loaded_intrinsics'] = cam_p[
            'K']  # load default intrinsics from camera.json

        #only load all/selected objects here, use other modules for setting poses, e.g. camera.CameraSampler / object.ObjectPoseSampler
        if scene_id == -1:
            obj_ids = obj_ids if obj_ids else model_p['obj_ids']
            for obj_id in obj_ids:
                self._load_mesh(obj_id, model_p, mm2m=mm2m)
        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(
                split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
            sc_camera = inout.load_json(
                split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))

            cm = CameraModule(self.config)

            for i, (cam_id, insts) in enumerate(sc_gt.items()):

                cam_K = np.array(sc_camera[str(cam_id)]['cam_K']).reshape(3, 3)

                cam_H_m2c_ref = np.eye(4)
                cam_H_m2c_ref[:3, :3] = np.array(
                    insts[0]['cam_R_m2c']).reshape(3, 3)
                cam_H_m2c_ref[:3, 3] = np.array(
                    insts[0]['cam_t_m2c']).reshape(3) * mm2m

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    for inst in insts:
                        cur_obj = self._load_mesh(inst['obj_id'],
                                                  model_p,
                                                  mm2m=mm2m)

                        cam_H_m2c = np.eye(4)
                        cam_H_m2c[:3, :3] = np.array(
                            inst['cam_R_m2c']).reshape(3, 3)
                        cam_H_m2c[:3, 3] = np.array(
                            inst['cam_t_m2c']).reshape(3) * mm2m

                        # world = camera @ i=0
                        cam_H_m2w = cam_H_m2c
                        print('-----------------------------')
                        print("Model: {}".format(cam_H_m2w))
                        print('-----------------------------')

                        cur_obj.matrix_world = Matrix(cam_H_m2w)

                cam_H_c2w = np.dot(cam_H_m2w_ref, np.linalg.inv(cam_H_m2c_ref))

                print('-----------------------------')
                print("Cam: {}".format(cam_H_c2w))
                print('-----------------------------')

                config = {"location": [0, 0, 0], "rotation": list([0, 0, 0])}
                cm._add_cam_pose(Config(config), Matrix(cam_H_c2w), cam_K)
Example #14
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    # Load the list of examples.
    examples_path = os.path.join(config.TF_DATA_PATH, 'example_lists',
                                 FLAGS.examples_filename)
    tf.logging.info(
        'Loading a list of examples from: {}'.format(examples_path))
    examples_list = tfrecord.load_example_list(examples_path)

    # Load dataset parameters.
    dp_split = dataset_params.get_split_params(config.BOP_PATH, FLAGS.dataset,
                                               FLAGS.split, FLAGS.split_type)

    # Pre-load camera parameters and ground-truth annotations.
    scene_gt = {}
    scene_gt_info = {}
    scene_camera = {}
    scene_ids = set([e['scene_id'] for e in examples_list])
    for scene_id in scene_ids:

        scene_camera[scene_id] = inout.load_scene_camera(
            dp_split['scene_camera_tpath'].format(scene_id=scene_id))

        if FLAGS.add_gt:
            scene_gt[scene_id] = inout.load_scene_gt(
                dp_split['scene_gt_tpath'].format(scene_id=scene_id))
            scene_gt_info[scene_id] = inout.load_json(
                dp_split['scene_gt_info_tpath'].format(scene_id=scene_id),
                keys_to_int=True)

    # Check the name of the file with examples.
    examples_end = '_examples.txt'
    if not FLAGS.examples_filename.endswith(examples_end):
        raise ValueError(
            'Name of the file with examples must end with {}.'.format(
                examples_end))

    # Prepare writer of the TFRecord file.
    output_name = FLAGS.examples_filename.split(examples_end)[0]
    output_path = os.path.join(FLAGS.output_dir, output_name + '.tfrecord')
    writer = tf.python_io.TFRecordWriter(output_path)
    tf.logging.info('File to be created: {}'.format(output_path))

    # Optionally shuffle the examples.
    if FLAGS.shuffle:
        random.shuffle(examples_list)

    # Write the examples to the TFRecord file.
    w_start_t = time.time()

    create_tf_example_partial = partial(create_tf_example,
                                        dp_split=dp_split,
                                        scene_camera=scene_camera,
                                        scene_gt=scene_gt,
                                        scene_gt_info=scene_gt_info)

    for example_id, example in enumerate(examples_list):
        if example_id % 50 == 0:
            tf.logging.info('Processing example {}/{}'.format(
                example_id + 1, len(examples_list)))

        tf_example, _ = create_tf_example_partial(example)
        writer.write(tf_example)

    # Close the writer.
    writer.close()

    w_total_t = time.time() - w_start_t
    tf.logging.info('Writing took {} s.'.format(w_total_t))
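writer.write expects serialized bytes, so create_tf_example presumably returns a serialized tf.train.Example plus side info. A minimal hypothetical sketch under that assumption (the feature keys and the ignored second return value are guesses, not the original implementation, which would also encode images and annotations):

def create_tf_example(example, dp_split, scene_camera, scene_gt=None,
                      scene_gt_info=None):
    # Hypothetical sketch: pack the example ids into a tf.train.Example and
    # return it serialized.
    feature = {
        'scene_id': tf.train.Feature(
            int64_list=tf.train.Int64List(value=[example['scene_id']])),
        'im_id': tf.train.Feature(
            int64_list=tf.train.Int64List(value=[example['im_id']])),
    }
    tf_example = tf.train.Example(features=tf.train.Features(feature=feature))
    return tf_example.SerializeToString(), None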
Example #15
pose_est_method = m3_args.get('methods', 'object_pose_estimator')
pose_refiner_method = m3_args.get('methods', 'object_pose_refiner')

mask_inf_time = m3_args.getfloat('mask_rcnn', 'inference_time')
mask_base = m3_args.get('mask_rcnn', 'path_to_masks')
shift_depth = (-1, -4) if dataset_name == 'tless' else (0, 0)
# if detector_method:
# from retinanet_detector import RetinaNetDetector
# detector = m3vision.get_detector(detector_method, m3_config_path)
# detector = RetinaNetDetector('/net/rmc-lx0314/home_local/sund_ma/tmp/resnet50_csv_27_frozen2.h5', m3_config_path)

dp_split = dataset_params.get_split_params(datasets_path,
                                           dataset_name,
                                           split,
                                           split_type=split_type)
targets = inout.load_json(
    os.path.join(dp_split['base_path'], 'test_targets_bop19.json'))

if pose_est_method:
    # mp_pose_estimator = AePoseEstimator(os.path.join(workspace_path,'cfg_m3vision/test_config.cfg'))
    mp_pose_estimator = MPPoseEstimator(m3_config_path)

    if pose_refiner_method:
        pose_refiner = m3vision.get_pose_refiner(pose_refiner_method,
                                                 m3_config_path)

        if dataset_name == 'ycbv':
            for m in list(pose_refiner.models.values()):
                for mp in m['pts']:
                    mp[0] *= 1000
                    mp[1] *= 1000
                    mp[2] *= 1000
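A side note on the meter-to-millimeter loop above: with numpy the per-coordinate writes can be collapsed (a sketch, assuming each m['pts'] is an (N, 3) ndarray):

for m in pose_refiner.models.values():
    m['pts'] = np.asarray(m['pts']) * 1000.0  # meters -> millimeters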
Example #16
    os.path.join(config.output_path, 'vis_est_poses'),

    # Path templates for output images.
    'vis_rgb_tpath':
    os.path.join('{vis_path}', '{result_name}', '{scene_id:06d}',
                 '{vis_name}.jpg'),
    'vis_depth_diff_tpath':
    os.path.join('{vis_path}', '{result_name}', '{scene_id:06d}',
                 '{vis_name}_depth_diff.jpg'),
}
################################################################################

# Load colors.
colors_path = os.path.join(os.path.dirname(visualization.__file__),
                           'colors.json')
colors = inout.load_json(colors_path)

for result_fname in p['result_filenames']:
    misc.log('Processing: ' + result_fname)

    # Parse info about the method and the dataset from the filename.
    result_name = os.path.splitext(os.path.basename(result_fname))[0]
    result_info = result_name.split('_')
    method = result_info[0]
    dataset_info = result_info[1].split('-')
    dataset = dataset_info[0]
    split = dataset_info[1]
    split_type = dataset_info[2] if len(dataset_info) > 2 else None

    # Load dataset parameters.
    dp_split = dataset_params.get_split_params(p['datasets_path'], dataset,
Example #17
  score_sign = misc.get_score_signature(
    p['correct_th'][err_type], p['visib_gt_min'])

  misc.log('Calculating score - error: {}, method: {}, dataset: {}.'.format(
    err_type, method, dataset))

  # Load dataset parameters.
  dp_split = dataset_params.get_split_params(
    p['datasets_path'], dataset, split, split_type)

  model_type = 'eval'
  dp_model = dataset_params.get_model_params(
    p['datasets_path'], dataset, model_type)

  # Load info about the object models.
  models_info = inout.load_json(dp_model['models_info_path'], keys_to_int=True)

  # Load the estimation targets to consider.
  targets = inout.load_json(
    os.path.join(dp_split['base_path'], p['targets_filename']))
  scene_im_ids = {}

  # Organize the targets by scene, image and object.
  misc.log('Organizing estimation targets...')
  targets_org = {}
  for target in targets:
    targets_org.setdefault(target['scene_id'], {}).setdefault(
      target['im_id'], {})[target['obj_id']] = target
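  # For reference, the nesting built above (illustrative values):
  # targets_org[scene_id][im_id][obj_id] -> original target dict, e.g.
  # targets_org[1][25][5] == {'scene_id': 1, 'im_id': 25, 'obj_id': 5, 'inst_count': 1}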

  # Go through the test scenes and match estimated poses to GT poses.
  # ----------------------------------------------------------------------------
Example #18
    def run(self):
        """ Load BOP data """

        datasets_path = os.path.dirname(self.bop_dataset_path)
        dataset = os.path.basename(self.bop_dataset_path)

        print("bob: {}, dataset_path: {}".format(self.bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=self.model_type if self.model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path,
            dataset,
            cam_type=self.cam_type if self.cam_type else None)
        bpy.data.scenes["Scene"]["num_labels"] = len(model_p['obj_ids'])

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=self.split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    self.split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = split_p['im_size'][0]
        bpy.context.scene.render.resolution_y = split_p['im_size'][1]

        # Collect camera and camera object
        cam_ob = bpy.context.scene.camera
        cam = cam_ob.data
        cam['loaded_resolution'] = split_p['im_size'][0], split_p['im_size'][1]
        # load default intrinsics from camera.json
        cam['loaded_intrinsics'] = cam_p['K']

        config = Config({})
        camera_module = CameraModule(config)
        camera_module._set_cam_intrinsics(cam, config)

        loaded_objects = []

        # only load all/selected objects here, use other modules for setting poses
        # e.g. camera.CameraSampler / object.ObjectPoseSampler
        if self.scene_id == -1:
            obj_ids = self.obj_ids if self.obj_ids else model_p['obj_ids']
            # if sampling is enabled
            if self.sample_objects:
                loaded_ids = {}
                loaded_amount = 0
                if self.obj_instances_limit != -1 and len(
                        obj_ids
                ) * self.obj_instances_limit < self.num_of_objs_to_sample:
                    raise RuntimeError(
                        "{}'s {} split contains {} objects, but {} objects were requested to sample with "
                        "an instance limit of {}. Raise the limit or decrease the requested "
                        "number of objects.".format(self.bop_dataset_path,
                                                    self.split, len(obj_ids),
                                                    self.num_of_objs_to_sample,
                                                    self.obj_instances_limit))
                while loaded_amount != self.num_of_objs_to_sample:
                    random_id = choice(obj_ids)
                    if random_id not in loaded_ids.keys():
                        loaded_ids.update({random_id: 0})
                    # if there is no limit or if there is one, but it is not reached for this particular object
                    if self.obj_instances_limit == -1 or loaded_ids[
                            random_id] < self.obj_instances_limit:
                        cur_obj = self._load_mesh(random_id,
                                                  model_p,
                                                  scale=self.scale)
                        loaded_ids[random_id] += 1
                        loaded_amount += 1
                        loaded_objects.append(cur_obj)
                    else:
                        print(
                            "ID {} was loaded {} times with a limit of {}. Total loaded amount is {} while {} are "
                            "being requested.".format(
                                random_id, loaded_ids[random_id],
                                self.obj_instances_limit, loaded_amount,
                                self.num_of_objs_to_sample))
            else:
                for obj_id in obj_ids:
                    cur_obj = self._load_mesh(obj_id,
                                              model_p,
                                              scale=self.scale)
                    loaded_objects.append(cur_obj)
            self._set_properties(loaded_objects)

        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(
                **{'scene_id': self.scene_id}))
            sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(
                **{'scene_id': self.scene_id}))
            for i, (cam_id, insts) in enumerate(sc_gt.items()):
                cam_K, cam_H_m2c_ref = self._get_ref_cam_extrinsics_intrinsics(
                    sc_camera, cam_id, insts, self.scale)

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    cur_objs = []
                    # load scene objects and set their poses
                    for inst in insts:
                        cur_objs.append(
                            self._load_mesh(inst['obj_id'],
                                            model_p,
                                            scale=self.scale))
                        self.set_object_pose(cur_objs[-1], inst, self.scale)

                cam_H_c2w = self._compute_camera_to_world_trafo(
                    cam_H_m2w_ref, cam_H_m2c_ref)
                #set camera intrinsics and extrinsics
                config = Config({
                    "cam2world_matrix": list(cam_H_c2w.flatten()),
                    "cam_K": list(cam_K.flatten())
                })
                camera_module._set_cam_intrinsics(cam, config)
                camera_module._set_cam_extrinsics(cam_ob, config)

                # Store new cam pose as next frame
                frame_id = bpy.context.scene.frame_end
                # Copy object poses to next key frame (to be sure)
                for cur_obj in cur_objs:
                    self._insert_key_frames(cur_obj, frame_id)
                camera_module._insert_key_frames(cam, cam_ob, frame_id)
                bpy.context.scene.frame_end = frame_id + 1
Example #19
    def load(bop_dataset_path: str,
             temp_dir: str,
             sys_paths: list,
             model_type: str = "",
             cam_type: str = "",
             split: str = "test",
             scene_id: int = -1,
             obj_ids: list = [],
             sample_objects: bool = False,
             num_of_objs_to_sample: int = None,
             obj_instances_limit: int = -1,
             move_origin_to_x_y_plane: bool = False,
             source_frame: list = ["X", "-Y", "-Z"],
             mm2m: bool = False) -> List[MeshObject]:
        """ Loads the 3D models of any BOP dataset and allows replicating BOP scenes

        - Interfaces with the bop_toolkit; allows loading of train, val and test splits
        - Relative camera poses are loaded/computed with respect to a reference model
        - Sets real camera intrinsics

        :param bop_dataset_path: Full path to a specific bop dataset e.g. /home/user/bop/tless.
        :param temp_dir: A temp directory which is used for writing the temporary .ply file.
        :param sys_paths: System paths to append.
        :param model_type: Optionally, specify type of BOP model.  Available: [reconst, cad or eval].
        :param cam_type: Camera type. If not defined, dataset-specific default camera type is used.
        :param split: Optionally, test or val split depending on BOP dataset.
        :param scene_id: Optionally, specify BOP dataset scene to synthetically replicate. Default: -1 (no scene is replicated,
                         only BOP Objects are loaded).
        :param obj_ids: List of object ids to load. Default: [] (all objects from the given BOP dataset if scene_id is not
                        specified).
        :param sample_objects: Toggles object sampling from the specified dataset.
        :param num_of_objs_to_sample: Amount of objects to sample from the specified dataset. If this amount is bigger than the dataset
                                      actually contains, then all objects will be loaded.
        :param obj_instances_limit: Limits the amount of object copies when sampling. Default: -1 (no limit).
        :param move_origin_to_x_y_plane: Move center of the object to the lower side of the object, this will not work when used in combination with
                                         pose estimation tasks! This is designed for the use-case where BOP objects are used as filler objects in
                                         the background.
        :param source_frame: Can be used if the given positions and rotations are specified in frames different from the blender
                            frame. Has to be a list of three strings. Example: ['X', '-Z', 'Y']: Point (1,2,3) will be transformed
                            to (1, -3, 2). Available: ['X', 'Y', 'Z', '-X', '-Y', '-Z'].
        :param mm2m: Specify whether to convert poses and models to meters.
        :return: The list of loaded mesh objects.
        """

        for sys_path in sys_paths:
            if 'bop_toolkit' in sys_path:
                sys.path.append(sys_path)

        scale = 0.001 if mm2m else 1
        bop_dataset_name = os.path.basename(bop_dataset_path)
        has_external_texture = bop_dataset_name in ["ycbv", "ruapc"]
        if obj_ids or sample_objects:
            allow_duplication = True
        else:
            allow_duplication = False

        datasets_path = os.path.dirname(bop_dataset_path)
        dataset = os.path.basename(bop_dataset_path)

        print("bob: {}, dataset_path: {}".format(bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=model_type if model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path, dataset, cam_type=cam_type if cam_type else None)

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = cam_p['im_size'][0]
        bpy.context.scene.render.resolution_y = cam_p['im_size'][1]

        loaded_objects = []

        # only load all/selected objects here, use other modules for setting poses
        # e.g. camera.CameraSampler / object.ObjectPoseSampler
        if scene_id == -1:

            # TLESS exception because images are cropped
            if bop_dataset_name in ['tless']:
                cam_p['K'][0, 2] = split_p['im_size'][0] / 2
                cam_p['K'][1, 2] = split_p['im_size'][1] / 2

            # set camera intrinsics
            CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'],
                                                       split_p['im_size'][0],
                                                       split_p['im_size'][1])

            obj_ids = obj_ids if obj_ids else model_p['obj_ids']
            # if sampling is enabled
            if sample_objects:
                loaded_ids = {}
                loaded_amount = 0
                if obj_instances_limit != -1 and len(
                        obj_ids) * obj_instances_limit < num_of_objs_to_sample:
                    raise RuntimeError(
                        "{}'s {} split contains {} objects, but {} objects were requested to sample with "
                        "an instance limit of {}. Raise the limit or decrease the requested "
                        "number of objects.".format(bop_dataset_path, split,
                                                    len(obj_ids),
                                                    num_of_objs_to_sample,
                                                    obj_instances_limit))
                while loaded_amount != num_of_objs_to_sample:
                    random_id = choice(obj_ids)
                    if random_id not in loaded_ids.keys():
                        loaded_ids.update({random_id: 0})
                    # if there is no limit or if there is one, but it is not reached for this particular object
                    if obj_instances_limit == -1 or loaded_ids[
                            random_id] < obj_instances_limit:
                        cur_obj = BopLoader._load_mesh(random_id, model_p,
                                                       bop_dataset_name,
                                                       has_external_texture,
                                                       temp_dir,
                                                       allow_duplication,
                                                       scale)
                        loaded_ids[random_id] += 1
                        loaded_amount += 1
                        loaded_objects.append(cur_obj)
                    else:
                        print(
                            "ID {} was already loaded {} times (limit: {}). Loaded {} of the "
                            "{} requested objects so far.".format(
                                random_id, loaded_ids[random_id],
                                obj_instances_limit, loaded_amount,
                                num_of_objs_to_sample))
            else:
                for obj_id in obj_ids:
                    cur_obj = BopLoader._load_mesh(obj_id, model_p,
                                                   bop_dataset_name,
                                                   has_external_texture,
                                                   temp_dir, allow_duplication,
                                                   scale)
                    loaded_objects.append(cur_obj)

        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(
                split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
            sc_camera = inout.load_json(
                split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))
            for i, (cam_id, insts) in enumerate(sc_gt.items()):
                cam_K, cam_H_m2c_ref = BopLoader._get_ref_cam_extrinsics_intrinsics(
                    sc_camera, cam_id, insts, scale)

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    cur_objs = []
                    # load scene objects and set their poses
                    for inst in insts:
                        cur_objs.append(
                            BopLoader._load_mesh(inst['obj_id'], model_p,
                                                 bop_dataset_name,
                                                 has_external_texture,
                                                 temp_dir, allow_duplication,
                                                 scale))
                        BopLoader.set_object_pose(cur_objs[-1], inst, scale)

                cam_H_c2w = BopLoader._compute_camera_to_world_trafo(
                    cam_H_m2w_ref, cam_H_m2c_ref, source_frame)
                # set camera intrinsics
                CameraUtility.set_intrinsics_from_K_matrix(
                    cam_K, split_p['im_size'][0], split_p['im_size'][1])

                # set camera extrinsics as next frame
                frame_id = CameraUtility.add_camera_pose(cam_H_c2w)

                # Add key frame for camera shift, as it changes from frame to frame in the tless replication
                cam = bpy.context.scene.camera.data
                cam.keyframe_insert(data_path='shift_x', frame=frame_id)
                cam.keyframe_insert(data_path='shift_y', frame=frame_id)

                # Copy object poses to key frame (to be sure)
                for cur_obj in cur_objs:
                    BopLoader._insert_key_frames(cur_obj, frame_id)

        # move the origin of the object to the world origin and on top of the X-Y plane
        # makes it easier to place them later on, this does not change the `.location`
        # This is only useful if the BOP objects are not used in a pose estimation scenario.
        if move_origin_to_x_y_plane:
            for obj in loaded_objects:
                obj.move_origin_to_bottom_mean_point()

        return loaded_objects
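The 'tless' branch above recenters the principal point because that dataset's images are cropped, as the comment notes. A minimal numpy sketch of the same adjustment (the intrinsic values below are illustrative, not taken from any camera.json):

import numpy as np

# Illustrative pinhole intrinsics: fx, fy on the diagonal,
# principal point (cx, cy) in the last column.
K = np.array([[1075.65, 0.0, 374.0],
              [0.0, 1073.90, 375.0],
              [0.0, 0.0, 1.0]])
im_size = (400, 400)  # (width, height) of the cropped images

# Recenter the principal point, mirroring the 'tless' branch above.
K[0, 2] = im_size[0] / 2.0
K[1, 2] = im_size[1] / 2.0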
Example #20
0
def preprocess_data():
    # Create directory
    Path(f'{SAVE_DIR}/image').mkdir(parents=True, exist_ok=True)
    Path(f'{SAVE_DIR}/model').mkdir(parents=True, exist_ok=True)

    # Parse metadata and load information of images with the target object
    img_info = []

    if not IS_TARGET:
        for sc_id in range(*SCENE_ID_RANGE):
            assert Path(f'{DATASET_DIR}/{sc_id:06d}/scene_gt.json').is_file()
            scene_gt = io.load_scene_gt(
                f'{DATASET_DIR}/{sc_id:06d}/scene_gt.json')

            assert Path(
                f'{DATASET_DIR}/{sc_id:06d}/scene_camera.json').is_file()
            scene_cam = io.load_scene_camera(
                f'{DATASET_DIR}/{sc_id:06d}/scene_camera.json')

            for (im_id, im_gt), im_cam in zip(scene_gt.items(),
                                              scene_cam.values()):
                for gt_id, gt in enumerate(im_gt):
                    if int(gt['obj_id']) == OBJ_ID:
                        assert Path(
                            f'{DATASET_DIR}/{sc_id:06d}/depth/{im_id:06d}.png'
                        ).is_file()
                        depth_path = f'{DATASET_DIR}/{sc_id:06d}/depth/{im_id:06d}.png'

                        assert Path(
                            f'{DATASET_DIR}/{sc_id:06d}/mask_visib/{im_id:06d}_{gt_id:06d}.png'
                        ).is_file()
                        mask_path = f'{DATASET_DIR}/{sc_id:06d}/mask_visib/{im_id:06d}_{gt_id:06d}.png'

                        save_name = f'{sc_id:06d}_{im_id:06d}_{gt_id:06d}'

                        img_info.append(
                            [depth_path, mask_path, im_cam, gt, save_name])
    else:
        targets = io.load_json(
            f'{Path(DATASET_DIR).parent}/test_targets_bop19.json')
        for target in targets:
            if int(target['obj_id']) == OBJ_ID:
                sc_id = target['scene_id']

                assert Path(
                    f'{DATASET_DIR}/{sc_id:06d}/scene_gt.json').is_file()
                scene_gt = io.load_scene_gt(
                    f'{DATASET_DIR}/{sc_id:06d}/scene_gt.json')

                assert Path(
                    f'{DATASET_DIR}/{sc_id:06d}/scene_camera.json').is_file()
                scene_cam = io.load_scene_camera(
                    f'{DATASET_DIR}/{sc_id:06d}/scene_camera.json')

                im_id = int(target['im_id'])
                im_gt, im_cam = scene_gt[im_id], scene_cam[im_id]

                for gt_id, gt in enumerate(im_gt):
                    if int(gt['obj_id']) == OBJ_ID:
                        assert Path(
                            f'{DATASET_DIR}/{sc_id:06d}/depth/{im_id:06d}.png'
                        ).is_file()
                        depth_path = f'{DATASET_DIR}/{sc_id:06d}/depth/{im_id:06d}.png'

                        assert Path(
                            f'{DATASET_DIR}/{sc_id:06d}/mask_visib/{im_id:06d}_{gt_id:06d}.png'
                        ).is_file()
                        mask_path = f'{DATASET_DIR}/{sc_id:06d}/mask_visib/{im_id:06d}_{gt_id:06d}.png'

                        save_name = f'{sc_id:06d}_{im_id:06d}_{gt_id:06d}'

                        img_info.append(
                            [depth_path, mask_path, im_cam, gt, save_name])

    # Read model point cloud
    model_file = f'{Path(DATASET_DIR).parent}/models/obj_{OBJ_ID:06d}.ply'
    assert Path(model_file).is_file()
    model_pcd = read_point_cloud(model_file).voxel_down_sample(VOXEL_SIZE)
    md_pcd_pts = np.asarray(model_pcd.points)

    # Create point cloud from image information
    t1 = time()
    with get_context('spawn').Pool(8) as pool:
        jobs = [
            pool.apply_async(create_patch_pair, (*info, md_pcd_pts))
            for info in img_info
        ]

        df = pd.DataFrame([j.get() for j in jobs]).dropna(axis=0)
        if Path(f'{SAVE_DIR}/labels.csv').is_file():
            # Skip the header row of the existing file so it is not re-read as data.
            old_df = pd.read_csv(f'{SAVE_DIR}/labels.csv', skiprows=1, header=None)
            df = pd.concat([old_df, df])
        df.to_csv(f'{SAVE_DIR}/labels.csv',
                  header=['filename', 'num_patches'],
                  index=False)

        t2 = time()
        print(
            f'Created patch pairs from {len(df)} images. Time elapsed: {t2 - t1:.3f} s'
        )
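create_patch_pair is defined elsewhere in this script; at its core it has to back-project the masked depth pixels into a camera-space point cloud before matching them against md_pcd_pts. A minimal sketch of that back-projection step, assuming the standard BOP scene_camera.json keys cam_K and depth_scale (the function name and choice of PNG reader are illustrative):

import numpy as np
import imageio

def depth_to_points(depth_path, mask_path, cam):
    """Back-project masked depth pixels to a camera-space point cloud."""
    depth = np.asarray(imageio.imread(depth_path), dtype=np.float64)
    mask = np.asarray(imageio.imread(mask_path)) > 0
    K = np.asarray(cam['cam_K']).reshape(3, 3)
    depth_scale = float(cam.get('depth_scale', 1.0))  # PNG value * scale = mm

    v, u = np.nonzero(mask)          # pixel rows/columns inside the mask
    z = depth[v, u] * depth_scale
    u, v, z = u[z > 0], v[z > 0], z[z > 0]

    x = (u - K[0, 2]) * z / K[0, 0]  # inverse pinhole projection
    y = (v - K[1, 2]) * z / K[1, 1]
    return np.column_stack([x, y, z])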
Example #21
0
    def run(self):
        """ Load BOP data """

        bop_dataset_path = self.config.get_string("bop_dataset_path")
        scene_id = self.config.get_int("scene_id", -1)
        obj_ids = self.config.get_list("obj_ids", [])
        split = self.config.get_string("split", "test")
        model_type = self.config.get_string("model_type", "")
        cam_type = self.config.get_string("cam_type", "")
        scale = 0.001 if self.config.get_bool("mm2m", False) else 1
        datasets_path = os.path.dirname(bop_dataset_path)
        dataset = os.path.basename(bop_dataset_path)

        print("bob: {}, dataset_path: {}".format(bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=model_type if model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path, dataset, cam_type=cam_type if cam_type else None)
        bpy.data.scenes["Scene"]["num_labels"] = len(model_p['obj_ids'])

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = self.config.get_int(
            "resolution_x", split_p['im_size'][0])
        bpy.context.scene.render.resolution_y = self.config.get_int(
            "resolution_y", split_p['im_size'][1])

        # Collect camera and camera object
        cam_ob = bpy.context.scene.camera
        cam = cam_ob.data
        cam['loaded_resolution'] = (bpy.context.scene.render.resolution_x,
                                    bpy.context.scene.render.resolution_y)
        # Load default intrinsics from camera.json.
        cam['loaded_intrinsics'] = cam_p['K']

        config = Config({})
        camera_module = CameraModule(config)
        camera_module._set_cam_intrinsics(cam, config)

        # only load all/selected objects here; use other modules for setting poses,
        # e.g. camera.CameraSampler / object.ObjectPoseSampler
        if scene_id == -1:
            obj_ids = obj_ids if obj_ids else model_p['obj_ids']
            for obj_id in obj_ids:
                self._load_mesh(obj_id, model_p, scale=scale)
        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(
                split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
            sc_camera = inout.load_json(
                split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))

            for i, (cam_id, insts) in enumerate(sc_gt.items()):

                cam_K, cam_H_m2c_ref = self._get_ref_cam_extrinsics_intrinsics(
                    sc_camera, cam_id, insts, scale)

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    # load scene objects
                    for inst in insts:
                        cur_obj = self._load_mesh(inst['obj_id'], model_p)
                        self.set_object_pose(cur_obj, inst, scale)

                cam_H_c2w = self._compute_camera_to_world_trafo(
                    cam_H_m2w_ref, cam_H_m2c_ref)

                # set camera intrinsics and extrinsics
                config = Config({
                    "cam2world_matrix": list(cam_H_c2w.flatten()),
                    "camK": list(cam_K.flatten())
                })
                camera_module._set_cam_intrinsics(cam, config)
                camera_module._set_cam_extrinsics(cam_ob, config)

                # Store new cam pose as next frame
                frame_id = bpy.context.scene.frame_end
                camera_module._insert_key_frames(cam, cam_ob, frame_id)
                bpy.context.scene.frame_end = frame_id + 1
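_compute_camera_to_world_trafo chains the reference model-to-world pose (world = first camera) with the inverse of the current model-to-camera pose. A minimal sketch of that composition (the real helper additionally converts between camera axis conventions):

import numpy as np

def compute_camera_to_world(cam_H_m2w_ref, cam_H_m2c_ref):
    # p_world = H_m2w @ inv(H_m2c) @ p_cam: points seen by the current
    # camera, expressed in the world frame anchored at the first camera.
    return cam_H_m2w_ref.dot(np.linalg.inv(cam_H_m2c_ref))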
Example #22
0
}
################################################################################

# Load dataset parameters.
dp_split = dataset_params.get_split_params(p['datasets_path'], p['dataset'],
                                           p['dataset_split'],
                                           p['dataset_split_type'])

model_type = 'eval'  # None = default.
dp_model = dataset_params.get_model_params(p['datasets_path'], p['dataset'],
                                           model_type)

# Load colors.
colors_path = os.path.join(os.path.dirname(visualization.__file__),
                           'colors.json')
colors = inout.load_json(colors_path)

# Subset of images for which the ground-truth poses will be rendered.
if p['targets_filename'] is not None:
    targets = inout.load_json(
        os.path.join(dp_split['base_path'], p['targets_filename']))
    scene_im_ids = {}
    for target in targets:
        scene_im_ids.setdefault(target['scene_id'], set()).add(target['im_id'])
else:
    scene_im_ids = None

# List of considered scenes.
scene_ids_curr = dp_split['scene_ids']
if p['scene_ids']:
    scene_ids_curr = set(scene_ids_curr).intersection(p['scene_ids'])
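With scene_im_ids built as above, the per-scene loop can skip images outside the target subset; a minimal sketch of that filter (the rendering call itself is elided):

for scene_id in scene_ids_curr:
    scene_gt = inout.load_scene_gt(
        dp_split['scene_gt_tpath'].format(scene_id=scene_id))
    for im_id in sorted(scene_gt.keys()):
        # Only render images listed in the targets file, if one was given.
        if scene_im_ids is not None and \
                im_id not in scene_im_ids.get(scene_id, set()):
            continue
        ...  # render the ground-truth poses for this image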
Example #23
0
          error['type'], ','.join(map(str, correct_th)))]

        misc.log('Running: ' + ' '.join(calc_scores_cmd))
        if subprocess.call(calc_scores_cmd) != 0:
          raise RuntimeError('Calculation of scores failed.')

        # Path to file with calculated scores.
        score_sign = misc.get_score_signature(correct_th, p['visib_gt_min'])

        scores_filename = 'scores_{}.json'.format(score_sign)
        scores_path = os.path.join(
          p['eval_path'], result_name, error_sign, scores_filename)
        
        # Load the scores.
        misc.log('Loading calculated scores from: {}'.format(scores_path))
        scores = inout.load_json(scores_path)
        recalls.append(scores['recall'])

    average_recalls[error['type']] = np.mean(recalls)

    misc.log('Recall scores: {}'.format(' '.join(map(str, recalls))))
    misc.log('Average recall: {}'.format(average_recalls[error['type']]))

  time_total = time.time() - time_start
  misc.log('Evaluation of {} took {}s.'.format(result_filename, time_total))

  # Calculate the final scores.
  final_scores = {}
  for error in p['errors']:
    final_scores['bop19_average_recall_{}'.format(error['type'])] =\
      average_recalls[error['type']]
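The headline BOP19 score is then the mean of the three per-error average recalls; a minimal sketch of that last step, assuming 'vsd', 'mssd' and 'mspd' are all among p['errors']:

final_scores['bop19_average_recall'] = np.mean([
    average_recalls['vsd'],
    average_recalls['mssd'],
    average_recalls['mspd'],
])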
Example #24
0
    return x_abs, y_abs, z_abs, x_ct, y_ct, z_ct


def rmfield(a, *fieldnames_to_remove):
    return a[[
        name for name in a.dtype.names if name not in fieldnames_to_remove
    ]]


if len(sys.argv) < 3:
    print(
        "Usage: python3 tools/2_1_ply_file_to_3d_coord_model.py [cfg_fn] [dataset_name]"
    )
    sys.exit(1)

cfg_fn = sys.argv[1]
cfg = inout.load_json(cfg_fn)

dataset = sys.argv[2]
bop_dir, source_dir, model_plys, model_info, model_ids, rgb_files, depth_files, mask_files, gts, cam_param_global = bop_io.get_dataset(
    cfg, dataset)

if not (os.path.exists(bop_dir + "/models_xyz/")):
    os.makedirs(bop_dir + "/models_xyz/")
norm_factor = bop_dir + "/models_xyz/" + "norm_factor.json"
param = {}

for m_id, model_ply in enumerate(model_plys):
    model_id = model_ids[m_id]
    m_info = model_info['{}'.format(model_id)]
    keys = m_info.keys()
    sym_continous = [0, 0, 0, 0, 0, 0]
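The function returning x_abs, y_abs, z_abs, x_ct, y_ct, z_ct is defined earlier in this script; one plausible minimal implementation derives them from the per-axis bounding box that BOP's models_info.json stores (min_* and size_* keys):

def get_norm_info(m_info):
    """Per-axis absolute extent and centre of a model's bounding box."""
    x_min, y_min, z_min = m_info['min_x'], m_info['min_y'], m_info['min_z']
    x_size, y_size, z_size = m_info['size_x'], m_info['size_y'], m_info['size_z']
    x_ct = x_min + x_size / 2.0  # bounding-box centre per axis
    y_ct = y_min + y_size / 2.0
    z_ct = z_min + z_size / 2.0
    # Largest absolute coordinate per axis, usable to map xyz into [-1, 1].
    x_abs = max(abs(x_min), abs(x_min + x_size))
    y_abs = max(abs(y_min), abs(y_min + y_size))
    z_abs = max(abs(z_min), abs(z_min + z_size))
    return x_abs, y_abs, z_abs, x_ct, y_ct, z_ct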
Example #25
0
    model_type = 'eval'
    dp_model = dataset_params.get_model_params(p['datasets_path'], dataset,
                                               model_type)

    # Load object models.
    models = {}
    if p['error_type'] in ['ad', 'add', 'adi', 'mssd', 'mspd', 'proj']:
        misc.log('Loading object models...')
        for obj_id in dp_model['obj_ids']:
            models[obj_id] = inout.load_ply(
                dp_model['model_tpath'].format(obj_id=obj_id))

    # Load models info.
    models_info = None
    if p['error_type'] in ['ad', 'add', 'adi', 'vsd', 'mssd', 'mspd', 'cus']:
        models_info = inout.load_json(dp_model['models_info_path'],
                                      keys_to_int=True)

    # Get sets of symmetry transformations for the object models.
    models_sym = None
    if p['error_type'] in ['mssd', 'mspd']:
        models_sym = {}
        for obj_id in dp_model['obj_ids']:
            models_sym[obj_id] = misc.get_symmetry_transformations(
                models_info[obj_id], p['max_sym_disc_step'])

    # Initialize a renderer.
    ren = None
    if p['error_type'] in ['vsd', 'cus']:
        misc.log('Initializing renderer...')
        width, height = dp_split['im_size']
        ren = renderer.create_renderer(width,
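For reference, the symmetry sets loaded above enter the MSSD error as a minimisation over symmetry transformations; a minimal numpy sketch of that error (pts are model vertices, syms is the output of misc.get_symmetry_transformations, i.e. dicts with 'R' and 't'):

import numpy as np

def mssd(R_e, t_e, R_g, t_g, pts, syms):
    """Maximum Symmetry-aware Surface Distance (BOP19)."""
    pts_e = pts.dot(R_e.T) + t_e.reshape(1, 3)  # estimate applied to vertices
    errs = []
    for sym in syms:
        pts_s = pts.dot(sym['R'].T) + sym['t'].reshape(1, 3)
        pts_g = pts_s.dot(R_g.T) + t_g.reshape(1, 3)  # gt pose after symmetry
        errs.append(np.linalg.norm(pts_e - pts_g, axis=1).max())
    return min(errs)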
Example #26
0
    def _derive_bop_results(self, out_dir, result_name, grasp_only, logger):
        """Derives BOP results.

        Args:
          out_dir: Path to the output directory.
          result_name: BOP result name. Should be the name of a folder under
            out_dir that contains output from BOP evaluation.
          grasp_only: Whether to derive results on grasped objects only.
          logger: Logger.

        Returns:
          A dictionary holding the results.
        """
        if grasp_only:
            set_str = 'grasp only'
        else:
            set_str = 'all'

        logger.info('Deriving results for *{}*'.format(set_str))

        average_recalls = {}
        average_recalls_obj = defaultdict(lambda: {})

        for error in self._p['errors']:

            error_dir_paths = {}
            if error['type'] == 'vsd':
                for vsd_tau in error['vsd_taus']:
                    error_sign = misc.get_error_signature(
                        error['type'],
                        error['n_top'],
                        vsd_delta=error['vsd_delta'],
                        vsd_tau=vsd_tau)
                    error_dir_paths[error_sign] = os.path.join(
                        result_name, error_sign)
            else:
                error_sign = misc.get_error_signature(error['type'],
                                                      error['n_top'])
                error_dir_paths[error_sign] = os.path.join(
                    result_name, error_sign)

            recalls = []
            recalls_obj = defaultdict(lambda: [])

            for error_sign, error_dir_path in error_dir_paths.items():
                for correct_th in error['correct_th']:

                    score_sign = misc.get_score_signature(
                        correct_th, self._p['visib_gt_min'])
                    matches_filename = "matches_{}.json".format(score_sign)
                    matches_path = os.path.join(out_dir, error_dir_path,
                                                matches_filename)

                    matches = inout.load_json(matches_path)

                    if grasp_only:
                        matches = [
                            m for m in matches if m['obj_id'] ==
                            self._grasp_id[m['scene_id']][m['im_id']]
                        ]

                    scores = score.calc_localization_scores(self._scene_ids,
                                                            self._obj_ids,
                                                            matches,
                                                            error['n_top'],
                                                            do_print=False)

                    recalls.append(scores['recall'])
                    for i, r in scores['obj_recalls'].items():
                        recalls_obj[i].append(r)

            average_recalls[error['type']] = np.mean(recalls)
            for i, r in recalls_obj.items():
                average_recalls_obj[i][error['type']] = np.mean(r)

        results = {i: r * 100 for i, r in average_recalls.items()}
        results['mean'] = np.mean(
            [results['vsd'], results['mssd'], results['mspd']])

        keys, values = tuple(zip(*results.items()))
        table = tabulate(
            [values],
            headers=keys,
            tablefmt='pipe',
            floatfmt='.3f',
            stralign='center',
            numalign='center',
        )
        logger.info('Evaluation results for *{}*: \n'.format(set_str) + table)

        results_per_object = {}
        for i, v in average_recalls_obj.items():
            res = {k: r * 100 for k, r in v.items()}
            res['mean'] = np.mean([res['vsd'], res['mssd'], res['mspd']])
            results_per_object[self._dataset.ycb_classes[i]] = res

        n_cols = 5
        results_tuple = [(k, v['vsd'], v['mssd'], v['mspd'], v['mean'])
                         for k, v in results_per_object.items()]
        results_flatten = list(itertools.chain(*results_tuple))
        results_2d = itertools.zip_longest(
            *[results_flatten[i::n_cols] for i in range(n_cols)])
        table = tabulate(
            results_2d,
            tablefmt='pipe',
            floatfmt='.3f',
            headers=['object', 'vsd', 'mssd', 'mspd', 'mean'] * (n_cols // 5),
            numalign='right',
        )
        logger.info('Per-object scores for *{}*: \n'.format(set_str) + table)

        results['per_obj'] = results_per_object

        return results
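A minimal usage sketch with a standard-library logger (evaluator, out_dir and result_name are placeholder names):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('bop_eval')

# Scores over all objects, then restricted to the grasped object per image.
results_all = evaluator._derive_bop_results(out_dir, result_name, False, logger)
results_grasp = evaluator._derive_bop_results(out_dir, result_name, True, logger)
logger.info('Mean AR (all): %.3f', results_all['mean'])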