Code Example #1
File: fragment_test.py    Project: zebrajack/epos
def fragmentation_fps_test():

    output_dir = 'fragmentation_test_output'
    misc.ensure_dir(output_dir)

    datasets = ['hb', 'ycbv', 'tless', 'lmo', 'icbin', 'itodd', 'tudl']

    for dataset in datasets:

        model_type = None
        if dataset == 'tless':
            model_type = 'reconst'
        elif dataset == 'itodd':
            model_type = 'dense'

        dp_model = dataset_params.get_model_params(config.BOP_PATH, dataset,
                                                   model_type)

        for obj_id in dp_model['obj_ids']:
            print('Fragmenting object {} from dataset {}...'.format(
                obj_id, dataset))

            model_fpath = dp_model['model_tpath'].format(obj_id=obj_id)
            model = inout.load_ply(model_fpath)

            # Fragmentation by the furthest point sampling.
            frag_centers, vertex_frag_ids = \
              fragment.fragmentation_fps(model['pts'], num_frags=256)

            # Fragment colors.
            frag_colors = frag_centers - frag_centers.min()
            frag_colors = (255.0 * frag_colors / frag_colors.max()).astype(
                np.uint8)

            # Color the model points by the fragment colors.
            pts_colors = np.zeros((model['pts'].shape[0], 3), np.uint8)
            for frag_id in range(len(frag_centers)):
                pts_colors[vertex_frag_ids == frag_id] = frag_colors[frag_id]

            inout.save_ply(
                os.path.join(
                    output_dir,
                    '{}_obj_{:02d}_fragments.ply'.format(dataset, obj_id)), {
                        'pts': model['pts'],
                        'faces': model['faces'],
                        'colors': pts_colors
                    })
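Every example on this page works with the dictionary returned by inout.load_ply. Judging from the keys accessed in the snippets ('pts', 'faces', 'colors', 'normals', 'texture_uv', 'texture_file'), a minimal inspection sketch could look as follows; the import path and the model path are assumptions, not taken from any of the projects listed here.

from bop_toolkit_lib import inout  # assumed import path

# Hypothetical path to a BOP-format PLY model.
model = inout.load_ply('models/obj_000001.ply')

# 'pts' is an (N, 3) array of vertex positions and 'faces' an (M, 3) array of
# vertex indices, as used in Code Example #1 above.
print(model['pts'].shape, model['faces'].shape)

# Optional keys that the other examples rely on when present in the PLY file.
for key in ('colors', 'normals', 'texture_uv', 'texture_file'):
    print(key, key in model)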
Code Example #2
File: evaluation.py    Project: dornik/plausible-poses
def get_tgt_model(obj_id, renderer):
    """
    Load the target object with the given [obj_id] as a Klampt PointCloud, a numpy array, and a trimesh mesh, and add it to the renderer.
    """

    path_ply = os.path.join(YCBV_PATH, f"models_eval/obj_{obj_id:06d}.ply")
    cloud = resource.get(path_ply).convert('PointCloud')
    cloud.transform(obj_roff[obj_id - 1].T.reshape(9).tolist(), obj_toff[obj_id - 1].reshape(3).tolist())

    ply = inout.load_ply(path_ply)
    ply['pts'] = (obj_roff[obj_id - 1] @ ply['pts'].T + obj_toff[obj_id - 1].reshape(3, 1)).T

    mesh = trimesh.load(path_ply)
    T = np.eye(4)
    T[:3, :3] = obj_roff[obj_id - 1]
    T[:3, 3] = obj_toff[obj_id - 1]
    mesh = mesh.apply_transform(T)

    renderer.add_object(obj_id, os.path.join(YCBV_PATH, f"models/obj_{obj_id:06d}.ply"),
                        offset=[obj_roff[obj_id - 1], obj_toff[obj_id - 1]])
    return cloud, ply, mesh
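The obj_roff/obj_toff offsets come from the surrounding project, but the operation applied to ply['pts'] is an ordinary rigid transform of an (N, 3) point array. A self-contained numpy sketch with made-up R and t:

import numpy as np

# Hypothetical rigid transform: 90-degree rotation about Z plus a shift along X.
R = np.array([[0., -1., 0.],
              [1.,  0., 0.],
              [0.,  0., 1.]])
t = np.array([10.0, 0.0, 0.0])

pts = np.random.rand(100, 3)  # stand-in for ply['pts']

# Same pattern as in Code Example #2: rotate, translate, transpose back to (N, 3).
pts_transformed = (R @ pts.T + t.reshape(3, 1)).T
assert pts_transformed.shape == pts.shape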
Code Example #3
    split_type_str = ' - ' + split_type if split_type is not None else ''

    # Load dataset parameters.
    dp_split = dataset_params.get_split_params(p['datasets_path'], dataset,
                                               split, split_type)

    model_type = 'eval'
    dp_model = dataset_params.get_model_params(p['datasets_path'], dataset,
                                               model_type)

    # Load object models.
    models = {}
    if p['error_type'] in ['ad', 'add', 'adi', 'mssd', 'mspd', 'proj']:
        misc.log('Loading object models...')
        for obj_id in dp_model['obj_ids']:
            models[obj_id] = inout.load_ply(
                dp_model['model_tpath'].format(obj_id=obj_id))

    # Load models info.
    models_info = None
    if p['error_type'] in ['ad', 'add', 'adi', 'vsd', 'mssd', 'mspd', 'cus']:
        models_info = inout.load_json(dp_model['models_info_path'],
                                      keys_to_int=True)

    # Get sets of symmetry transformations for the object models.
    models_sym = None
    if p['error_type'] in ['mssd', 'mspd']:
        models_sym = {}
        for obj_id in dp_model['obj_ids']:
            models_sym[obj_id] = misc.get_symmetry_transformations(
                models_info[obj_id], p['max_sym_disc_step'])
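In BOP datasets, models_info.json maps object IDs to per-object metadata such as 'diameter' and, for symmetric objects, 'symmetries_discrete'/'symmetries_continuous'; keys_to_int=True converts the JSON string keys back into integer object IDs. A minimal sketch of reading the file directly (the path is hypothetical):

from bop_toolkit_lib import inout  # assumed import path

models_info = inout.load_json('models_eval/models_info.json', keys_to_int=True)
for obj_id, info in models_info.items():
    # 'diameter' and the symmetry entries are part of the standard BOP format.
    print(obj_id, info['diameter'], 'symmetries_discrete' in info)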
Code Example #4
        th_outlier = [th_outliers[m_id]]  # provide a fixed outlier value
        print("Set outlier threshold to ", th_outlier[0])
    recog_temp = recog.pix2pose(weight_fn,
                                camK=cam_K,
                                res_x=im_width,
                                res_y=im_height,
                                obj_param=obj_param,
                                th_ransac=th_ransac,
                                th_outlier=th_outlier,
                                th_inlier=th_inlier,
                                backbone=backbone)
    obj_pix2pose.append(recog_temp)
    obj_names.append(model_id)
    ply_fn = model_plys[m_id]
    if gpu_rendering:
        obj_model = inout.load_ply(ply_fn)
        obj_model['pts'] = obj_model['pts'] * 0.001  # mm to m scale
    else:
        obj_model = Model3D()
        obj_model.load(ply_fn, scale=0.001)  # mm to m scale
    obj_models.append(obj_model)
    scales = obj_param[:3] / 1000
    obj_dia = np.sqrt(scales[0]**2 + scales[1]**2 + scales[2]**2)
    obj_diameter.append(obj_dia)

test_target_fn = cfg['test_target']
target_list = bop_io.get_target_list(
    os.path.join(bop_dir, test_target_fn + ".json"))


def fcn(diff_depth):
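BOP models are stored in millimetres, hence the * 0.001 scaling before rendering in metres, and the diameter appended above appears to be the diagonal of the extents stored in obj_param[:3]. A small numpy sketch of both steps with made-up extents:

import numpy as np

obj_param = np.array([120.0, 80.0, 60.0])  # hypothetical x/y/z extents in mm

pts_mm = np.random.rand(50, 3) * obj_param  # stand-in for obj_model['pts']
pts_m = pts_mm * 0.001                      # mm to m, as in Code Example #4

scales = obj_param[:3] / 1000.0
obj_dia = np.sqrt(scales[0]**2 + scales[1]**2 + scales[2]**2)
assert np.isclose(obj_dia, np.linalg.norm(scales))  # equivalent, more idiomatic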
Code Example #5
File: renderer_py.py    Project: tinylife/bop_toolkit
    def add_object(self, obj_id, model_path, **kwargs):
        """See base class."""
        # Color of the object model (the original color saved with the object model
        # will be used if None).
        surf_color = None
        if 'surf_color' in kwargs:
            surf_color = kwargs['surf_color']

        # Load the object model.
        model = inout.load_ply(model_path)
        self.models[obj_id] = model

        # Calculate the 3D bounding box of the model (will be used to set the near
        # and far clipping plane).
        bb = misc.calc_3d_bbox(model['pts'][:, 0], model['pts'][:, 1],
                               model['pts'][:, 2])
        self.model_bbox_corners[obj_id] = np.array([
            [bb[0], bb[1], bb[2]],
            [bb[0], bb[1], bb[2] + bb[5]],
            [bb[0], bb[1] + bb[4], bb[2]],
            [bb[0], bb[1] + bb[4], bb[2] + bb[5]],
            [bb[0] + bb[3], bb[1], bb[2]],
            [bb[0] + bb[3], bb[1], bb[2] + bb[5]],
            [bb[0] + bb[3], bb[1] + bb[4], bb[2]],
            [bb[0] + bb[3], bb[1] + bb[4], bb[2] + bb[5]],
        ])

        # Set texture/color of vertices.
        self.model_textures[obj_id] = None

        # Use the specified uniform surface color.
        if surf_color is not None:
            colors = np.tile(
                list(surf_color) + [1.0], [model['pts'].shape[0], 1])

            # Set UV texture coordinates to dummy values.
            texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)

        # Use the model texture.
        elif 'texture_file' in self.models[obj_id].keys():
            model_texture_path = os.path.join(
                os.path.dirname(model_path),
                self.models[obj_id]['texture_file'])
            model_texture = inout.load_im(model_texture_path)

            # Normalize the texture image.
            if model_texture.max() > 1.0:
                model_texture = model_texture.astype(np.float32) / 255.0
            model_texture = np.flipud(model_texture)
            self.model_textures[obj_id] = model_texture

            # UV texture coordinates.
            texture_uv = model['texture_uv']

            # Set the per-vertex color to dummy values.
            colors = np.zeros((model['pts'].shape[0], 3), np.float32)

        # Use the original model color.
        elif 'colors' in model.keys():
            assert (model['pts'].shape[0] == model['colors'].shape[0])
            colors = model['colors']
            if colors.max() > 1.0:
                colors /= 255.0  # Color values are expected in range [0, 1].

            # Set UV texture coordinates to dummy values.
            texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)

        # Set the model color to gray.
        else:
            colors = np.ones((model['pts'].shape[0], 3), np.float32) * 0.5

            # Set UV texture coordinates to dummy values.
            texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)

        # Set the vertex data.
        if self.mode == 'depth':
            vertices_type = [('a_position', np.float32, 3),
                             ('a_color', np.float32, colors.shape[1])]
            vertices = np.array(list(zip(model['pts'], colors)), vertices_type)
        else:
            if self.shading == 'flat':
                vertices_type = [('a_position', np.float32, 3),
                                 ('a_color', np.float32, colors.shape[1]),
                                 ('a_texcoord', np.float32, 2)]
                vertices = np.array(
                    list(zip(model['pts'], colors, texture_uv)), vertices_type)
            elif self.shading == 'phong':
                vertices_type = [('a_position', np.float32, 3),
                                 ('a_normal', np.float32, 3),
                                 ('a_color', np.float32, colors.shape[1]),
                                 ('a_texcoord', np.float32, 2)]
                vertices = np.array(
                    list(
                        zip(model['pts'], model['normals'], colors,
                            texture_uv)), vertices_type)
            else:
                raise ValueError('Unknown shading type.')

        # Create vertex and index buffer for the loaded object model.
        self.vertex_buffers[obj_id] = vertices.view(gloo.VertexBuffer)
        self.index_buffers[obj_id] = \
          model['faces'].flatten().astype(np.uint32).view(gloo.IndexBuffer)

        # Set shader for the selected shading.
        if self.shading == 'flat':
            rgb_fragment_code = _rgb_fragment_flat_code
        elif self.shading == 'phong':
            rgb_fragment_code = _rgb_fragment_phong_code
        else:
            raise ValueError('Unknown shading type.')

        # Prepare the RGB OpenGL program.
        rgb_program = gloo.Program(_rgb_vertex_code, rgb_fragment_code)
        rgb_program.bind(self.vertex_buffers[obj_id])
        if self.model_textures[obj_id] is not None:
            rgb_program['u_use_texture'] = int(True)
            rgb_program['u_texture'] = self.model_textures[obj_id]
        else:
            rgb_program['u_use_texture'] = int(False)
            rgb_program['u_texture'] = np.zeros((1, 1, 4), np.float32)
        self.rgb_programs[obj_id] = rgb_program

        # Prepare the depth OpenGL program.
        depth_program = gloo.Program(_depth_vertex_code, _depth_fragment_code)
        depth_program.bind(self.vertex_buffers[obj_id])
        self.depth_programs[obj_id] = depth_program
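misc.calc_3d_bbox evidently returns [x, y, z, size_x, size_y, size_z], and the eight bounding-box corners above are the minimum corner plus every on/off combination of the three sizes. An equivalent, more compact sketch (same corner order as the explicit listing):

import itertools
import numpy as np

def bbox_corners(bb):
    # bb = [x, y, z, size_x, size_y, size_z], as used in Code Example #5.
    mins, sizes = np.array(bb[:3]), np.array(bb[3:])
    return np.array([mins + sizes * np.array(mask)
                     for mask in itertools.product((0, 1), repeat=3)])

corners = bbox_corners([0.0, 0.0, 0.0, 2.0, 3.0, 4.0])
assert corners.shape == (8, 3)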
Code Example #6
File: ros_pix2pose.py    Project: GraceJary/Pix2Pose
    def __init__(self, cfg):
        self.cfg = cfg
        self.rgb_topic = cfg['rgb_topic']
        self.depth_topic = cfg['depth_topic']
        self.camK = np.array(cfg['cam_K']).reshape(3, 3)
        self.im_width = int(cfg['im_width'])
        self.im_height = int(cfg['im_height'])
        self.inlier_th = float(cfg['inlier_th'])
        self.ransac_th = float(cfg['ransac_th'])
        self.pub_before_icp = False
        self.graph = tf_backend.Graph()
        if (int(cfg['icp']) == 1):
            self.icp = True
        else:
            self.icp = False
        self.model_params = inout.load_json(cfg['norm_factor_fn'])
        self.detection_labels = cfg[
            'obj_labels']  # labels of the corresponding detections
        n_objs = int(cfg['n_objs'])
        self.target_objs = cfg['target_obj_name']
        self.colors = np.random.randint(0, 255, (n_objs, 3))

        with self.graph.as_default():
            if (detect_type == "rcnn"):
                # Load Mask R-CNN.
                '''
                Standard estimation parameters for Mask R-CNN (identical for all datasets)
                '''
                self.config = BopInferenceConfig(dataset="ros",
                                                 num_classes=n_objs + 1,
                                                 im_width=self.im_width,
                                                 im_height=self.im_height)
                self.config.DETECTION_MIN_CONFIDENCE = 0.3
                self.config.DETECTION_MAX_INSTANCES = 30
                self.config.DETECTION_NMS_THRESHOLD = 0.5

                self.detection_model = modellib.MaskRCNN(mode="inference",
                                                         config=self.config,
                                                         model_dir="/")
                self.detection_model.load_weights(
                    cfg['path_to_detection_weights'], by_name=True)

            self.obj_models = []
            self.obj_bboxes = []

            self.obj_pix2pose = []
            pix2pose_dir = cfg['path_to_pix2pose_weights']
            th_outlier = cfg['outlier_th']
            self.model_scale = cfg['model_scale']
            for t_id, target_obj in enumerate(self.target_objs):
                weight_fn = os.path.join(
                    pix2pose_dir, "{:02d}/inference.hdf5".format(target_obj))
                print("Load pix2pose weights from ", weight_fn)
                model_param = self.model_params['{}'.format(target_obj)]
                obj_param = bop_io.get_model_params(model_param)
                recog_temp = recog.pix2pose(weight_fn,
                                            camK=self.camK,
                                            res_x=self.im_width,
                                            res_y=self.im_height,
                                            obj_param=obj_param,
                                            th_ransac=self.ransac_th,
                                            th_outlier=th_outlier,
                                            th_inlier=self.inlier_th)
                self.obj_pix2pose.append(recog_temp)
                ply_fn = os.path.join(self.cfg['model_dir'],
                                      self.cfg['ply_files'][t_id])
                if (self.icp):
                    #for pyrender rendering
                    obj_model = trimesh.load_mesh(ply_fn)
                    obj_model.vertices = obj_model.vertices * self.model_scale
                    mesh = pyrender.Mesh.from_trimesh(obj_model)
                    self.obj_models.append(mesh)
                    self.obj_bboxes.append(
                        self.get_3d_box_points(obj_model.vertices))

                else:
                    obj_model = inout.load_ply(ply_fn)
                    self.obj_bboxes.append(
                        self.get_3d_box_points(obj_model['pts']))

                rospy.init_node('pix2pose', anonymous=True)
                self.detect_pub = rospy.Publisher("/pix2pose/detected_object",
                                                  ros_image)

                #self.pose_pub = rospy.Publisher("/pix2pose/object_pose", Pose)
                self.pose_pub = rospy.Publisher("/pix2pose/object_pose",
                                                ros_image)
                self.have_depth = False

                if (self.icp):
                    self.sub_depth = rospy.Subscriber(self.depth_topic,
                                                      ros_image,
                                                      self.callback_depth,
                                                      queue_size=1)
                    if (self.pub_before_icp):
                        self.pose_pub_noicp = rospy.Publisher(
                            "/pix2pose/object_pose_noicp", ros_image)

        self.depth_img = np.zeros((self.im_height, self.im_width))
        self.sub = rospy.Subscriber(self.rgb_topic,
                                    ros_image,
                                    self.callback,
                                    queue_size=1)
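Depending on whether ICP (and therefore pyrender-based rendering) is enabled, each PLY is loaded either with trimesh or with inout.load_ply, and the vertices end up in mesh.vertices or model['pts'] respectively. A stripped-down sketch of the two branches; the path, scale and flag are placeholders:

import trimesh
from bop_toolkit_lib import inout  # assumed import path

ply_fn = 'models/obj_000001.ply'  # hypothetical model path
model_scale = 0.001               # mm to m, as in Code Example #6
use_icp = True

if use_icp:
    # pyrender/trimesh branch: scale the mesh vertices in place.
    mesh = trimesh.load_mesh(ply_fn)
    mesh.vertices = mesh.vertices * model_scale
    vertices = mesh.vertices
else:
    # Point-based branch via the BOP toolkit loader.
    model = inout.load_ply(ply_fn)
    vertices = model['pts']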
Code Example #7
  misc.ensure_dir(os.path.dirname(out_rgb_tpath.format(
    out_path=out_path, obj_id=obj_id, im_id=0)))
  misc.ensure_dir(os.path.dirname(out_depth_tpath.format(
    out_path=out_path, obj_id=obj_id, im_id=0)))
  misc.ensure_dir(os.path.dirname(out_uv_tpath.format(
    out_path=out_path, obj_id=obj_id, im_id=0)))
  misc.ensure_dir(os.path.dirname(out_mask_tpath.format(
    out_path=out_path, obj_id=obj_id, im_id=0)))
  misc.ensure_dir(os.path.dirname(out_scene_camera_tpath.format(
    out_path=out_path, obj_id=obj_id)))
  misc.ensure_dir(os.path.dirname(out_scene_gt_tpath.format(
    out_path=out_path, obj_id=obj_id)))

  # Load model.
  model_path = dp_model['model_tpath'].format(obj_id=obj_id)
  model = inout.load_ply(model_path)
  model_uv_path = dp_model['model_uv_tpath'].format(obj_id=obj_id)
  model_uv = inout.load_ply(model_uv_path)

  # Load model texture.
  if 'texture_file' in model:
    model_texture_path =\
      os.path.join(os.path.dirname(model_path), model['texture_file'])
    model_texture = inout.load_im(model_texture_path)
  else:
    model_texture = None
  model_uv_texture = None

  scene_camera = {}
  scene_gt = {}
  im_id = 0
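The out_*_tpath variables are format-string templates defined elsewhere in the script; the directories are created up front so that the later save calls can assume they exist. A small sketch of the same pattern with a hypothetical template:

import os

# Hypothetical output template in the same style as Code Example #7.
out_rgb_tpath = '{out_path}/train/{obj_id:06d}/rgb/{im_id:06d}.png'

rgb_path = out_rgb_tpath.format(out_path='output', obj_id=1, im_id=0)

# Equivalent of misc.ensure_dir(os.path.dirname(...)) in the snippet.
os.makedirs(os.path.dirname(rgb_path), exist_ok=True)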
Code Example #8
    ren_depth.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id))

# Render training images for all object models.
for obj_id in obj_ids[:2]:  # for debugging

    # Prepare output folders.
    misc.ensure_dir(
        os.path.dirname(
            out_rgb_tpath.format(out_path=out_path, obj_id=obj_id, im_id=0)))
    misc.ensure_dir(
        os.path.dirname(
            out_depth_tpath.format(out_path=out_path, obj_id=obj_id, im_id=0)))

    # Load model.
    model_path = dp_model['model_tpath'].format(obj_id=obj_id)
    model = inout.load_ply(model_path)

    # Load model texture.
    if 'texture_file' in model:
        model_texture_path =\
          os.path.join(os.path.dirname(model_path), model['texture_file'])
        model_texture = inout.load_im(model_texture_path)
    else:
        model_texture = None

    scene_camera = {}
    scene_gt = {}
    im_id = 0
    for radius in radii:
        # Sample viewpoints.
        view_sampler_mode = 'hinterstoisser'  # 'hinterstoisser' or 'fibonacci'.