Code example #1
def load_model_info(path):
    models = {}

    model_info = load_yaml(path)
    for key, val in model_info.items():
        name = '{:02d}'.format(int(key))
        models[name] = Model3D()

        # Bounding-box extents; the model is assumed to be centered at
        # the origin, so the max extent is the negated min along each axis.
        minx = val['min_x']
        miny = val['min_y']
        minz = val['min_z']
        maxx = -minx
        maxy = -miny
        maxz = -minz
        # Eight corners of the axis-aligned bounding box
        models[name].bb = []
        models[name].bb.append([minx, miny, minz])
        models[name].bb.append([minx, maxy, minz])
        models[name].bb.append([minx, miny, maxz])
        models[name].bb.append([minx, maxy, maxz])
        models[name].bb.append([maxx, miny, minz])
        models[name].bb.append([maxx, maxy, minz])
        models[name].bb.append([maxx, miny, maxz])
        models[name].bb.append([maxx, maxy, maxz])

        models[name].diameter = val['diameter']

    return models
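A minimal usage sketch for the loader above, assuming a LINEMOD-style models_info.yml with min_x/min_y/min_z and diameter entries per object (the path below is a hypothetical placeholder):

models = load_model_info('datasets/lm/models/models_info.yml')  # hypothetical path
for name, model in models.items():
    # Each entry holds the eight bounding-box corners and the model diameter
    print(name, len(model.bb), model.diameter)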
Code example #2
File: sixd.py Project: fabi92/ssd-6d
def load_sixd(base_path, seq, nr_frames=0, load_mesh=True):

    bench = Benchmark()
    bench.scale_to_meters = 0.001
    # Camera intrinsics: use the global camera.yml if present, otherwise
    # fall back to the first frame's camera matrix (see below)
    if os.path.exists(os.path.join(base_path, 'camera.yml')):
        cam_info = load_yaml(os.path.join(base_path, 'camera.yml'))
        bench.cam[0, 0] = cam_info['fx']
        bench.cam[0, 2] = cam_info['cx']
        bench.cam[1, 1] = cam_info['fy']
        bench.cam[1, 2] = cam_info['cy']
        bench.scale_to_meters = 0.001 * cam_info['depth_scale']
    else:
        print("Could not find camera.yml. Taking camera matrix of the first frame instead.")

    model_info = load_yaml(os.path.join(base_path, 'models', 'models_info.yml'))
    for key, val in model_info.items():
        name = 'obj_{:02d}'.format(int(key))
        bench.models[name] = Model3D()
        bench.models[name].diameter = val['diameter']

    if seq is None:
        return bench

    path = os.path.join(base_path, 'test/{:02d}/'.format(seq))
    info = load_info(os.path.join(path, 'info.yml'))
    gts = load_gt(os.path.join(path, 'gt.yml'))

    # Load frames
    nr_frames = nr_frames if nr_frames > 0 else len(info)
    for i in range(nr_frames):
        fr = Frame()
        fr.nr = i
        nr_string = '{:04d}'.format(i)
        fr.color = cv2.imread(path + "rgb/" + nr_string + ".png").astype(np.float32) / 255.0
        fr.depth = cv2.imread(path + "depth/" + nr_string + ".png", -1).astype(np.float32) * bench.scale_to_meters
        if os.path.exists(path + 'mask'):
            fr.mask = cv2.imread(path + 'mask/' + nr_string + ".png", -1)

        for gt in gts[i]:
            pose = np.identity(4)
            pose[:3, :3] = gt['cam_R_m2c']
            pose[:3, 3] = np.squeeze(gt['cam_t_m2c']) * bench.scale_to_meters
            fr.gt.append((gt['obj_id'], pose, gt['obj_bb']))

        fr.cam = info[i]['cam_K']

        # Fall back to the first frame's intrinsics if camera.yml was missing
        if bench.cam is None:
            bench.cam = info[i]['cam_K']

        bench.frames.append(fr)

    if load_mesh:
        # Build a set of all used model IDs for this sequence
        all_gts = list(itertools.chain(*[f.gt for f in bench.frames]))
        for ID in set([gt[0] for gt in all_gts]):
            name = 'obj_{:02d}'.format(ID)
            bench.models[name].load(os.path.join(base_path, 'models/' + name + '.ply'), scale=bench.scale_to_meters)

    return bench
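A brief usage sketch, assuming a SIXD-style dataset root with camera.yml, models/, and test/<seq>/ subfolders (the root path is a hypothetical placeholder):

bench = load_sixd('/data/hinterstoisser', seq=1, nr_frames=10)
for frame in bench.frames:
    for obj_id, pose, bbox in frame.gt:
        print(frame.nr, obj_id, pose[:3, 3])  # object translation in meters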
Code example #3
def load_sixd(base_path, sequences, load_mesh=True):
    bench = Benchmark()
    bench.scale_to_meters = 0.001
    if os.path.exists(os.path.join(base_path, 'camera.yml')):
        cam_info = load_yaml(os.path.join(base_path, 'camera.yml'))
        bench.cam = np.eye(3)
        bench.cam[0, 0] = cam_info['fx']
        bench.cam[0, 2] = cam_info['cx']
        bench.cam[1, 1] = cam_info['fy']
        bench.cam[1, 2] = cam_info['cy']
        bench.scale_to_meters = 0.001 * cam_info['depth_scale']
    else:
        print(
            "Could not find camera.yml. Taking camera matrix of the first frame instead."
        )

    model_info = load_yaml(os.path.join(base_path, 'models',
                                        'models_info.yml'))

    for key, val in model_info.items():
        name = 'obj_{:06d}'.format(int(key))  # key: (1,2,3,4,5)
        bench.models[name] = Model3D()
        bench.models[name].diameter = val['diameter']

    if sequences is None:
        return bench

    if load_mesh:
        for sequence in sequences:
            name = 'obj_{:06d}'.format(int(sequence))
            model = bench.models[name]
            model.load(os.path.join(base_path, 'models/' + name + '.ply'),
                       scale=bench.scale_to_meters)
            # Farthest-point sampling of 12 keypoints on the mesh surface
            fps = FPS(model.vertices, 12)
            fps.fit()
            # Append a homogeneous coordinate so the keypoints can be
            # transformed directly by a 4x4 pose matrix
            model.fps = np.concatenate(
                [fps.selected_points,
                 np.ones((fps.n_selected_pts, 1), dtype=float)],
                axis=1)

    return bench
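The homogeneous column appended to model.fps above lets the sampled keypoints be mapped by a 4x4 pose with a single matrix product; a self-contained NumPy sketch (the pose and points here are placeholders):

import numpy as np

pose = np.identity(4)                    # placeholder 4x4 camera-from-model pose
fps_h = np.ones((12, 4))                 # stands in for model.fps: N x 4, last column = 1
fps_h[:, :3] = np.random.rand(12, 3)
points_cam = (pose @ fps_h.T).T[:, :3]   # keypoints in the camera frame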
Code example #4
File: linemod_load.py Project: peytonhong/CVAE_Pose
    def __init__(self,
                 root_dir,
                 background_dir,
                 task,
                 object_number,
                 transform=None,
                 augmentation=False,
                 rendering=False,
                 use_offline_data=True,
                 use_useful_data=True):
        """
        Args:
            root_dir (string): Path to the LineMod dataset.
            object_number (int): Unique number of an object. (1, 2, ..., 15) -> converted into str (000001, 000002, ..., 000015)
            transform (callable, optional): Optional transform to be applied on a sample.
            use_offline_data: use saved(offline) images, if False: create images on every iterations (steps)
        """
        self.root_dir = Path(root_dir)
        assert (task in ['train', 'test']), "The task must be train or test."
        self.task = task
        self.object_number = f'{int(object_number):06}'
        self.object_path = self.root_dir / self.task / self.object_number
        self.image_path = self.object_path / 'rgb'
        self.images = sorted(glob.glob(str(self.image_path / '*.png')))
        self.transform = transform
        self.COCO_dir = background_dir
        self.backgrounds = glob.glob(str(self.COCO_dir / '*'))
        self.mask_path = self.object_path / 'mask'
        self.masks = sorted(glob.glob(str(self.mask_path / '*')))
        self.augmentation = augmentation
        self.rendering = rendering
        self.use_offline_data = use_offline_data
        self.use_useful_data = use_useful_data
        self.images_useful = []

        # image augmentation paths (To reduce training time by saving augmented images in advance)
        self.max_num_aug_images = 20000
        self.cropped_image_path = self.object_path / 'rgb_cropped'
        self.aug_image_path = self.object_path / 'rgb_aug'
        self.aug_useful_path = self.object_path / 'rgb_useful'
        self.cropped_mask_path = self.object_path / 'mask_cropped'
        self.gt_image_path = self.object_path / 'rgb_gt_cropped'
        for p in (self.cropped_image_path, self.aug_image_path,
                  self.aug_useful_path, self.cropped_mask_path,
                  self.gt_image_path):
            p.mkdir(exist_ok=True)

        with open(self.object_path / 'scene_gt.json') as json_file:
            gt = json.load(json_file)
        with open(self.object_path / 'scene_gt_info.json') as json_file:
            gt_bbox = json.load(json_file)
        self.R_matrix = self.gt_R_matrix_load(gt_json=gt)
        self.bbox = self.gt_bbox_load(gt_json=gt_bbox)

        # Check number of saved images and recreate if the number is not matched to self.max_num_aug_images
        if self.use_offline_data:
            self.save_sample_images()
            self.images_cropped = sorted(self.cropped_image_path.glob('*'))
            self.masks_cropped = sorted(self.cropped_mask_path.glob('*'))
            self.images_aug = sorted(self.aug_image_path.glob('*'))
            self.images_gt_cropped = sorted(self.gt_image_path.glob('*'))

        # pointcloud model data import
        if self.rendering:
            from rendering.renderer_xyz import Renderer
            from rendering.model import Model3D
            model_path = str(self.root_dir / 'models' /
                             f'obj_{int(object_number):06d}.ply')
            if platform == 'win32':  # only for Windows
                model_path = model_path.replace(
                    os.sep, os.altsep)  # replace '\' with '/'
            self.obj_model = Model3D()
            self.obj_model.load(model_path, scale=0.001)
            # model_data = PlyData.read(model_path)
            # (self.model_x, self.model_y, self.model_z) = (np.array(model_data['vertex'][t]) for t in ('x', 'y', 'z'))
            # (r, g, b, a) = (np.array(model_data['vertex'][t])/255 for t in ('red', 'green', 'blue', 'alpha'))
            # self.model_colors = np.vstack((r,g,b,a)).transpose()
            scene_camera_path = self.root_dir / 'train' / f'{int(object_number):06d}' / 'scene_camera.json'
            with open(scene_camera_path) as json_file:
                scene_camera = json.load(json_file)
            cam_K = scene_camera['0']['cam_K']
            cam_K = np.array(cam_K).reshape((3, 3))
            cam_T = gt['0'][0]['cam_t_m2c']
            self.cam_T = np.array(cam_T) / 1000  # [mm] -> [m]
            self.ren = Renderer((640, 480), cam_K)  # shape: (width, height)
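A hypothetical instantiation of this dataset class; the enclosing class name is not shown in the snippet, so LinemodDataset and the two directory paths below are placeholders:

dataset = LinemodDataset(root_dir='/data/lm',          # hypothetical class name and paths
                         background_dir='/data/coco',
                         task='train',
                         object_number=1)              # stored internally as '000001'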
Code example #5
def load_sixd(base_path, seq, nr_frames=0, load_mesh=True, subset_models=()):

    bench = Benchmark()
    bench.scale_to_meters = 0.001
    if os.path.exists(os.path.join(base_path, 'camera.yml')):
        cam_info = load_yaml(os.path.join(base_path, 'camera.yml'))
        bench.cam[0, 0] = cam_info['fx']
        bench.cam[0, 2] = cam_info['cx']
        bench.cam[1, 1] = cam_info['fy']
        bench.cam[1, 2] = cam_info['cy']
        bench.scale_to_meters = 0.001 * cam_info['depth_scale']
    else:
        raise FileNotFoundError(os.path.join(base_path, 'camera.yml'))

    models_path = 'models'
    if not os.path.exists(os.path.join(base_path, models_path)):
        models_path = 'models_reconst'

    model_info = load_yaml(
        os.path.join(base_path, models_path, 'models_info.yml'))
    for key, val in model_info.items():
        bench.models[str(key)] = Model3D()
        bench.models[str(key)].diameter = val['diameter']

    if seq is None:
        return bench

    path = base_path + '/test/{:02d}/'.format(int(seq))
    info = load_info(path + 'info.yml')
    gts = load_gt(path + 'gt.yml')

    # Load frames
    nr_frames = nr_frames if nr_frames > 0 else len(info)
    for i in range(nr_frames):
        fr = Frame()
        fr.nr = i
        nr_string = ('{:05d}' if 'tudlight' in base_path else '{:04d}').format(i)
        fr.color = cv2.imread(os.path.join(
            path, "rgb", nr_string + ".png")).astype(np.float32) / 255.0
        fr.depth = cv2.imread(os.path.join(path, "depth", nr_string + ".png"), -1).astype(np.float32)\
                   * bench.scale_to_meters
        if 'tless' in base_path:  # T-LESS depth uses a 10x finer unit, so compensate
            fr.depth *= 10
        if os.path.exists(os.path.join(path, 'mask')):
            fr.mask = cv2.imread(
                os.path.join(path, 'mask', nr_string + ".png"), -1)

        for gt in gts[i]:
            if subset_models and str(gt['obj_id']) not in subset_models:
                continue

            pose = np.identity(4)
            pose[:3, :3] = gt['cam_R_m2c']
            pose[:3, 3] = np.squeeze(gt['cam_t_m2c']) * bench.scale_to_meters
            if str(int(gt['obj_id'])) == str(int(seq)):
                fr.gt.append((str(gt['obj_id']), pose, gt['obj_bb']))

        fr.cam = info[i]['cam_K']
        bench.frames.append(fr)

    if load_mesh:
        # Build a set of all used model IDs for this sequence
        all_gts = list(itertools.chain(*[f.gt for f in bench.frames]))
        for ID in set([gt[0] for gt in all_gts]):
            bench.models[str(ID)].load(os.path.join(
                base_path, "models/obj_{:02d}.ply".format(int(ID))),
                                       scale_to_meter=bench.scale_to_meters)

    return bench
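Usage sketch for this variant, which can restrict ground truth to a subset of object IDs; note that models are keyed by the plain string ID here (the dataset root is a hypothetical placeholder):

bench = load_sixd('/data/tless', seq=2, nr_frames=5, subset_models=['2'])
print(len(bench.frames), bench.models['2'].diameter)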
Code example #6
                                camK=cam_K,
                                res_x=im_width,
                                res_y=im_height,
                                obj_param=obj_param,
                                th_ransac=th_ransac,
                                th_outlier=th_outlier,
                                th_inlier=th_inlier,
                                backbone=backbone)
    obj_pix2pose.append(recog_temp)
    obj_names.append(model_id)
    ply_fn = model_plys[m_id]
    if gpu_rendering:
        obj_model = inout.load_ply(ply_fn)
        obj_model['pts'] = obj_model['pts'] * 0.001  # mm to m scale
    else:
        obj_model = Model3D()
        obj_model.load(ply_fn, scale=0.001)  # mm to m scale
    obj_models.append(obj_model)
    # Approximate the object diameter from the per-axis extents (mm -> m)
    scales = obj_param[:3] / 1000
    obj_dia = np.sqrt(scales[0]**2 + scales[1]**2 + scales[2]**2)
    obj_diameter.append(obj_dia)

test_target_fn = cfg['test_target']
target_list = bop_io.get_target_list(
    os.path.join(bop_dir, test_target_fn + ".json"))


def fcn(diff_depth):
    # Per-pixel depth-agreement score: 1 at zero difference, decaying
    # linearly to 0 at a 2 cm difference, summed over all pixels
    return np.sum(np.maximum(0, 0.02 - diff_depth) / 0.02)

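A quick numeric check of fcn with synthetic depth residuals: a zero difference scores 1, 1 cm scores 0.5, and anything at or beyond 2 cm scores 0:

import numpy as np

diff_depth = np.array([0.0, 0.01, 0.02, 0.05])  # residuals in meters
print(fcn(diff_depth))  # 1.0 + 0.5 + 0.0 + 0.0 = 1.5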
Code example #7
File: ros_pix2pose.py Project: hz-ants/Pix2Pose
    def __init__(self, cfg):
        self.cfg = cfg
        self.rgb_topic = cfg['rgb_topic']
        self.depth_topic = cfg['depth_topic']
        self.camK = np.array(cfg['cam_K']).reshape(3, 3)
        self.im_width = int(cfg['im_width'])
        self.im_height = int(cfg['im_height'])
        self.inlier_th = float(cfg['inlier_th'])
        self.ransac_th = float(cfg['ransac_th'])

        self.model_params = inout.load_json(cfg['norm_factor_fn'])
        # Labels of the corresponding detections
        self.detection_labels = cfg['obj_labels']
        if icp:  # 'icp' is defined at module level (not shown in this snippet)
            self.ren = Renderer((self.im_width, self.im_height), self.camK)

        n_objs = int(cfg['n_objs'])
        self.target_objs = cfg['target_obj_name']
        self.colors = np.random.randint(0, 255, (n_objs, 3))
        if (detect_type == "rcnn"):
            #Load mask r_cnn
            '''
            standard estimation parameter for Mask R-CNN (identical for all dataset)
            '''
            self.config = BopInferenceConfig(dataset="ros",
                                             num_classes=n_objs + 1,
                                             im_width=self.im_width,
                                             im_height=self.im_height)
            self.config.DETECTION_MIN_CONFIDENCE = 0.3
            self.config.DETECTION_MAX_INSTANCES = 30
            self.config.DETECTION_NMS_THRESHOLD = 0.5

            self.detection_model = modellib.MaskRCNN(mode="inference",
                                                     config=self.config,
                                                     model_dir="/")
            self.detection_model.load_weights(cfg['path_to_detection_weights'],
                                              by_name=True)

        self.obj_models = []
        self.obj_bboxes = []

        self.obj_pix2pose = []
        pix2pose_dir = cfg['path_to_pix2pose_weights']
        th_outlier = cfg['outlier_th']
        self.model_scale = cfg['model_scale']
        for t_id, target_obj in enumerate(self.target_objs):
            weight_fn = os.path.join(
                pix2pose_dir, "{:02d}/inference.hdf5".format(target_obj))
            print("Load pix2pose weights from ", weight_fn)
            model_param = self.model_params['{}'.format(target_obj)]
            obj_param = bop_io.get_model_params(model_param)
            recog_temp = recog.pix2pose(weight_fn,
                                        camK=self.camK,
                                        res_x=self.im_width,
                                        res_y=self.im_height,
                                        obj_param=obj_param,
                                        th_ransac=self.ransac_th,
                                        th_outlier=th_outlier,
                                        th_inlier=self.inlier_th)
            self.obj_pix2pose.append(recog_temp)
            ply_fn = os.path.join(self.cfg['model_dir'],
                                  self.cfg['ply_files'][t_id])
            if icp:
                obj_model = Model3D()
                obj_model.load(ply_fn, scale=cfg['model_scale'])
                self.obj_models.append(obj_model)
            else:
                obj_model = inout.load_ply(ply_fn)
                self.obj_bboxes.append(self.get_3d_box_points(
                    obj_model['pts']))

        rospy.init_node('pix2pose', anonymous=True)
        self.detect_pub = rospy.Publisher("/pix2pose/detected_object",
                                          ros_image)

        #self.pose_pub = rospy.Publisher("/pix2pose/object_pose", Pose)
        self.pose_pub = rospy.Publisher("/pix2pose/object_pose", ros_image)
        self.sub = rospy.Subscriber(self.rgb_topic, ros_image, self.callback)
        self.graph = tf.get_default_graph()
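A sketch of the cfg dictionary this constructor reads; every value below is an illustrative placeholder, and the detection weight path is only needed when detect_type == "rcnn":

cfg = {
    'rgb_topic': '/camera/rgb/image_raw',        # placeholder topic names
    'depth_topic': '/camera/depth/image_raw',
    'cam_K': [572.4, 0.0, 325.3, 0.0, 573.6, 242.0, 0.0, 0.0, 1.0],  # row-major 3x3 intrinsics
    'im_width': 640, 'im_height': 480,
    'inlier_th': 0.3, 'ransac_th': 0.01,
    'norm_factor_fn': 'weights/norm_factor.json',
    'obj_labels': [1], 'n_objs': 1,
    'target_obj_name': [1],
    'path_to_pix2pose_weights': 'weights/pix2pose',
    'outlier_th': 0.1,
    'model_scale': 0.001,
    'model_dir': 'models',
    'ply_files': ['obj_01.ply'],
    'path_to_detection_weights': 'weights/mask_rcnn.hdf5',
}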