Exemplo n.º 1
0
    def __init__(self, cls_name, render_num=10000, fuse_num=10000, ms_num=10000,
                 has_render_set=True, has_fuse_set=True):
        """Build (or load cached) pose databases for one LINEMOD class.

        Args:
            cls_name: LINEMOD class name, e.g. 'cat'.
            render_num: number of rendered images to index.
            fuse_num: number of fused images to index.
            ms_num: kept for interface compatibility; unused here.
            has_render_set: when False, skip building the render database.
            has_fuse_set: when False, skip building the fuse database.
        """
        self.cls_name=cls_name

        # some dirs for processing
        # (a dead `os.path.join(...)` expression whose result was discarded
        # used to sit here; removed)
        self.linemod_dir=cfg.LINEMOD
        self.render_dir='{}/renders'.format(cls_name)
        self.rgb_dir='{}/JPEGImages'.format(cls_name)
        self.mask_dir='{}/mask'.format(cls_name)
        self.rt_dir=os.path.join(cfg.DATA_DIR,'LINEMOD_ORIG',cls_name,'data')
        self.render_num=render_num

        self.test_fn='{}/test.txt'.format(cls_name)
        self.train_fn='{}/train.txt'.format(cls_name)
        self.val_fn='{}/val.txt'.format(cls_name)

        if has_render_set:
            # cache lives under cfg.DATA_DIR (was self.linemod_dir originally)
            self.render_pkl=os.path.join(cfg.DATA_DIR,'posedb','{}_render.pkl'.format(cls_name))
            # prepare dataset
            if os.path.exists(self.render_pkl):
                # read cached
                # TODO maybe we should disable cache, or we change self.linemod_dir to self.data_dir
                self.render_set=read_pickle(self.render_pkl)
            else:
                # process render set
                self.render_set=self.collect_render_set_info(self.render_pkl,self.render_dir)
        else:
            self.render_set=[]

        # NOTE(review): self.set_pkl starts as a path and is then rebound to
        # the loaded/collected data itself -- misleading name, kept because
        # external code may read the attribute under this name.
        self.set_pkl=os.path.join(cfg.DATA_DIR,'posedb','{}.pkl'.format(cls_name))
        if os.path.exists(self.set_pkl):
            # read cached
            self.set_pkl=read_pickle(self.set_pkl)
        else:
            # process real set
            self.set_pkl=self.collect_set_info()

        # prepare train/val/test split containers
        self.train_real_set=[]
        self.test_real_set=[]
        self.val_real_set=[]
        self.collect_train_val_test_info()

        self.fuse_set=[]
        self.fuse_dir='fuse'
        self.fuse_num=fuse_num
        self.cls_idx=cfg.linemod_cls_names.index(cls_name)

        if has_fuse_set:
            self.fuse_pkl=os.path.join(cfg.DATA_DIR,'posedb','{}_fuse.pkl'.format(cls_name))
            # prepare dataset
            if os.path.exists(self.fuse_pkl):
                # read cached
                self.fuse_set=read_pickle(self.fuse_pkl)
            else:
                # process fuse set
                self.fuse_set=self.collect_fuse_info()
        else:
            self.fuse_set=[]
Exemplo n.º 2
0
    def collect_ms_info(self):
        """Index the multi-object render ('render_multi') set and cache it.

        Frames whose mask has almost no foreground are skipped. Returns the
        list of per-frame annotation dicts, also saved to self.ms_pkl.
        """
        database=[]
        projector=Projector()
        model_db=LineModModelDB()
        for k in range(self.ms_num):
            data=dict()
            data['rgb_pth']=os.path.join(self.ms_dir, '{}.jpg'.format(k))
            data['dpt_pth']=os.path.join(self.ms_dir, '{}_{}_mask.png'.format(k,self.cls_name))

            # if too few foreground pts then continue
            mask=imread(os.path.join(self.linemod_dir,data['dpt_pth']))
            if np.sum(mask)<5: continue

            data['RT'] = read_pickle(os.path.join(self.linemod_dir, self.ms_dir, '{}_{}_RT.pkl'.format(self.cls_name,k)))['RT']
            data['cls_typ']=self.cls_name
            data['rnd_typ']='render_multi'
            data['corners']=projector.project(model_db.get_corners_3d(self.cls_name),data['RT'],'blender')
            data['farthest']=projector.project(model_db.get_farthest_3d(self.cls_name),data['RT'],'blender')
            for num in [4,12,16,20]:
                # bug fix: these two calls previously referenced an undefined
                # name 'modeldb' (the local is 'model_db') -> NameError
                data['farthest{}'.format(num)]=projector.project(model_db.get_farthest_3d(self.cls_name,num),data['RT'],'blender')
            data['center']=projector.project(model_db.get_centers_3d(self.cls_name)[None,:],data['RT'],'blender')
            data['small_bbox'] = projector.project(model_db.get_small_bbox(self.cls_name), data['RT'], 'blender')
            axis_direct=np.concatenate([np.identity(3), np.zeros([3, 1])], 1).astype(np.float32)
            data['van_pts']=projector.project_h(axis_direct, data['RT'], 'blender')
            database.append(data)

        save_pickle(database,self.ms_pkl)
        return database
Exemplo n.º 3
0
    def collect_fuse_info(self):
        """Index the 'fuse' set for this class and cache it to self.fuse_pkl.

        Frames where this class occupies fewer than 400 mask pixels are
        skipped. The intrinsic matrix is shifted by the per-class crop offset
        stored in the frame's info pickle.
        """
        database=[]
        modeldb=LineModModelDB()
        projector=Projector()
        for k in range(self.fuse_num):
            data=dict()
            data['rgb_pth']=os.path.join(self.fuse_dir, '{}_rgb.jpg'.format(k))
            data['dpt_pth']=os.path.join(self.fuse_dir, '{}_mask.png'.format(k))

            # if too few foreground pts then continue; mask value for this
            # class is cls index + 1. Use the cached self.cls_idx instead of
            # recomputing cfg.linemod_cls_names.index(...) every iteration.
            mask=imread(os.path.join(self.linemod_dir,data['dpt_pth']))
            if np.sum(mask==(self.cls_idx+1))<400: continue

            data['cls_typ']=self.cls_name
            data['rnd_typ']='fuse'
            begins,poses=read_pickle(os.path.join(self.linemod_dir,self.fuse_dir,'{}_info.pkl'.format(k)))
            data['RT'] = poses[self.cls_idx]
            # shift the principal point by the crop offset of this class
            K=projector.intrinsic_matrix['linemod'].copy()
            K[0,2]+=begins[self.cls_idx,1]
            K[1,2]+=begins[self.cls_idx,0]
            data['K']=K
            data['corners']=projector.project_K(modeldb.get_corners_3d(self.cls_name),data['RT'],K)
            data['center']=projector.project_K(modeldb.get_centers_3d(self.cls_name),data['RT'],K)
            data['farthest']=projector.project_K(modeldb.get_farthest_3d(self.cls_name),data['RT'],K)
            for num in [4,12,16,20]:
                data['farthest{}'.format(num)]=projector.project_K(modeldb.get_farthest_3d(self.cls_name,num),data['RT'],K)
            data['small_bbox'] = projector.project_K(modeldb.get_small_bbox(self.cls_name), data['RT'], K)
            database.append(data)

        save_pickle(database,self.fuse_pkl)
        return database
Exemplo n.º 4
0
    def collect_truncated_set_info(self):
        """Index the truncated LINEMOD set for this class and cache it to self.pkl."""
        proj=Projector()
        db=LineModModelDB()
        cls=self.cls_name
        records=[]

        img_num=len(os.listdir(os.path.join(self.linemod_dir,cls,'JPEGImages')))
        for idx in range(img_num):
            item={}
            item['rgb_pth']=os.path.join('truncated',cls,'{:06}_rgb.jpg'.format(idx))
            # NOTE(review): the mask uses 4-digit padding while rgb/info use 6 --
            # confirm the files on disk really are named this way
            item['dpt_pth']=os.path.join('truncated',cls,'{:04}_msk.png'.format(idx))

            pose,K=read_pickle(os.path.join(self.linemod_dir,'truncated',cls,'{:06}_info.pkl'.format(idx)))
            item['RT']=pose
            item['cls_typ']=cls
            item['rnd_typ']='truncated'
            item['corners']=proj.project_K(db.get_corners_3d(cls),pose,K)
            item['farthest']=proj.project_K(db.get_farthest_3d(cls),pose,K)
            for fp_num in [4,12,16,20]:
                item['farthest{}'.format(fp_num)]=proj.project_K(db.get_farthest_3d(cls,fp_num),pose,K)
            item['small_bbox']=proj.project_K(db.get_small_bbox(cls),pose,K)
            item['center']=proj.project_K(db.get_centers_3d(cls)[None,:],pose,K)
            item['K']=K
            records.append(item)

        save_pickle(records,self.pkl)
        return records
Exemplo n.º 5
0
def record_render_ann(model_meta, img_id, ann_id, images, annotations):
    """Append COCO-style image/annotation records for the render set.

    Args:
        model_meta: dict with data_root, cls, split, corner_3d, center_3d,
            fps_3d and K entries.
        img_id, ann_id: running id counters.
        images, annotations: output lists, mutated in place.

    Returns:
        The updated (img_id, ann_id) pair.
    """
    data_root = model_meta['data_root']
    cls = model_meta['cls']
    split = model_meta['split']  # NOTE(review): unused; kept so a missing key still fails fast
    corner_3d = model_meta['corner_3d']
    center_3d = model_meta['center_3d']
    fps_3d = model_meta['fps_3d']
    K = model_meta['K']

    render_dir = os.path.join(data_root, 'renders', cls)
    ann_num = len(glob.glob(os.path.join(
        render_dir, '*.pkl')))  # 'data/linemod/renders/cat/*.pkl'
    # renders were produced with the blender camera, so the dataset K read
    # above is deliberately overridden here
    K = blender_K
    for ind in tqdm.tqdm(range(ann_num)):
        img_name = '{}.jpg'.format(ind)
        rgb_path = os.path.join(render_dir,
                                img_name)  # 'data/linemod/renders/cat/xxx.jpg'
        # context manager closes the file handle promptly; Image.open keeps
        # the file open until garbage collection otherwise (handle leak)
        with Image.open(rgb_path) as rgb:
            img_size = rgb.size
        img_id += 1
        info = {
            'file_name': rgb_path,
            'height': img_size[1],
            'width': img_size[0],
            'id': img_id
        }
        images.append(info)

        pose = read_pickle(os.path.join(render_dir,
                                        '{}_RT.pkl'.format(ind)))['RT']

        corner_2d = project(corner_3d, K, pose)
        center_2d = project(center_3d[None], K, pose)[0]
        fps_2d = project(fps_3d, K, pose)

        mask_path = os.path.join(render_dir, '{}_depth.png'.format(ind))

        ann_id += 1
        anno = {
            'mask_path': mask_path,
            'image_id': img_id,
            'category_id': 1,
            'id': ann_id
        }
        anno.update({
            'corner_3d': corner_3d.tolist(),
            'corner_2d': corner_2d.tolist()
        })
        anno.update({
            'center_3d': center_3d.tolist(),
            'center_2d': center_2d.tolist()
        })
        anno.update({'fps_3d': fps_3d.tolist(), 'fps_2d': fps_2d.tolist()})
        anno.update({'K': K.tolist(), 'pose': pose.tolist()})
        anno.update({'data_root': render_dir})
        anno.update({'type': 'render', 'cls': cls})
        annotations.append(anno)

    return img_id, ann_id
Exemplo n.º 6
0
def render_to_coco():
    """Convert the T-LESS render set into a COCO-style render.json.

    Writes one image record per rendered frame and one annotation per frame
    holding the intrinsics and pose read from the frame's *_RT.pkl.
    """
    data_root = 'data/tless/renders/'
    obj_ids = [i + 1 for i in range(30)]

    img_id = 0
    ann_id = 0
    images = []
    annotations = []

    for obj_id in tqdm.tqdm(obj_ids):
        obj_dir = os.path.join(data_root, str(obj_id))
        pkl_paths = glob.glob(os.path.join(obj_dir, '*.pkl'))
        for pkl_path in tqdm.tqdm(pkl_paths):
            rgb_path = pkl_path.replace('_RT.pkl', '.png')
            mask_path = pkl_path.replace('_RT.pkl', '_depth.png')

            # skip frames whose rgb or mask is missing
            if not os.path.exists(rgb_path) or not os.path.exists(mask_path):
                continue

            # context manager closes the file handle promptly; Image.open
            # keeps the file open until garbage collection otherwise
            with Image.open(rgb_path) as rgb:
                img_size = rgb.size
            img_id += 1
            info = {
                'rgb_path': rgb_path,
                'height': img_size[1],
                'width': img_size[0],
                'id': img_id
            }
            images.append(info)

            K_P = read_pickle(pkl_path)

            ann_id += 1
            anno = {
                'mask_path': mask_path,
                'image_id': img_id,
                'category_id': obj_id,
                'id': ann_id
            }
            anno.update({'K': K_P['K'].tolist(), 'pose': K_P['RT'].tolist()})
            annotations.append(anno)

    categories = [{
        'supercategory': 'none',
        'id': obj_id,
        'name': str(obj_id)
    } for obj_id in obj_ids]
    instance = {
        'images': images,
        'annotations': annotations,
        'categories': categories
    }
    anno_path = os.path.join(data_root, 'render.json')
    with open(anno_path, 'w') as f:
        json.dump(instance, f)
Exemplo n.º 7
0
def _handle_render_train_symmetry_pose(obj_id):
    """Augment each *_RT.pkl of one object's renders with a rectified pose.

    The symmetry-rectified rotation is stored under the extra key 's_RT' and
    the pickle is written back in place.
    """
    render_dir = os.path.join('data/tless', 'renders', str(obj_id))

    frame_count = len(glob.glob(os.path.join(render_dir, '*.pkl')))
    for frame in tqdm.tqdm(range(frame_count)):
        pkl_path = os.path.join(render_dir, '{}_RT.pkl'.format(frame))
        meta = read_pickle(pkl_path)
        rt = meta['RT']
        rectified_R = symmetry_utils.TLESS_rectify(obj_id, rt[:, :3])
        meta['s_RT'] = np.concatenate([rectified_R, rt[:, 3:]], axis=1)
        save_pickle(meta, pkl_path)
Exemplo n.º 8
0
 def get_dataset(num=10):
     """Build the annotation list for the special 'duck' render set.

     Returns *num* dicts with image paths, the ground-truth pose and the
     projected object center.
     """
     proj = Projector()
     db = LineModModelDB()
     samples = []
     for idx in range(num):
         rt = read_pickle(os.path.join(cfg.LINEMOD, 'special/duck/{}_RT.pkl'.format(idx)))['RT']
         samples.append({
             'rgb_pth': 'special/duck/{}.jpg'.format(idx),
             'dpt_pth': 'special/duck/{}_depth.png'.format(idx),
             'RT': rt,
             'center': proj.project(db.get_centers_3d('duck'), rt, 'blender'),
             'rnd_typ': 'render',
         })
     return samples
Exemplo n.º 9
0
    def validate_pose(self, data_root='/home/pengsida/Datasets/YCB'):
        """Visual sanity check: project the model under the first render's pose.

        Args:
            data_root: YCB dataset root. Parameterized (defaulting to the old
                hard-coded path) so the check works on other machines.
        """
        rgb_path = '{}/renders/{}/0.jpg'.format(data_root, self.class_type)
        pose_path = '{}/renders/{}/0_RT.pkl'.format(data_root, self.class_type)
        model_path = '{}/models/{}/points.xyz'.format(data_root, self.class_type)

        img = np.array(Image.open(rgb_path))
        pose = read_pickle(pose_path)['RT']
        model_3d = np.loadtxt(model_path)
        model_2d = self.projector.project(model_3d, pose, 'blender')
        import matplotlib.pyplot as plt
        # overlay the projected model points on the rendered image
        plt.imshow(img)
        plt.plot(model_2d[:, 0], model_2d[:, 1], 'r.')
        plt.show()
Exemplo n.º 10
0
    def __init__(self,cls_name):
        """Load (or build and cache) the truncated-set pose database for one class."""
        self.cls_name=cls_name

        # base directory of the LINEMOD data
        self.linemod_dir=cfg.LINEMOD

        self.pkl=os.path.join(self.linemod_dir,'posedb','{}_truncated.pkl'.format(cls_name))
        # reuse the cached database when present, otherwise build it from disk
        self.set=(read_pickle(self.pkl) if os.path.exists(self.pkl)
                  else self.collect_truncated_set_info())
Exemplo n.º 11
0
    def get_plane_height(self):
        """Return the support-plane height for this class, caching per-class values.

        The height is the minimum z of the blender model; newly computed values
        are persisted to self.plane_height_path.
        """
        cache = (read_pickle(self.plane_height_path)
                 if os.path.exists(self.plane_height_path) else {})

        if self.class_type in cache:
            return cache[self.class_type]

        transformer = PoseTransformer(self.class_type)
        blender_model = transformer.get_blender_model()
        height = np.min(blender_model[:, -1])
        cache[self.class_type] = height
        save_pickle(cache, self.plane_height_path)
        return height
Exemplo n.º 12
0
def get_rendering_model(data_root):
    """Load (or build and cache) the CAD meshes for all 30 T-LESS objects.

    Returns a dict mapping obj_id -> mesh as produced by
    opengl_renderer.load_ply; the dict is cached as rendering_model.pkl.
    """
    cache_path = os.path.join(data_root, 'rendering_model.pkl')
    if os.path.exists(cache_path):
        return base_utils.read_pickle(cache_path)

    ply_pattern = os.path.join(data_root, 'obj_{:03}.ply')
    obj_ids = [i + 1 for i in range(30)]
    models = {
        obj_id: opengl_renderer.load_ply(ply_pattern.format(obj_id))
        for obj_id in tqdm.tqdm(obj_ids)
    }
    base_utils.save_pickle(models, cache_path)

    return models
Exemplo n.º 13
0
def record_fuse_ann(model_meta, img_id, ann_id, images, annotations):
    """Append COCO-style records for the 'fuse' set of one LINEMOD class.

    Frames where the class occupies fewer than 400 mask pixels are skipped.
    Mutates *images* and *annotations* in place.

    Returns:
        The updated (img_id, ann_id) pair.
    """
    data_root = model_meta['data_root']
    cls = model_meta['cls']
    split = model_meta['split']  # NOTE(review): unused; kept so a missing key still fails fast
    corner_3d = model_meta['corner_3d']
    center_3d = model_meta['center_3d']
    fps_3d = model_meta['fps_3d']
    K = model_meta['K']  # NOTE(review): replaced per-frame by a shifted linemod_K below

    fuse_dir = os.path.join(data_root, 'fuse')
    original_K = linemod_K
    ann_num = len(glob.glob(os.path.join(fuse_dir, '*.pkl')))
    cls_idx = linemod_cls_names.index(cls)
    for ind in tqdm.tqdm(range(ann_num)):
        mask_path = os.path.join(fuse_dir, '{}_mask.png'.format(ind))
        mask_real = read_mask(mask_path, 'fuse', cls_idx + 1)
        if (np.sum(mask_real) < 400):
            continue

        img_name = '{}_rgb.jpg'.format(ind)
        rgb_path = os.path.join(fuse_dir, img_name)
        # context manager closes the file handle instead of leaking it
        with Image.open(rgb_path) as rgb:
            img_size = rgb.size
        img_id += 1
        info = {'file_name': rgb_path, 'height': img_size[1], 'width': img_size[0], 'id': img_id}

        begins, poses = read_pickle(os.path.join(fuse_dir, '{}_info.pkl'.format(ind)))
        pose = poses[cls_idx]
        # shift the principal point by the crop offset of this class
        K = original_K.copy()
        K[0, 2] += begins[cls_idx, 1]
        K[1, 2] += begins[cls_idx, 0]

        corner_2d = project(corner_3d, K, pose)
        center_2d = project(center_3d[None], K, pose)[0]
        fps_2d = project(fps_3d, K, pose)

        ann_id += 1
        anno = {'mask_path': mask_path, 'image_id': img_id, 'category_id': 1, 'id': ann_id}
        anno.update({'corner_3d': corner_3d.tolist(), 'corner_2d': corner_2d.tolist()})
        anno.update({'center_3d': center_3d.tolist(), 'center_2d': center_2d.tolist()})
        anno.update({'fps_3d': fps_3d.tolist(), 'fps_2d': fps_2d.tolist()})
        anno.update({'K': K.tolist(), 'pose': pose.tolist()})
        anno.update({'data_root': fuse_dir})
        anno.update({'type': 'fuse', 'cls': cls})
        annotations.append(anno)
        images.append(info)

    return img_id, ann_id
Exemplo n.º 14
0
 def getval_dataset(num=15):
     """Build the validation sample list for the homemade 'intake' model.

     Reads *num* frames starting at index 3482 from the hard-coded validation
     directory. (The leftover per-frame debug print was removed.)
     """
     dataset = []
     projector = Projector()
     modeldb = HomemadeModelDB()
     # NOTE(review): machine-specific path -- consider parameterizing
     source_dir = '/media/volvomlp2/03C796544677EF72/BBB/HOMEMADE/ladderframe/validation/'
     for k in range(3482, 3482 + num):
         data = {}
         data['rgb_pth'] = os.path.join(source_dir, '{}.jpg'.format(k))
         data['dpt_pth'] = os.path.join(source_dir,
                                        '{}_depth.png'.format(k))
         data['RT'] = read_pickle(
             os.path.join(source_dir, '{}_RT.pkl'.format(k)))['RT']
         data['center'] = projector.project(
             modeldb.get_centers_3d('intake'), data['RT'], 'blender')
         data['rnd_typ'] = 'render'
         dataset.append(data)
     return dataset
Exemplo n.º 15
0
    def __init__(self,cls_name):
        """Load (or build and cache) the occlusion-LINEMOD real-set database for one class."""
        self.cls_name=cls_name

        # directories used while processing
        self.linemod_dir=cfg.OCCLUSION_LINEMOD
        self.rgb_dir='RGB-D/rgb_noseg'
        self.mask_dir='masks/{}'.format(cls_name)
        # pose folders are capitalized, e.g. 'poses/Cat'
        pose_subdir='poses/{}{}'.format(cls_name[0].upper(),cls_name[1:])
        self.rt_dir=os.path.join(self.linemod_dir,pose_subdir)

        self.real_pkl=os.path.join(self.linemod_dir,'posedb','{}_real.pkl'.format(cls_name))
        # reuse the cached database when present, otherwise build it from disk
        self.real_set=(read_pickle(self.real_pkl) if os.path.exists(self.real_pkl)
                       else self.collect_real_set_info())

        self.test_real_set=[]
        self.train_real_set=[]
        self.get_train_test_split()
Exemplo n.º 16
0
    def collect_render_set_info(self,pkl_file,render_dir,format='jpg'):
        """Index the blender render set for this class and cache it to *pkl_file*."""
        records=[]    # blender standard
        proj=Projector()
        db=LineModModelDB()
        cls=self.cls_name
        for idx in range(self.render_num):
            rt=read_pickle(os.path.join(self.linemod_dir,render_dir,'{}_RT.pkl'.format(idx)))['RT']
            item={}
            item['rgb_pth']=os.path.join(render_dir,'{}.{}'.format(idx,format))
            item['RT']=rt
            item['cls_typ']=cls
            item['rnd_typ']='render'
            item['corners']=proj.project(db.get_corners_3d(cls),rt,'blender')
            item['farthest']=proj.project(db.get_farthest_3d(cls),rt,'blender')
            item['center']=proj.project(db.get_centers_3d(cls)[None,:],rt,'blender')
            for fp_num in [4,12,16,20]:
                item['farthest{}'.format(fp_num)]=proj.project(db.get_farthest_3d(cls,fp_num),rt,'blender')
            item['small_bbox']=proj.project(db.get_small_bbox(cls),rt,'blender')
            # vanishing points of the three coordinate axes
            axes=np.concatenate([np.identity(3),np.zeros([3,1])],1).astype(np.float32)
            item['van_pts']=proj.project_h(axes,rt,'blender')
            records.append(item)

        save_pickle(records,pkl_file)
        return records
Exemplo n.º 17
0
    def collect_val_render(self, pkl_file, render_dir, format='jpg', start=3482, stop=3499):
        """Index the validation renders and cache them to *pkl_file*.

        Args:
            pkl_file: destination pickle path.
            render_dir: kept for signature compatibility; paths actually use
                self.render_val_dir. NOTE(review): confirm that is intended.
            format: image extension of the rendered frames.
            start, stop: frame index range; defaults preserve the previously
                hard-coded 3482..3498 behavior.

        The leftover per-frame and summary debug prints were removed.
        """
        database = []
        projector = Projector()
        modeldb = HomemadeModelDB()
        for k in range(start, stop):
            data = {}
            data['rgb_pth'] = os.path.join(self.render_val_dir,
                                           '{}.{}'.format(k, format))
            data['dpt_pth'] = os.path.join(self.render_val_dir,
                                           '{}_depth.png'.format(k))
            data['RT'] = read_pickle(
                os.path.join(self.homemade_dir, self.render_val_dir,
                             '{}_RT.pkl'.format(k)))['RT']
            data['cls_typ'] = self.cls_name
            data['rnd_typ'] = 'render'
            data['corners'] = projector.project(
                modeldb.get_corners_3d(self.cls_name), data['RT'], 'blender')
            data['farthest'] = projector.project(
                modeldb.get_farthest_3d(self.cls_name), data['RT'], 'blender')
            data['center'] = projector.project(
                modeldb.get_centers_3d(self.cls_name)[None, :], data['RT'],
                'blender')
            for num in [4, 12, 16, 20]:
                data['farthest{}'.format(num)] = projector.project(
                    modeldb.get_farthest_3d(self.cls_name, num), data['RT'],
                    'blender')
            data['small_bbox'] = projector.project(
                modeldb.get_small_bbox(self.cls_name), data['RT'], 'blender')
            axis_direct = np.concatenate(
                [np.identity(3), np.zeros([3, 1])], 1).astype(np.float32)
            data['van_pts'] = projector.project_h(axis_direct, data['RT'],
                                                  'blender')
            database.append(data)

        save_pickle(database, pkl_file)
        return database
Exemplo n.º 18
0
    def __init__(self,
                 cls_name,
                 render_num=4000,
                 fuse_num=0,
                 ms_num=0,
                 has_render_set=True,
                 has_fuse_set=True):
        """Build (or load cached) pose databases for one HOMEMADE class.

        Args:
            cls_name: object class name.
            render_num: number of rendered images to index.
            fuse_num: number of fused images to index.
            ms_num: kept for interface compatibility; unused here.
            has_render_set / has_fuse_set: toggle the respective databases.
        """
        self.cls_name = cls_name

        # some dirs for processing
        # (a dead `os.path.join(...)` expression whose result was discarded
        # used to sit here; removed)
        self.homemade_dir = cfg.HOMEMADE
        self.render_dir = 'renders/{}'.format(cls_name)
        self.rgb_dir = '{}/JPEGImages'.format(cls_name)
        self.mask_dir = '{}/mask'.format(cls_name)
        self.rt_dir = os.path.join(cfg.DATA_DIR, 'HOMEMADE', cls_name, 'pose')
        self.render_num = render_num
        self.render_val_dir = 'renders/{}/validation'.format(cls_name)
        self.test_fn = '{}/test.txt'.format(cls_name)
        self.train_fn = '{}/train.txt'.format(cls_name)
        self.val_fn = '{}/val.txt'.format(cls_name)

        if has_render_set:
            self.render_pkl = os.path.join(self.homemade_dir, 'posedb',
                                           '{}_render.pkl'.format(cls_name))
            # prepare dataset
            if os.path.exists(self.render_pkl):
                # read cached
                self.render_set = read_pickle(self.render_pkl)
            else:
                # process render set
                self.render_set = self.collect_render_set_info(
                    self.render_pkl, self.render_dir)
                #self.render_val_set=self.collect_val_render(self.render_val_pkl,self.render_val_dir)
        else:
            self.render_set = []

        self.real_pkl = os.path.join(self.homemade_dir, 'posedb',
                                     '{}_real.pkl'.format(cls_name))
        # default so the attribute always exists: collect_real_set_info() was
        # deliberately disabled (previously an unreachable `if False:` branch),
        # which left self.real_set unset whenever the cache was missing
        self.real_set = []
        if os.path.exists(self.real_pkl):
            # read cached
            self.real_set = read_pickle(self.real_pkl)

        # prepare train test split
        self.train_real_set = []
        self.test_real_set = []
        self.val_real_set = []
        #self.collect_train_val_test_info()

        self.fuse_set = []
        self.fuse_dir = 'fuse'
        self.fuse_num = fuse_num
        self.cls_idx = cfg.homemade_cls_names.index(cls_name)

        if has_fuse_set:
            self.fuse_pkl = os.path.join(cfg.HOMEMADE, 'posedb',
                                         '{}_fuse.pkl'.format(cls_name))
            # prepare dataset
            if os.path.exists(self.fuse_pkl):
                # read cached
                self.fuse_set = read_pickle(self.fuse_pkl)
            else:
                # process fuse set
                self.fuse_set = self.collect_fuse_info()
        else:
            self.fuse_set = []
Exemplo n.º 19
0
def test_pose_to_coco():
    """Convert T-LESS test_primesense ground truth into a COCO-style test.json.

    Model corners/centers/farthest points are precomputed once per object;
    per-scene annotation is delegated to record_scene_ann.
    """
    data_root = 'data/tless/test_primesense'
    scene_ids = [i + 1 for i in range(20)]
    models = get_rendering_model('data/tless/models_cad')
    # CAD vertices are in mm; convert corner coordinates to meters
    corner_3d = {
        i: get_model_corners(v['pts']) / 1000.
        for i, v in models.items()
    }
    center_3d = {
        i: (np.max(v, 0) + np.min(v, 0)) / 2
        for i, v in corner_3d.items()
    }
    fps_3d = {
        i + 1:
        np.loadtxt('data/tless/farthest/farthest_{:02}.txt'.format(i + 1))
        for i in range(0, 30)
    }

    model_meta = {
        'corner_3d': corner_3d,
        'center_3d': center_3d,
        'fps_3d': fps_3d,
    }

    img_id = 0
    ann_id = 0
    images = []
    annotations = []

    for scene_id in tqdm.tqdm(scene_ids):
        scene_dir = os.path.join(data_root, '{:02}'.format(scene_id))
        rgb_dir = os.path.join(scene_dir, 'rgb')
        rgb_paths = glob.glob(os.path.join(rgb_dir, '*.png'))
        # safe_load: plain yaml.load without a Loader is rejected by modern
        # PyYAML; context managers also close the files instead of leaking them
        with open(os.path.join(scene_dir, 'gt.yml')) as f:
            gt = yaml.safe_load(f)
        with open(os.path.join(scene_dir, 'info.yml')) as f:
            K_info = yaml.safe_load(f)
        a_pixel_num_dict = base_utils.read_pickle(
            os.path.join(scene_dir, 'pixel_num.pkl'))
        pose_meta = {
            'rgb_paths': rgb_paths,
            'gt': gt,
            'K': K_info,
            'a_pixel_num': a_pixel_num_dict
        }

        img_id, ann_id = record_scene_ann(model_meta, pose_meta, img_id,
                                          ann_id, images, annotations)

    obj_ids = [i + 1 for i in range(30)]
    categories = [{
        'supercategory': 'none',
        'id': obj_id,
        'name': str(obj_id)
    } for obj_id in obj_ids]
    instance = {
        'images': images,
        'annotations': annotations,
        'categories': categories
    }

    data_cache_dir = 'data/cache/tless_pose/'
    # portable replacement for os.system('mkdir -p ...')
    os.makedirs(data_cache_dir, exist_ok=True)
    anno_path = os.path.join(data_cache_dir, 'test.json')
    with open(anno_path, 'w') as f:
        json.dump(instance, f)
Exemplo n.º 20
0
def read_anns(ann_files):
    """Concatenate the annotation lists stored in the given pickle files."""
    return [ann for path in ann_files for ann in read_pickle(path)]
    def __init__(self,
                 cls_name,
                 render_num=10000,
                 fuse_num=10000,
                 ms_num=10000,
                 has_render_set=True,
                 has_fuse_set=True):
        """Build (or load cached) render/real/fuse pose databases for one
        LINEMOD class.

        Args:
            cls_name: LINEMOD class name, e.g. 'driller'.
            render_num: number of rendered images to index.
            fuse_num: number of fused images to index.
            ms_num: kept for interface compatibility; unused here.
            has_render_set / has_fuse_set: toggle the respective databases.
        """
        self.cls_name = cls_name

        # some dirs for processing
        # (a dead `os.path.join(...)` expression whose result was discarded
        # used to sit here; removed)
        self.linemod_dir = cfg.LINEMOD
        self.render_dir = 'renders/{}'.format(cls_name)
        self.rgb_dir = '{}/JPEGImages'.format(cls_name)
        self.mask_dir = '{}/mask'.format(cls_name)
        self.rt_dir = os.path.join(cfg.DATA_DIR, 'LINEMOD_ORIG', cls_name,
                                   'data')
        self.render_num = render_num

        self.test_fn = '{}/test.txt'.format(cls_name)
        self.train_fn = '{}/train.txt'.format(cls_name)
        self.val_fn = '{}/val.txt'.format(cls_name)

        # self-generated data; revisit later
        if has_render_set:
            self.render_pkl = os.path.join(self.linemod_dir, 'posedb',
                                           '{}_render.pkl'.format(cls_name))
            # prepare dataset
            if os.path.exists(self.render_pkl):
                # read cached
                self.render_set = read_pickle(self.render_pkl)
            else:
                # process render set
                self.render_set = self.collect_render_set_info(
                    self.render_pkl, self.render_dir)
        else:
            self.render_set = []

        self.real_pkl = os.path.join(self.linemod_dir, 'posedb',
                                     '{}_real.pkl'.format(cls_name))
        # key step: if the pose pickle does not exist yet, generate it
        if os.path.exists(self.real_pkl):
            # read cached
            self.real_set = read_pickle(self.real_pkl)
        else:
            # process real set: builds the pose ground truth of the LINEMOD
            # data for pvnet (e.g. driller_real.pkl); note it also produces
            # the corresponding *_fuse.pkl as a side effect
            self.real_set = self.collect_real_set_info()

        # prepare train test split
        self.train_real_set = []
        self.test_real_set = []
        self.val_real_set = []
        self.collect_train_val_test_info()

        self.fuse_set = []
        self.fuse_dir = 'fuse'
        self.fuse_num = fuse_num
        self.cls_idx = cfg.linemod_cls_names.index(cls_name)

        if has_fuse_set:
            self.fuse_pkl = os.path.join(cfg.LINEMOD, 'posedb',
                                         '{}_fuse.pkl'.format(cls_name))
            # prepare dataset
            if os.path.exists(self.fuse_pkl):
                # read cached
                self.fuse_set = read_pickle(self.fuse_pkl)
            else:
                # process fuse set
                self.fuse_set = self.collect_fuse_info()
        else:
            self.fuse_set = []
Exemplo n.º 22
0
def test_to_coco():
    """Convert T-LESS test_primesense ground truth into a COCO detection json.

    One image record per frame; one annotation (bbox + area) per sufficiently
    visible object instance, visibility judged against the amodal pixel count.
    """
    data_root = 'data/tless/test_primesense'
    scene_ids = [i + 1 for i in range(20)]

    img_id = 0
    ann_id = 0
    images = []
    annotations = []

    for scene_id in tqdm.tqdm(scene_ids):
        scene_dir = os.path.join(data_root, '{:02}'.format(scene_id))
        rgb_dir = os.path.join(scene_dir, 'rgb')
        rgb_paths = glob.glob(os.path.join(rgb_dir, '*.png'))
        # safe_load: plain yaml.load without a Loader is rejected by modern
        # PyYAML; the context manager also closes the file instead of leaking it
        with open(os.path.join(scene_dir, 'gt.yml')) as f:
            gt = yaml.safe_load(f)
        a_pixel_num_dict = base_utils.read_pickle(
            os.path.join(scene_dir, 'pixel_num.pkl'))
        for rgb_path in tqdm.tqdm(rgb_paths):
            rgb = Image.open(rgb_path)
            img_size = rgb.size
            img_id += 1
            info = {
                'rgb_path': rgb_path,
                'height': img_size[1],
                'width': img_size[0],
                'id': img_id
            }
            images.append(info)

            # frame index encoded in the file name, e.g. '0042.png' -> 42
            frame = int(os.path.basename(rgb_path).replace('.png', ''))
            gt_ = gt[frame]
            mask_path = rgb_path.replace('rgb', 'mask')
            mask = np.array(Image.open(mask_path))
            a_pixel_nums = a_pixel_num_dict[frame]
            for instance_id, instance_gt in enumerate(gt_):
                obj_id = instance_gt['obj_id']
                # instance masks are encoded as obj_id * 1000 + instance index
                mask_id = obj_id * 1000 + instance_id
                mask_ = (mask == mask_id).astype(np.uint8)
                pixel_num = np.sum(mask_)
                a_pixel_num = a_pixel_nums[instance_id]

                # guard against division by zero for fully occluded instances,
                # then drop instances below the visibility threshold
                if a_pixel_num == 0 or pixel_num / a_pixel_num < tless_config.visib_gt_min:
                    continue

                ann_id += 1
                bbox = cv2.boundingRect(mask_)
                area = int(np.sum(mask_.astype(np.uint8)))
                anno = {
                    'area': area,
                    'image_id': img_id,
                    'bbox': bbox,
                    'iscrowd': 0,
                    'category_id': obj_id,
                    'id': ann_id
                }
                annotations.append(anno)

    obj_ids = [i + 1 for i in range(30)]
    categories = [{
        'supercategory': 'none',
        'id': obj_id,
        'name': str(obj_id)
    } for obj_id in obj_ids]
    instance = {
        'images': images,
        'annotations': annotations,
        'categories': categories
    }
    anno_path = os.path.join(data_root, 'test.json')
    with open(anno_path, 'w') as f:
        json.dump(instance, f)
Exemplo n.º 23
0
 def read_blender_pose(self, index):
     """Load the ground-truth 'RT' pose pickled for frame *index* of this class."""
     path = self.pose_pattern.format(self.class_type, index)
     return read_pickle(path)['RT']
Exemplo n.º 24
0

def make_bg_fn():
    """Write a flat mid-grey 1024x1024 background image for the sym renders."""
    from skimage.io import imsave
    background = np.full([1024, 1024, 3], 127, np.uint8)
    imsave('data/render_sym_valid/background.jpg', background)


if __name__ == "__main__":
    # Smoke test for the symmetry rectification: render symmetry-equivalent
    # views of one object and check that rectification maps their rotations
    # to the same canonical rotation.
    import sys
    sys.path.append('.')
    from lib.utils.base_utils import read_pickle
    # obj 17 -- presumably chosen because it has a known symmetry; confirm
    obj_id, sample_num = 17, 5
    render_data(obj_id, sample_num)
    for k in range(sample_num):
        # frames k and k+sample_num appear to hold symmetry-equivalent poses
        # of the same view -- TODO confirm against render_data's layout
        RT0 = read_pickle(f'data/render_sym_valid/{k}_RT.pkl')['RT']
        RT1 = read_pickle(f'data/render_sym_valid/{k+5}_RT.pkl')['RT']
        R0 = TLESS_rectify(obj_id, RT0[:, :3])
        R1 = TLESS_rectify(obj_id, RT1[:, :3])
        # rectified rotations must agree up to float tolerance
        assert (np.mean(np.abs(R0 - R1)) < 1e-6)
        import matplotlib.pyplot as plt
        # show the paired renders side by side for visual inspection
        plt.subplot(121)
        plt.imshow(imread(f'data/render_sym_valid/{k}.png'))
        plt.subplot(122)
        plt.imshow(imread(f'data/render_sym_valid/{k+5}.png'))
        plt.show()

    # exercise the generic rectification on several synthetic symmetry groups
    validate_rectification_implementation(gen_axis_group(60, 2))
    validate_rectification_implementation(gen_axis_group(180, 2))
    validate_rectification_implementation(gen_axis_group(90, 2))
    validate_rectification_implementation(gen_axis_group(180, 1))