Code example #1
# Assumed imports for this excerpt: the module names follow the 'pysixdb'
# package used in Code example #3; 'bpy' implies this runs inside Blender's
# Python, and 'par' (dataset parameters) is assumed to be imported elsewhere.
import bpy
import numpy as np
from pysixdb import inout, misc, transform

def save_scene(scene_id):
    """Saves the refined ground-truth poses for the specified scene."""
    global scene_info
    global scene_gt

    # Collect poses expressed in the world coordinate system
    ref_obj_poses = []
    for obj in bpy.data.objects:
        if is_obj(obj.name):
            obj_id = obj.name.split('obj_')[1].split('.')[0]  # Parse object ID from names like 'obj_05' or 'obj_05.001'
            e, t = get_pose(obj.name)
            R = transform.euler_matrix(e[0], e[1], e[2], axes='sxyz')[:3, :3]
            ref_obj_poses.append({
                'obj_id': int(obj_id),
                'cam_R_m2w': R,
                'cam_t_m2w': np.array(t).reshape((3, 1))
            })

    # Load models of objects present in the scene
    obj_ids = set([p['obj_id'] for p in ref_obj_poses])
    models = {}
    for obj_id in obj_ids:
        models[obj_id] = inout.load_ply(par.model_mpath.format(obj_id))

    # Transform the poses to the camera coordinate systems using the known
    # camera-to-world transformations
    for im_id in scene_gt.keys():
        scene_gt[im_id] = []
        K = scene_info[im_id]['cam_K']
        R_w2c = scene_info[im_id]['cam_R_w2c']
        t_w2c = scene_info[im_id]['cam_t_w2c']
        for pose in ref_obj_poses:
            R_m2c_new = R_w2c.dot(pose['cam_R_m2w'])
            t_m2c_new = R_w2c.dot(pose['cam_t_m2w']) + t_w2c

            # Get 2D bounding box of the projection of the object model at
            # the refined ground truth pose
            pts_im = misc.project_pts(models[pose['obj_id']]['pts'], K,
                                      R_m2c_new, t_m2c_new)
            pts_im = np.round(pts_im).astype(int)
            ys, xs = pts_im[:, 1], pts_im[:, 0]
            obj_bb = misc.calc_2d_bbox(xs, ys, par.test_im_size)

            scene_gt[im_id].append({
                'obj_id': pose['obj_id'],
                'obj_bb': obj_bb,
                'cam_R_m2c': R_m2c_new,
                'cam_t_m2c': t_m2c_new
            })

    # Save the updated ground truth poses
    scene_gt_path = par.scene_gt_mpath.format(scene_id)
    print('Saving GT poses: ' + scene_gt_path)
    inout.save_gt(scene_gt_path, scene_gt)
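The core of save_scene is composing the refined model-to-world pose with the known world-to-camera transform. A minimal standalone sketch of that composition (all values below are illustrative placeholders, not data from the script):

# Pose chaining: a model point p maps to the camera frame as
#   p_c = R_w2c @ (R_m2w @ p + t_m2w) + t_w2c
#       = (R_w2c @ R_m2w) @ p + (R_w2c @ t_m2w + t_w2c)
import numpy as np

R_m2w = np.eye(3)                        # model-to-world rotation (placeholder)
t_m2w = np.array([[0.0], [0.0], [1.0]])  # model-to-world translation (placeholder)
R_w2c = np.eye(3)                        # world-to-camera rotation (placeholder)
t_w2c = np.array([[0.1], [0.0], [0.0]])  # world-to-camera translation (placeholder)

R_m2c = R_w2c.dot(R_m2w)
t_m2c = R_w2c.dot(t_m2w) + t_w2c

p = np.array([[0.5], [0.5], [0.5]])      # an arbitrary model point
assert np.allclose(R_w2c.dot(R_m2w.dot(p) + t_m2w) + t_w2c,
                   R_m2c.dot(p) + t_m2c)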
Code example #2
    scene_info = {}  # Per-image camera info for the current scene
    scene_gt = {}  # Per-image ground-truth poses for the current scene

    # Prepare folders
    misc.ensure_dir(os.path.dirname(rgb_out_mpath.format(scene_id, 0)))
    misc.ensure_dir(os.path.dirname(depth_out_mpath.format(scene_id, 0)))

    # Get list of image IDs - consider only images for which the fixed GT
    # is available
    poses_fpaths = glob.glob(os.path.join(
        os.path.dirname(pose_mpath.format(scene_id, 0)), '*'))
    im_ids = sorted([int(os.path.basename(e).split('poses')[1].split('.txt')[0])
                     for e in poses_fpaths])

    # Load object model
    obj_id = scene_id  # The object id is the same as scene id for this dataset
    model = inout.load_ply(model_mpath.format(obj_id))

    # Transformation which was applied to the object models (its inverse will
    # be applied to the GT poses):
    # 1) Translate the bounding box center to the origin
    t_model = bbox_cens[obj_id - 1, :].reshape((3, 1))

    im_id_out = 0
    for im_id in im_ids:
        print('scene,view: ' + str(scene_id) + ',' + str(im_id))

        # Load the RGB and depth image
        rgb = inout.read_im(rgb_in_mpath.format(scene_id, im_id))
        depth = inout.read_depth(depth_in_mpath.format(scene_id, im_id))
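The comment above says the inverse of the model transformation t_model will be applied to the GT poses. Since the saved models have points shifted by -t_model (see Code example #3), keeping R @ p + t unchanged requires adding R @ t_model to the translation. A minimal sketch of that compensation, assuming the pose naming used in Code example #1:

import numpy as np

def compensate_model_shift(R_m2c, t_m2c, t_model):
    """Adjust a GT pose for models whose points were shifted by -t_model.

    With p' = p - t_model: R @ p + t == R @ p' + (t + R @ t_model), so the
    rotation is unchanged and the translation gains R @ t_model.
    """
    return R_m2c, t_m2c + R_m2c.dot(t_model)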
Code example #3
import os
import sys
import yaml

sys.path.append(os.path.abspath('..'))
from pysixdb import inout

model_mpath = '/local/datasets/tlod/imperial/tejani/models/obj_{:02d}.ply'
bbox_cens_path = '../output/tejani_bbox_cens.yml'  # File to save bbox centers

bbox_cens = []
obj_ids = range(1, 7)
for obj_id in obj_ids:
    print('Processing obj: ' + str(obj_id))
    model_path = model_mpath.format(obj_id)
    model = inout.load_ply(model_path)

    # Scale
    model['pts'] *= 1000.0  # Convert to [mm]

    # Translate the bounding box center to the origin
    bb_cen = 0.5 * (model['pts'].min(axis=0) + model['pts'].max(axis=0))
    model['pts'] -= bb_cen
    bbox_cens.append(bb_cen.flatten().tolist())

    # Save the transformed model
    inout.save_ply(model_path, model['pts'], model['colors'], model['normals'],
                   model['faces'])

with open(bbox_cens_path, 'w') as f:
    yaml.dump(bbox_cens, f, width=10000)
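Code example #2 indexes bbox_cens as a 2-D array (bbox_cens[obj_id - 1, :]), so the saved list of lists is presumably converted to a NumPy array when loaded. A minimal sketch of that loading step, assuming the same file path:

import numpy as np
import yaml

bbox_cens_path = '../output/tejani_bbox_cens.yml'
with open(bbox_cens_path, 'r') as f:
    bbox_cens = np.array(yaml.safe_load(f))  # shape: (num_objects, 3)

obj_id = 1  # example object ID
t_model = bbox_cens[obj_id - 1, :].reshape((3, 1))  # as used in Code example #2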
Code example #4
vis_depth_mpath = '../output/vis_gt_poses/{:02d}_{:04d}_depth_diff.jpg'
misc.ensure_dir(os.path.dirname(vis_depth_mpath))

scene_ids_curr = range(1, par.scene_count + 1)
if scene_ids:
    scene_ids_curr = set(scene_ids_curr).intersection(scene_ids)
for scene_id in scene_ids_curr:
    # Load scene info and gt poses
    scene_info = inout.load_info(par.scene_info_mpath.format(scene_id))
    scene_gt = inout.load_gt(par.scene_gt_mpath.format(scene_id))

    # Load models of objects that appear in the current scene
    obj_ids = set([gt['obj_id'] for gts in scene_gt.values() for gt in gts])
    models = {}
    for obj_id in obj_ids:
        models[obj_id] = inout.load_ply(par.model_mpath.format(obj_id))

    # Visualize GT poses in the selected images
    im_ids_curr = sorted(scene_info.keys())
    if im_ids:
        im_ids_curr = set(im_ids_curr).intersection(im_ids)
    for im_id in im_ids_curr:
        print('scene: {}, im: {}'.format(scene_id, im_id))

        # Load the images
        rgb = inout.read_im(par.test_rgb_mpath.format(scene_id, im_id))
        depth = inout.read_depth(par.test_depth_mpath.format(scene_id, im_id))
        depth = depth.astype(float)
        depth *= par.cam['depth_scale']  # Scale to [mm]

        # Render the objects at the ground truth poses
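        # (Hedged sketch of a possible continuation: the 'depth_diff' suffix of
        # vis_depth_mpath suggests comparing rendered depth against the
        # captured depth. 'render_object_depth' is a hypothetical placeholder,
        # not a real pysixdb function.)
        K = scene_info[im_id]['cam_K']
        depth_ren = np.zeros_like(depth)
        for gt in scene_gt[im_id]:
            obj_depth = render_object_depth(  # hypothetical renderer
                models[gt['obj_id']], K, gt['cam_R_m2c'], gt['cam_t_m2c'],
                depth.shape)
            # Keep the nearest rendered surface where objects overlap
            mask = (obj_depth > 0) & ((depth_ren == 0) | (obj_depth < depth_ren))
            depth_ren[mask] = obj_depth[mask]

        # Normalized absolute depth difference where both maps are valid
        valid = (depth > 0) & (depth_ren > 0)
        depth_diff = np.zeros_like(depth)
        depth_diff[valid] = np.abs(depth[valid] - depth_ren[valid])
        vis = (255.0 * depth_diff / max(depth_diff.max(), 1.0)).astype(np.uint8)
        inout.save_im(vis_depth_mpath.format(scene_id, im_id),
                      vis)  # assumes an image writer in pysixdb's inout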