Example #1
                # Normalize the line (sometimes comma and sometimes
                # whitespace is used as a separator)
                line = lines[1 + 4 * mat_id + y].strip().replace(',', ' ')
                mat[y, :] = np.array(list(map(float, line.split())))
            poses.append({'R': mat[:3, :3], 't': mat[:3, 3].reshape((3, 1))})
    return poses

with open(bbox_cens_path, 'r') as f:
    bbox_cens = np.array(yaml.load(f, Loader=yaml.CLoader))

for scene_id in scene_ids:
    scene_info = {}
    scene_gt = {}

    # Prepare folders
    misc.ensure_dir(os.path.dirname(rgb_out_mpath.format(scene_id, 0)))
    misc.ensure_dir(os.path.dirname(depth_out_mpath.format(scene_id, 0)))

    # Get list of image IDs - consider only images for which the fixed GT
    # is available
    poses_fpaths = glob.glob(os.path.join(
        os.path.dirname(pose_mpath.format(scene_id, 0)), '*'))
    im_ids = sorted([int(e.split('poses')[1].split('.txt')[0]) for e in poses_fpaths])

    # Load object model
    obj_id = scene_id  # The object id is the same as scene id for this dataset
    model = inout.load_ply(model_mpath.format(obj_id))

    # Transformation which was applied to the object models (its inverse will
    # be applied to the GT poses):
    # 1) Translate the bounding box center to the origin
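
# A minimal sketch (an assumption, not the original code) of how the inverse
# of the model translation is applied to a GT pose: if bbox_cen was
# subtracted from the model vertices, then for any model point p,
# R * p + t == R * (p - bbox_cen) + (t + R * bbox_cen), so the compensated
# translation is t' = t + R * bbox_cen and R is unchanged.
def compensate_gt_pose(R, t, bbox_cen):
    # R: 3x3 rotation, t: 3x1 translation, bbox_cen: 3-vector (numpy arrays)
    return R, t + R.dot(np.asarray(bbox_cen).reshape((3, 1)))
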
base_path = '/local/datasets/tlod/hinterstoisser/'

in_obj_info_mpath = base_path + 'train_400-500/{:02d}/info.yml'
in_obj_gt_mpath = base_path + 'train_400-500/{:02d}/gt.yml'
in_rgb_mpath = base_path + 'train_400-500/{:02d}/rgb/{:04d}.png'
in_depth_mpath = base_path + 'train_400-500/{:02d}/depth/{:04d}.png'

out_obj_info_mpath = base_path + 'train/{:02d}/info.yml'
out_obj_gt_mpath = base_path + 'train/{:02d}/gt.yml'
out_rgb_mpath = base_path + 'train/{:02d}/rgb/{:04d}.png'
out_depth_mpath = base_path + 'train/{:02d}/depth/{:04d}.png'

for obj_id in obj_ids:
    # Prepare folders
    misc.ensure_dir(os.path.dirname(out_rgb_mpath.format(obj_id, 0)))
    misc.ensure_dir(os.path.dirname(out_depth_mpath.format(obj_id, 0)))

    # Load object info and gt
    with open(in_obj_info_mpath.format(obj_id), 'r') as f:
        in_obj_info = yaml.load(f, Loader=yaml.CLoader)
    with open(in_obj_gt_mpath.format(obj_id), 'r') as f:
        in_obj_gt = yaml.load(f, Loader=yaml.CLoader)

    out_obj_info = {}
    out_obj_gt = {}
    for im_id in sorted(in_obj_info.keys()):
        if im_id not in select_im_ids:
            continue

        shutil.copyfile(in_rgb_mpath.format(obj_id, im_id),
                        out_rgb_mpath.format(obj_id, im_id))
# Author: Tomas Hodan ([email protected])
# Center for Machine Perception, Czech Technical University in Prague

# Example of sampling views from a view sphere.

import os
import sys

sys.path.append(os.path.abspath('..'))
from pysixdb import view_sampler, misc

min_n_views = 642  # Minimum number of views to sample on the sphere
radius = 1  # Radius of the view sphere
hemisphere = False  # If True, sample views from one hemisphere only

out_views_vis_path = '../output/view_sphere.ply'

misc.ensure_dir(os.path.dirname(out_views_vis_path))

# Sample views
views, views_level = view_sampler.sample_views(min_n_views, radius, hemisphere)
print('Sampled views: ' + str(len(views)))

view_sampler.save_vis(out_views_vis_path, views)
import shutil
import pickle
import numpy as np
import cv2

sys.path.append(os.path.abspath('..'))
from pysixdb import misc

stats_path = '/home/tom/th_data/cmp/projects/sixdb/sixdb_toolkit/output/eval_gt/rutgers_stats.p'
vis_gt_mpath = '/home/tom/th_data/cmp/projects/sixdb/sixdb_toolkit/output/vis_gt_poses_rutgers/{:02d}_{:04d}.jpg'
depth_diff_mpath = '/home/tom/th_data/cmp/projects/sixdb/sixdb_toolkit/output/eval_gt/rutgers/{:02d}_{:04d}_{:02d}_depth_diff.jpg'
rgb_mpath = '/local/datasets/tlod/rutgers/frank/test/{:02d}/rgb/{:04d}.png'
depth_mpath = '/local/datasets/tlod/rutgers/frank/test/{:02d}/depth/{:04d}.png'

out_path = '/home/tom/th_data/cmp/projects/sixdb/sixdb_toolkit/output/eval_gt_analysis'
misc.ensure_dir(out_path)
out_mbasename = '{:04d}_scene={}_im={}'
out_rgb_mpath = os.path.join(out_path, '{}_color.jpg')
out_depth_mpath = os.path.join(out_path, '{}_depth.jpg')
out_vis_gt_mpath = os.path.join(out_path, '{}_vis_gt.jpg')
out_depth_diff_mpath = os.path.join(out_path, '{}_depth_diff-mean={:.2f}_std={:.2f}_valid={:.2f}.jpg')

print('Loading stats...')
with open(stats_path, 'rb') as f:
    stats = pickle.load(f)
print('stats loaded.')

# Analyse the depth differences
#-------------------------------------------------------------------------------
# Fraction of depth differences to be trimmed - to avoid erroneous depth
# measurements and to some extent also occlusion
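
# A minimal sketch (an assumption, not the original code) of trimming that
# fraction of the largest absolute depth differences before computing the
# statistics; "trim_frac" is a hypothetical name for the trimmed fraction:
def trimmed_depth_diff_stats(depth_diffs, trim_frac=0.05):
    d = np.sort(np.abs(np.asarray(depth_diffs, dtype=np.float64).ravel()))
    d = d[:max(1, int(round(len(d) * (1.0 - trim_frac))))]
    return d.mean(), d.std()
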
Example #5
# Indicates whether to render RGB image
vis_rgb = True

# Indicates whether to resolve visibility in the rendered RGB image (using
# depth renderings). If True, only the part of object surface, which is not
# occluded by any other modeled object, is visible. If False, RGB renderings
# of individual objects are blended together.
vis_rgb_resolve_visib = False
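
# A minimal sketch (an assumption about the mechanism, not the toolkit's
# implementation) of resolving visibility from per-object depth renderings:
# a pixel of an object is kept only where that object is closest to the
# camera, i.e. where its depth equals the pixel-wise minimum over objects.
def resolve_visibility(rgbs, depths):
    # rgbs: list of HxWx3 arrays; depths: list of HxW arrays (0 = no surface)
    depths = [np.where(d > 0, d, np.inf) for d in depths]
    min_depth = np.min(np.stack(depths), axis=0)
    out = np.zeros_like(rgbs[0])
    for rgb, d in zip(rgbs, depths):
        mask = np.isfinite(d) & (d == min_depth)
        out[mask] = rgb[mask]
    return out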

# Indicates whether to render depth image
vis_depth = False

# Path masks for output images
vis_rgb_mpath = '../output/vis_gt_poses/{:02d}_{:04d}.jpg'
vis_depth_mpath = '../output/vis_gt_poses/{:02d}_{:04d}_depth_diff.jpg'
misc.ensure_dir(os.path.dirname(vis_rgb_mpath))

scene_ids_curr = range(1, par.scene_count + 1)
if scene_ids:
    scene_ids_curr = sorted(set(scene_ids_curr).intersection(scene_ids))
for scene_id in scene_ids_curr:
    # Load scene info and gt poses
    scene_info = inout.load_info(par.scene_info_mpath.format(scene_id))
    scene_gt = inout.load_gt(par.scene_gt_mpath.format(scene_id))

    # Load models of objects that appear in the current scene
    obj_ids = {gt['obj_id'] for gts in scene_gt.values() for gt in gts}
    models = {}
    for obj_id in obj_ids:
        models[obj_id] = inout.load_ply(par.model_mpath.format(obj_id))
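
    # A hypothetical sketch of the typical continuation (not the original
    # code): each model is rendered in its GT pose and blended over the scene
    # image. The renderer.render signature and the par.test_rgb_mpath mask
    # used below are assumptions, not confirmed parts of the toolkit.
    for im_id in sorted(scene_gt.keys()):
        K = scene_info[im_id]['cam_K']
        rgb = inout.load_im(
            par.test_rgb_mpath.format(scene_id, im_id)).astype(np.float32)
        for gt in scene_gt[im_id]:
            ren_rgb = renderer.render(models[gt['obj_id']], rgb.shape[1::-1],
                                      K, gt['cam_R_m2c'], gt['cam_t_m2c'],
                                      mode='rgb')
            mask = ren_rgb.sum(axis=2) > 0
            rgb[mask] = 0.5 * rgb[mask] + 0.5 * ren_rgb[mask]
        inout.save_im(vis_rgb_mpath.format(scene_id, im_id),
                      rgb.astype(np.uint8))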