Example #1
out_obj_info_path = '../output/' + dataset + '/{:02d}/info.yml'
out_obj_gt_path = '../output/' + dataset + '/{:02d}/gt.yml'
out_views_vis_mpath = '../output/' + dataset + '/views_radius={}.ply'
out_train_text_path = '../output/' + dataset + '/{:02d}/train.txt'
out_labels_path = '../output/' + dataset + '/{:02d}/labels/{:04d}.txt'
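# (The '*_mpath' variables are path masks: format-string templates, e.g.
# out_labels_path.format(3, 42) resolves to
# '../output/<dataset>/03/labels/0042.txt'.)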

# Prepare output folder
# misc.ensure_dir(os.path.dirname(out_obj_info_path))

# Image size and K for SSAA
im_size_rgb = [int(round(x * float(ssaa_fact))) for x in par['cam']['im_size']]
K_rgb = par['cam']['K'] * ssaa_fact
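# A hedged aside (not from this file): with SSAA the scene is rendered at
# ssaa_fact times the native resolution and then averaged back down, e.g.
# with OpenCV's area filter; 'ren_rgb_hr' is a made-up name for the
# supersampled rendering.
#
#   import cv2
#   rgb = cv2.resize(ren_rgb_hr, tuple(par['cam']['im_size']),
#                    interpolation=cv2.INTER_AREA)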

for obj_id in obj_ids:
    # Prepare folders
    misc.ensure_dir(os.path.dirname(out_rgb_mpath.format(obj_id, 0)))
    misc.ensure_dir(os.path.dirname(out_labels_path.format(obj_id, 0)))
    misc.ensure_dir(os.path.dirname(out_mask_mpath.format(obj_id, 0)))

    # Load model
    model_path = par['model_mpath'].format(obj_id)
    model = inout.load_ply(model_path)

    mesh = MeshPly(model_path)
    vertices = np.c_[np.array(mesh.vertices),
                     np.ones((len(mesh.vertices), 1))].transpose()
    corners3D = get_3D_corners(vertices)
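    # Hedged sketch of what get_3D_corners presumably computes (based on the
    # common single-shot-pose utility; treat it as an assumption): the eight
    # corners of the model's axis-aligned bounding box, in homogeneous 4x8 form.
    #
    #   mins, maxs = vertices[:3].min(axis=1), vertices[:3].max(axis=1)
    #   corners3D = np.array([[x, y, z, 1.0] for x in (mins[0], maxs[0])
    #                         for y in (mins[1], maxs[1])
    #                         for z in (mins[2], maxs[2])]).T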

    # Load model texture
    if par['model_texture_mpath']:
        model_texture_path = par['model_texture_mpath'].format(obj_id)
Example #2
# Path masks of the output visualizations
vis_base = '../output/vis_gt_visib_{}_delta={}/{:02d}/'
vis_mpath = vis_base + '{:' + str(dp['im_id_pad']).zfill(2) + 'd}_{:02d}.jpg'
# vis_delta_mpath = vis_base + '{:' + str(dp['im_id_pad']).zfill(2) +\
#                   'd}_{:02d}_diff_below_delta={}.jpg'

print('Loading object models...')
models = {}
for obj_id in obj_ids:
    models[obj_id] = inout.load_ply(dp['model_mpath'].format(obj_id))

# visib_to_below_delta_fracs = []
for data_id in data_ids:
    if do_vis:
        misc.ensure_dir(
            os.path.dirname(vis_mpath.format(dataset, delta, data_id, 0, 0)))

    # Load scene info and gts
    info = inout.load_info(dp[info_mpath_key].format(data_id))
    gts = inout.load_gt(dp[gt_mpath_key].format(data_id))

    im_ids = sorted(gts.keys())
    gt_stats = {}
    for im_id in im_ids:
        print('dataset: {}, scene/obj: {}, im: {}'.format(
            dataset, data_id, im_id))

        K = info[im_id]['cam_K']
        depth_path = dp[depth_mpath_key].format(data_id, im_id)
        depth_im = inout.load_depth(depth_path)
        depth_im *= dp['cam']['depth_scale']  # to [mm]
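        # (depth_scale is the dataset-specific factor converting the stored
        # 16-bit depth values to millimetres, e.g. 0.1 when depth is stored
        # in 0.1 mm steps.)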
Example #3
# Whether to consider only the specified subset of images
use_image_subset = False

# Subset of images to be considered
if use_image_subset:
    im_ids_sets = inout.load_yaml(dp['test_set_fpath'])
else:
    im_ids_sets = None
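# (im_ids_sets maps each scene id to the image ids to consider; see its use
# in Example #7.)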

scene_ids_curr = range(1, dp['scene_count'] + 1)
if scene_ids:
    scene_ids_curr = set(scene_ids_curr).intersection(scene_ids)  # take the intersection
for scene_id in scene_ids_curr:
    # Create the output folders
    misc.ensure_dir(os.path.dirname(out_rgb_mpath.format(dataset, scene_id, 0)))
    misc.ensure_dir(os.path.dirname(out_depth_mpath.format(dataset, scene_id, 0)))
    misc.ensure_dir(os.path.dirname(out_seg_mpath.format(dataset, scene_id, 0)))
    misc.ensure_dir(os.path.dirname(out_obj_mpath.format(dataset, scene_id, 0)))
    misc.ensure_dir(os.path.dirname(out_info_path.format(dataset, scene_id, 0)))

    # Load scene info and gt poses
    scene_info = inout.load_info(dp['scene_info_mpath'].format(scene_id))
    scene_gt = inout.load_gt(dp['scene_gt_mpath'].format(scene_id))
    models_info = inout.load_yaml(dp['models_info_path'])
    # Load models of objects that appear in the current scene
    obj_ids = set([gt['obj_id'] for gts in scene_gt.values() for gt in gts])
    models = {}
    for obj_id in obj_ids:
Example #4
# Indicates whether to render RGB image
vis_rgb = True

# Indicates whether to resolve visibility in the rendered RGB image (using
# depth renderings). If True, only the parts of the object surface that are
# not occluded by any other modeled object are visible. If False, the RGB
# renderings of the individual objects are blended together.
vis_rgb_resolve_visib = True
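# A self-contained sketch of depth-based visibility resolution (illustrative
# only; the function below is not this script's API): a pixel shows whichever
# object's rendered surface is nearer to the camera, and zero depth means
# "nothing rendered".
import numpy as np

def blend_resolve_visib(rgb_a, depth_a, rgb_b, depth_b):
    a_valid, b_valid = depth_a > 0, depth_b > 0
    a_front = a_valid & (~b_valid | (depth_a < depth_b))  # a wins where nearer
    out = np.where(a_front[..., None], rgb_a, rgb_b)
    out[~(a_valid | b_valid)] = 0  # background: neither object covers the pixel
    return out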

# Indicates whether to render depth image
vis_depth = True

# Path masks for output images
vis_rgb_mpath = '../output/vis_gt_poses_{}/{:02d}_{:04d}.jpg'
vis_depth_mpath = '../output/vis_gt_poses_{}/{:02d}_{:04d}_depth_diff.jpg'
# Format the mask first, otherwise a folder literally named '{}' is created
misc.ensure_dir(os.path.dirname(vis_rgb_mpath.format(dataset, 0, 0)))

scene_ids_curr = range(1, par['scene_count'] + 1)
if scene_ids:
    scene_ids_curr = set(scene_ids_curr).intersection(scene_ids)
for scene_id in scene_ids_curr:
    # Load scene info and gt poses
    scene_info = inout.load_info(par['scene_info_mpath'].format(scene_id))
    scene_gt = inout.load_gt(par['scene_gt_mpath'].format(scene_id))

    # Load models of objects that appear in the current scene
    obj_ids = set([gt['obj_id'] for gts in scene_gt.values() for gt in gts])
    models = {}
    for obj_id in obj_ids:
        models[obj_id] = inout.load_ply(par['model_mpath'].format(obj_id))
Example #5
                    elif error_type == 'cou':
                        e = pose_error.cou(R_e, t_e, R_g, t_g, models[obj_id],
                                           dp['test_im_size'], K)
                    elif error_type == 're':
                        e = pose_error.re(R_e, R_g)
                    elif error_type == 'te':
                        e = pose_error.te(t_e, t_g)

                    errs_gts[gt_id] = e

                errs.append({
                    'im_id': im_id,
                    'obj_id': obj_id,
                    'est_id': est_id,
                    'score': est['score'],
                    'errors': errs_gts
                })
            # print('Evaluation time: {}s'.format(time.time() - t))

        print('Saving errors...')
        errors_path = errors_mpath.format(result_path=result_path,
                                          result_name=result_name,
                                          error_sign=error_sign,
                                          scene_id=scene_id)

        misc.ensure_dir(os.path.dirname(errors_path))
        inout.save_errors(errors_path, errs)

    print('')
print('Done.')
Example #6
            # rgb, depth, pose, obj_bb and obj_id have been read at this point

elif mode == 'render':
    renderer = Renderer()
    radii = [400]  # Radii of the view sphere [mm]
    azimuth_range = (0, 2 * math.pi)
    elev_range = (0, 0.5 * math.pi)

    p = dict()
    p['name'] = 'customDataset'
    p['obj_count'] = 2
    p['scene_count'] = 2
    p['train_im_size'] = (640, 480)
    p['test_im_size'] = (640, 480)
    p['base_path'] = join('/home/meiqua/6DPose/public/datasets/', p['name'])
    misc.ensure_dir(p['base_path'])

    p['model_mpath'] = join(p['base_path'], 'models', 'obj_{:02d}.ply')
    misc.ensure_dir(os.path.dirname(p['model_mpath']))
    p['model_texture_mpath'] = join(p['base_path'], 'models', 'obj_{:02d}.png')

    p['cam_params_path'] = join(p['base_path'], 'camera.yml')
    p['cam'] = inout.load_cam_params(p['cam_params_path'])
    # Minimum required number of views on the whole view sphere. The final number of
    # views depends on the sampling method.
    min_n_views = 100

    clip_near = 10  # [mm]
    clip_far = 10000  # [mm]
    ambient_weight = 0.8  # Weight of ambient light [0, 1]
    shading = 'phong'  # 'flat', 'phong'
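    # Hedged sketch of the step these parameters feed (mirroring the call in
    # Example #14; the exact signature used by this codebase is an assumption):
    #
    #   views, views_level = view_sampler.sample_views(min_n_views, radii[0])
    #   # each view provides a camera rotation and translation on the sphere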
Example #7
# Path masks of the output visualizations
vis_base = '../output/vis_gt_visib_{}_delta={}/{:02d}/'
vis_mpath = vis_base + '{:' + str(dp['im_id_pad']).zfill(2) + 'd}_{:02d}.jpg'
# vis_delta_mpath = vis_base + '{:' + str(dp['im_id_pad']).zfill(2) +\
#                   'd}_{:02d}_diff_below_delta={}.jpg'

print('Loading object models...')
models = {}
for obj_id in obj_ids:
    models[obj_id] = inout.load_ply(dp['model_mpath'].format(obj_id))

# visib_to_below_delta_fracs = []
for data_id in data_ids:
    if do_vis:
        misc.ensure_dir(os.path.dirname(
            vis_mpath.format(dataset, delta, data_id, 0, 0)))

    # Load scene info and gts
    info = inout.load_info(dp[info_mpath_key].format(data_id))
    gts = inout.load_gt(dp[gt_mpath_key].format(data_id))

    # Considered subset of images for the current scene
    if im_ids_sets is not None:
        im_ids = im_ids_sets[data_id]
    else:
        im_ids = sorted(gts.keys())

    gt_stats = {}
    for im_id in im_ids:
        print('dataset: {}, scene/obj: {}, im: {}'.format(dataset, data_id, im_id))
Example #8
out_rgb_mpath = '../output/render/{:02d}/rgb/{:04d}.png'
out_depth_mpath = '../output/render/{:02d}/depth/{:04d}.png'
out_obj_info_path = '../output/render/{:02d}/info.yml'
out_obj_gt_path = '../output/render/{:02d}/gt.yml'
out_views_vis_mpath = '../output/render/views_radius={}.ply'

# Prepare output folder
# misc.ensure_dir(os.path.dirname(out_obj_info_path))

# Image size and K for SSAA
im_size_rgb = [int(round(x * float(ssaa_fact))) for x in par['cam']['im_size']]
K_rgb = par['cam']['K'] * ssaa_fact

for obj_id in obj_ids:
    # Prepare folders
    misc.ensure_dir(os.path.dirname(out_rgb_mpath.format(obj_id, 0)))
    misc.ensure_dir(os.path.dirname(out_depth_mpath.format(obj_id, 0)))

    # Load model
    model_path = par['model_mpath'].format(obj_id)
    model = inout.load_ply(model_path)

    # Load model texture
    if par['model_texture_mpath']:
        model_texture_path = par['model_texture_mpath'].format(obj_id)
        model_texture = inout.load_im(model_texture_path)
    else:
        model_texture = None

    obj_info = {}
    obj_gt = {}
Example #9
scene_ids = [6]  # one scene per object
im_ids = []  # image ids to use; empty = all
gt_ids = []  # GT ids to use (multiple objects can appear in one image); empty = all
scene_ids_curr = range(1, dp['scene_count'] + 1)
if scene_ids:
    scene_ids_curr = set(scene_ids_curr).intersection(scene_ids)

mode = 'render_train'
# mode = 'test'

base_path = join(dp['base_path'], 'LCHF')
train_from_radius = 500
if mode == 'render_train':
    start_time = time.time()
    visual = True
    misc.ensure_dir(base_path)

    ssaa_fact = 4
    im_size_rgb = [int(round(x * float(ssaa_fact))) for x in dp['cam']['im_size']]
    K_rgb = dp['cam']['K'] * ssaa_fact

    LCHF_infos = []
    LCHF_linemod_feats = []
    for obj_id in obj_ids_curr:
        radii = [train_from_radius]
        azimuth_range = (0, 2 * math.pi)
        elev_range = (0, 0.5 * math.pi)
        min_n_views = 100
        clip_near = 10  # [mm]
        clip_far = 10000  # [mm]
        ambient_weight = 0.8  # Weight of ambient light [0, 1]
Example #10
                            tex_loc = (xs[p_id], ys[p_id] - 5)
                            # tex_loc = (bbox[0], bbox[1])
                            cv2.putText(ren_rgb_info, txt, tex_loc,
                                        cv2.FONT_HERSHEY_PLAIN, 0.9,
                                        color_uint8, 1)

            # Save RGB visualization
            if vis_rgb:
                vis_im_rgb = 0.5 * rgb.astype(np.float32) + \
                             0.5 * ren_rgb + \
                             1.0 * ren_rgb_info
                vis_im_rgb[vis_im_rgb > 255] = 255
                vis_rgb_path = vis_rgb_mpath.format(
                    result_path=result_path, result_name=result_name,
                    scene_id=scene_id, im_id=im_id, obj_id=obj_id)
                misc.ensure_dir(os.path.dirname(vis_rgb_path))
                inout.save_im(vis_rgb_path, vis_im_rgb.astype(np.uint8))

            # Save image of depth differences
            if vis_depth:
                # Calculate the depth difference at pixels where both depth maps
                # are valid
                valid_mask = (depth > 0) * (ren_depth > 0)
                depth_diff = valid_mask * (depth - ren_depth.astype(np.float32))
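                # (Sign convention: positive depth_diff means the measured
                # surface lies behind the rendered GT model, matching the
                # 'measured - GT' title below.)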

                f, ax = plt.subplots(1, 1)
                cax = ax.matshow(depth_diff)
                ax.axis('off')
                ax.set_title('measured - GT depth [mm]')
                f.colorbar(cax, fraction=0.03, pad=0.01)
                f.tight_layout(pad=0)
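                # (The excerpt ends here; presumably followed by plt.savefig
                # to the depth-diff output path and plt.close(f).)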
Example #11
                for gt in gts:
                    err = -1.0
                    R_gt = gt['cam_R_m2c']
                    t_gt = gt['cam_t_m2c']

                    if pose_error_fun == 'vsd':
                        err = pose_error.vsd(R_est, t_est, R_gt, t_gt, model,
                                             depth_im, delta, tau, K)
                    elif pose_error_fun == 'add':
                        err = pose_error.add(R_est, t_est, R_gt, t_gt, model)
                    elif pose_error_fun == 'adi':
                        err = pose_error.adi(R_est, t_est, R_gt, t_gt, model)
                    elif pose_error_fun == 'cou':
                        err = pose_error.cou(R_est, t_est, R_gt, t_gt, model,
                                             dp['test_im_size'], K)
                    elif pose_error_fun == 're':
                        err = pose_error.re(R_est, R_gt)
                    elif pose_error_fun == 'te':
                        err = pose_error.te(t_est, t_gt)
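                    # (For reference: vsd = Visible Surface Discrepancy,
                    # add/adi = average distance of model points for objects
                    # with distinguishable/indistinguishable views, cou =
                    # Complement over Union of the 2D masks, re/te = rotation
                    # and translation error.)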

                    est_errs.append(err)
                errs[im_id][obj_id].append(est_errs)

        print('Saving calculated errors...')
        errs_path = errs_mpath.format(result_path=result_path,
                                      scene_id=scene_id,
                                      eval_desc=eval_desc)
        # dirname (not basename), so the parent folder of the file is created
        misc.ensure_dir(os.path.dirname(errs_path))
        with open(errs_path, 'w') as f:
            yaml.dump(errs, f, width=10000, Dumper=yaml.CDumper)
Example #12
                            tex_loc = (xs[p_id], ys[p_id] - 5)
                            # tex_loc = (bbox[0], bbox[1])
                            cv2.putText(ren_rgb_info, txt, tex_loc,
                                        cv2.FONT_HERSHEY_PLAIN, 0.9,
                                        color_uint8, 1)

            # Save RGB visualization
            if vis_rgb:
                vis_im_rgb = 0.5 * rgb.astype(np.float32) + \
                             0.5 * ren_rgb + \
                             1.0 * ren_rgb_info
                vis_im_rgb[vis_im_rgb > 255] = 255
                vis_rgb_path = vis_rgb_mpath.format(
                    result_path=result_path, result_name=result_name,
                    scene_id=scene_id, im_id=im_id, obj_id=obj_id)
                misc.ensure_dir(os.path.dirname(vis_rgb_path))
                inout.save_im(vis_rgb_path, vis_im_rgb.astype(np.uint8))

            # Save image of depth differences
            if vis_depth:
                # Calculate the depth difference at pixels where both depth maps
                # are valid
                valid_mask = (depth > 0) * (ren_depth > 0)
                depth_diff = valid_mask * (depth - ren_depth.astype(np.float32))

                f, ax = plt.subplots(1, 1)
                cax = ax.matshow(depth_diff)
                ax.axis('off')
                ax.set_title('measured - GT depth [mm]')
                f.colorbar(cax, fraction=0.03, pad=0.01)
                f.tight_layout(pad=0)
Example #13
scene_ids = [6]  # one scene per object
im_ids = []  # image ids to use; empty = all
gt_ids = []  # GT ids to use (multiple objects can appear in one image); empty = all
scene_ids_curr = range(1, dp['scene_count'] + 1)
if scene_ids:
    scene_ids_curr = set(scene_ids_curr).intersection(scene_ids)

mode = 'render_train'
# mode = 'test'

base_path = join(dp['base_path'], 'LCHF')
train_from_radius = 500
if mode == 'render_train':
    start_time = time.time()
    visual = True
    misc.ensure_dir(base_path)

    ssaa_fact = 4
    im_size_rgb = [
        int(round(x * float(ssaa_fact))) for x in dp['cam']['im_size']
    ]
    K_rgb = dp['cam']['K'] * ssaa_fact

    LCHF_infos = []
    LCHF_linemod_feats = []
    for obj_id in obj_ids_curr:
        radii = [train_from_radius]
        azimuth_range = (0, 2 * math.pi)
        elev_range = (0, 0.5 * math.pi)
        min_n_views = 100
        clip_near = 10  # [mm]
Example #14
# Author: Tomas Hodan ([email protected])
# Center for Machine Perception, Czech Technical University in Prague

# Example of sampling views from a view sphere.

import os
import sys

sys.path.append(os.path.abspath('..'))
from pysixd import view_sampler, misc

min_n_views = 642
radius = 1
hemisphere = False
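# (642 is one of the vertex counts of a recursively subdivided icosahedron,
# 12 -> 42 -> 162 -> 642 -> 2562, so the sampler can match it exactly; other
# values are presumably rounded up to the next subdivision level.)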

out_views_vis_path = '../output/view_sphere.ply'

misc.ensure_dir(os.path.dirname(out_views_vis_path))

# Sample views
views, views_level = view_sampler.sample_views(min_n_views, radius, hemisphere)
print('Sampled views: ' + str(len(views)))

view_sampler.save_vis(out_views_vis_path, views)
Example #15
                    elif error_type == 'cou':
                        e = pose_error.cou(R_e, t_e, R_g, t_g, models[obj_id],
                                           dp['test_im_size'], K)
                    elif error_type == 're':
                        e = pose_error.re(R_e, R_g)
                    elif error_type == 'te':
                        e = pose_error.te(t_e, t_g)

                    errs_gts[gt_id] = e

                errs.append({
                    'im_id': im_id,
                    'obj_id': obj_id,
                    'est_id': est_id,
                    'score': est['score'],
                    'errors': errs_gts
                })
            # print('Evaluation time: {}s'.format(time.time() - t))

        print('Saving errors...')
        errors_path = errors_mpath.format(result_path=result_path,
                                          result_name=result_name,
                                          error_sign=error_sign,
                                          scene_id=scene_id)

        misc.ensure_dir(os.path.dirname(errors_path))
        inout.save_errors(errors_path, errs)

    print('')
print('Done.')
Example #16
# Whether to use the original model color
vis_orig_color = False

# Define new object colors (used if vis_orig_color == False)
colors = inout.load_yaml('../data/colors.yml')

# Path masks for output images
vis_rgb_mpath = '../output/vis_gt_poses_{}/{:02d}/{:04d}.jpg'
vis_depth_mpath = '../output/vis_gt_poses_{}/{:02d}/{:04d}_depth_diff.jpg'

scene_ids_curr = range(1, par['scene_count'] + 1)
if scene_ids:
    scene_ids_curr = set(scene_ids_curr).intersection(scene_ids)
for scene_id in scene_ids_curr:
    misc.ensure_dir(os.path.dirname(vis_rgb_mpath.format(dataset, scene_id,
                                                         0)))

    # Load scene info and gt poses
    scene_info = inout.load_info(par['scene_info_mpath'].format(scene_id))
    scene_gt = inout.load_gt(par['scene_gt_mpath'].format(scene_id))

    # Load models of objects that appear in the current scene
    obj_ids = set([gt['obj_id'] for gts in scene_gt.values() for gt in gts])
    models = {}
    for obj_id in obj_ids:
        models[obj_id] = inout.load_ply(par['model_mpath'].format(obj_id))

    # Visualize GT poses in the selected images
    im_ids_curr = sorted(scene_info.keys())
    if im_ids:
        im_ids_curr = set(im_ids_curr).intersection(im_ids)
Example #17
            # rgb, depth, pose, obj_bb and obj_id have been read at this point

elif mode == 'render':
    renderer = Renderer()
    radii = [400]  # Radii of the view sphere [mm]
    azimuth_range = (0, 2 * math.pi)
    elev_range = (0, 0.5 * math.pi)

    p = dict()
    p['name'] = 'customDataset'
    p['obj_count'] = 2
    p['scene_count'] = 2
    p['train_im_size'] = (640, 480)
    p['test_im_size'] = (640, 480)
    p['base_path'] = join('/home/meiqua/6DPose/public/datasets/', p['name'])
    misc.ensure_dir(p['base_path'])

    p['model_mpath'] = join(p['base_path'], 'models', 'obj_{:02d}.ply')
    misc.ensure_dir(os.path.dirname(p['model_mpath']))
    p['model_texture_mpath'] = join(p['base_path'], 'models', 'obj_{:02d}.png')

    p['cam_params_path'] = join(p['base_path'], 'camera.yml')
    p['cam'] = inout.load_cam_params(p['cam_params_path'])
    # Minimum required number of views on the whole view sphere. The final number of
    # views depends on the sampling method.
    min_n_views = 100

    clip_near = 10  # [mm]
    clip_far = 10000  # [mm]
    ambient_weight = 0.8  # Weight of ambient light [0, 1]
    shading = 'phong'  # 'flat', 'phong'
Example #18
            # Build a list (map() would not be indexable in Python 3)
            line_elems = [float(e) for e in lines[y + 1].split(' ') if e != '']
            for x in range(mat_size[0]):
                mat[y, x] = line_elems[x]
    return mat


with open(bbox_cens_path, 'r') as f:
    bbox_cens = np.array(yaml.safe_load(f))  # safe_load suffices for plain data

for scene_id in scene_ids:
    scene_info = {}
    scene_gt = {}

    # Prepare folders
    misc.ensure_dir(os.path.dirname(rgb_out_mpath.format(scene_id, 0)))
    misc.ensure_dir(os.path.dirname(depth_out_mpath.format(scene_id, 0)))

    # Get list of image IDs
    color_fpaths = glob.glob(rgb_in_mpath.format(scene_id, '*'))
    im_ids = sorted(
        [int(e.split('color')[1].split('.jpg')[0]) for e in color_fpaths])

    # Load object model
    obj_id = scene_id  # The object id is the same as scene id for this dataset
    model = inout.load_ply(model_mpath.format(obj_id))

    # Transformation which was applied to the object models (its inverse will
    # be applied to the GT poses):
    # 1) Translate the bounding box center to the origin
    # 2) Rotate around Y axis by pi + flip for some objects
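    # Hedged sketch of undoing such a model transform in the GT poses
    # (illustrative; R_model/t_model denote the transform described above,
    # R_gt/t_gt one loaded pose). If the models were changed as
    # X' = R_model * X + t_model, keeping the projection unchanged requires:
    #
    #   R_gt_new = R_gt.dot(np.linalg.inv(R_model))
    #   t_gt_new = t_gt - R_gt_new.dot(t_model)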
Example #19
    obj_ids_curr = set(obj_ids_curr).intersection(obj_ids)

# renderer = Renderer()

mode = 'test'

# template_saved_to = join(dp['base_path'], 'linemod', '%s.yaml')
# tempInfo_saved_to = join(dp['base_path'], 'linemod', '{:02d}_info.yaml')
template_saved_to = join(dp['base_path'], 'linemod_render', '%s.yaml')
tempInfo_saved_to = join(dp['base_path'], 'linemod_render', '{:02d}_info.yaml')
if mode == 'train':
    start_time = time.time()
    # im_ids = list(range(1, 1000, 10))  # obj's img
    im_ids = []
    visual = True
    misc.ensure_dir(os.path.dirname(template_saved_to))

    for obj_id in obj_ids_curr:
        scene_info = inout.load_info(dp['obj_info_mpath'].format(obj_id))
        scene_gt = inout.load_gt(dp['obj_gt_mpath'].format(obj_id))

        im_ids_curr = sorted(scene_info.keys())

        if im_ids:
            im_ids_curr = set(im_ids_curr).intersection(im_ids)

        templateInfo = dict()
        for im_id in im_ids_curr:
            print('obj: {}, im: {}'.format(obj_id, im_id))

            # Load the images
Example #20
current_dep = dep_min
while current_dep < dep_max:
    dep_anchors.append(int(current_dep))
    current_dep = current_dep * dep_anchor_step

# dep_anchors = dep_anchors[1:-1]  # discard the two border depth anchors

print('\ndep anchors:\n {}, \ndep range: {}\n'.format(dep_anchors, dep_range))
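# Worked example (values assumed for illustration): dep_min = 600,
# dep_max = 1500 and dep_anchor_step = 1.2 yield anchors
# [600, 720, 864, 1036, 1244, 1492] -- a geometric series, so the apparent
# object scale changes by a roughly constant factor between anchors.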

top_level_path = os.path.dirname(os.path.abspath(__file__))
template_saved_to = join(dp['base_path'], 'linemod_render_up', '%s.yaml')
tempInfo_saved_to = join(dp['base_path'], 'linemod_render_up', '{:02d}_info_{}.yaml')
result_base_path = join(top_level_path, 'public', 'sixd_results', 'patch-linemod_'+dataset)

misc.ensure_dir(os.path.dirname(template_saved_to))
misc.ensure_dir(os.path.dirname(tempInfo_saved_to))
misc.ensure_dir(result_base_path)

if mode == 'render_train':
    start_time = time.time()

    im_size = dp['cam']['im_size']
    shape = (im_size[1], im_size[0])

    # Frame buffer object; bind it once here, which seems to avoid a memory leak
    window = renderer.app.Window(visible=False)
    color_buf = np.zeros((shape[0], shape[1], 4), np.float32).view(renderer.gloo.TextureFloat2D)
    depth_buf = np.zeros((shape[0], shape[1]), np.float32).view(renderer.gloo.DepthTexture)
    fbo = renderer.gloo.FrameBuffer(color=color_buf, depth=depth_buf)
    fbo.activate()
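    # (Presumably paired with fbo.deactivate() once rendering is done;
    # allocating the FBO once up front avoids re-creating GPU textures for
    # every rendered view.)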
Example #21
out_rgb_mpath = '../output/render/{:02d}/rgb/{:04d}.png'
out_depth_mpath = '../output/render/{:02d}/depth/{:04d}.png'
out_obj_info_path = '../output/render/{:02d}/info.yml'
out_obj_gt_path = '../output/render/{:02d}/gt.yml'
out_views_vis_mpath = '../output/render/views_radius={}.ply'

# Prepare output folder
# misc.ensure_dir(os.path.dirname(out_obj_info_path))

# Image size and K for SSAA
im_size_rgb = [int(round(x * float(ssaa_fact))) for x in par['cam']['im_size']]
K_rgb = par['cam']['K'] * ssaa_fact

for obj_id in obj_ids:
    # Prepare folders
    misc.ensure_dir(os.path.dirname(out_rgb_mpath.format(obj_id, 0)))
    misc.ensure_dir(os.path.dirname(out_depth_mpath.format(obj_id, 0)))

    # Load model
    model_path = par['model_mpath'].format(obj_id)
    model = inout.load_ply(model_path)

    # Load model texture
    if par['model_texture_mpath']:
        model_texture_path = par['model_texture_mpath'].format(obj_id)
        model_texture = inout.load_im(model_texture_path)
    else:
        model_texture = None

    obj_info = {}
    obj_gt = {}