R_model_inv = np.linalg.inv(R_model)

    # Process each image of the scene: re-save the RGB/depth images and convert the GT pose
    for im_id in im_ids:
        if im_id % 10 == 0:
            print('scene,view: ' + str(scene_id) + ',' + str(im_id))

        # Load the RGB and depth images
        rgb = inout.load_im(rgb_in_mpath.format(scene_id, im_id))
        depth = load_hinter_depth(depth_in_mpath.format(scene_id, im_id))

        depth *= 10.0  # Convert depth map to [100um]

        # Save the RGB and depth images
        inout.save_im(rgb_out_mpath.format(scene_id, im_id), rgb)
        inout.save_depth(depth_out_mpath.format(scene_id, im_id), depth)

        # Load the GT pose
        R_m2c = load_hinter_mat(rot_mpath.format(scene_id, im_id))
        t_m2c = load_hinter_mat(tra_mpath.format(scene_id, im_id))
        t_m2c *= 10  # Convert to [mm]

        # Transform the GT pose (to compensate for the transformation applied to the models)
        R_m2c = R_m2c.dot(R_model_inv)
        t_m2c = t_m2c + R_m2c.dot(R_model.dot(t_model))

        # Get 2D bounding box of the object model at the ground truth pose
        obj_bb = misc.calc_pose_2d_bbox(model, par['cam']['im_size'],
                                        par['cam']['K'], R_m2c, t_m2c)

        # Visualisation
Example #2
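            # Divide by the dataset depth scale so the rendered depth is stored in
            # the same units as the real depth images of the dataset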
            depth /= par['cam']['depth_scale']

            # Render RGB image
            rgb = renderer.render(model, im_size_rgb, K_rgb, view['R'], view['t'],
                                  clip_near, clip_far, texture=model_texture,
                                  ambient_weight=ambient_weight, shading=shading,
                                  mode='rgb')

            # The OpenCV resize (rather than the SciPy one below) was used when
            # generating the training images provided for the SIXD Challenge 2017.
            rgb = cv2.resize(rgb, par['cam']['im_size'], interpolation=cv2.INTER_AREA)
            #rgb = scipy.misc.imresize(rgb, par['cam']['im_size'][::-1], 'bicubic')

            # Save the rendered images
            inout.save_im(out_rgb_mpath.format(obj_id, im_id), rgb)
            inout.save_depth(out_depth_mpath.format(obj_id, im_id), depth)

            # Get 2D bounding box of the object model at the ground truth pose
            ys, xs = np.nonzero(depth > 0)
            obj_bb = misc.calc_2d_bbox(xs, ys, par['cam']['im_size'])

            obj_info[im_id] = {
                'cam_K': par['cam']['K'].flatten().tolist(),
                'view_level': int(views_level[view_id]),
                #'sphere_radius': float(radius)
            }

            obj_gt[im_id] = [{
                'cam_R_m2c': view['R'].flatten().tolist(),
                'cam_t_m2c': view['t'].flatten().tolist(),
                'obj_bb': [int(x) for x in obj_bb],
Example #3
                depth /= p['cam']['depth_scale']

                # Render RGB image
                rgb = renderer.render(model, im_size_rgb, K_rgb, view['R'], view['t'],
                                      clip_near, clip_far, texture=model_texture,
                                      ambient_weight=ambient_weight, shading=shading,
                                      mode='rgb')

                # The OpenCV resize (rather than the SciPy one below) was used when
                # generating the training images provided for the SIXD Challenge 2017.
                rgb = cv2.resize(rgb, p['cam']['im_size'], interpolation=cv2.INTER_AREA)
                # rgb = scipy.misc.imresize(rgb, par['cam']['im_size'][::-1], 'bicubic')

                # Save the rendered images
                inout.save_im(out_rgb_mpath.format(obj_id, im_id), rgb)
                inout.save_depth(out_depth_mpath.format(obj_id, im_id), depth)

                # Get 2D bounding box of the object model at the ground truth pose
                ys, xs = np.nonzero(depth > 0)
                obj_bb = misc.calc_2d_bbox(xs, ys, p['cam']['im_size'])

                obj_info[im_id] = {
                    'cam_K': p['cam']['K'].flatten().tolist(),
                    'view_level': int(views_level[view_id]),
                    # 'sphere_radius': float(radius)
                }

                obj_gt[im_id] = [{
                    'cam_R_m2c': view['R'].flatten().tolist(),
                    'cam_t_m2c': view['t'].flatten().tolist(),
                    'obj_bb': [int(x) for x in obj_bb],
Example #4

            # Copy the white object mask into the (otherwise black) output segmentation mask
            black_out_mask[mask] = white_mask[mask].astype(black_out_mask.dtype)

            #################### Save images ####################
            if im_id > 300:
                now_test = True
                # print "out"
            if not now_test:

                inout.save_im(out_rgb_mpath.format(dataset, scene_id, im_id),
                              rgb.astype(np.uint8))

                inout.save_depth(out_depth_mpath.format(dataset, obj_id, im_id), depth)

                # Write the object image as a 16-bit PNG (numpngw's write_png supports uint16 arrays)
                from numpngw import write_png
                write_png(out_obj_mpath.format(dataset, scene_id, im_id), img_obj.astype(np.uint16))

                inout.save_im(out_seg_mpath.format(dataset, scene_id, im_id),
                              black_out_mask.astype(np.uint8))

                # Serialise the rotation matrix row by row and the translation as text
                # (the translation is divided by 1000, i.e. converted to metres if t is in [mm])
                R_str = [[str(num) for num in item] for item in R.tolist()]
                R_str = [" ".join(item) for item in R_str]
                R_str = [item + '\n' for item in R_str]

                t_str = [str(item / 1000) for item in t.squeeze().tolist()]
                t_str = " ".join(t_str)

                size_x = models_info[gt['obj_id']]["size_x"]

Example #5

import os
import sys
import glob
import numpy as np

# Add the repository root (parent of this script's directory) to sys.path so that pysixd can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pysixd import inout

from params.dataset_params import get_dataset_params
par = get_dataset_params('hinterstoisser')
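# par is a dict with the dataset paths and camera parameters
# ('scene_count' and 'test_depth_mpath' are used below)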

# data_ids = range(1, par.obj_count + 1)
data_ids = range(1, par['scene_count'] + 1)

# depth_mpath = par.train_depth_mpath
depth_mpath = par['test_depth_mpath']

scale = 0.1  # Factor applied to every stored depth value

for data_id in data_ids:
    print('Processing id: ' + str(data_id))
    depth_paths = sorted(
        glob.glob(
            os.path.join(os.path.dirname(depth_mpath.format(data_id, 0)),
                         '*')))
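    # All depth images of the current scene, sorted by filename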
    for depth_path in depth_paths:
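        # Rescale the depth image, round it to 16-bit integers and overwrite it in place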
        d = inout.load_depth(depth_path)
        d *= scale
        d = np.round(d).astype(np.uint16)
        inout.save_depth(depth_path, d)