Пример #1
0
    def __init__(self,
                 models_cad_files,
                 vertex_tmp_store_folder,
                 vertex_scale,
                 width,
                 height,
                 K,
                 augmenters,
                 vocdevkit_path,
                 min_num_objects_per_scene,
                 max_num_objects_per_scene,
                 near_plane=10,
                 far_plane=2000,
                 min_n_views=1000,
                 radius=650,
                 obj_ids=None,
                 model_type='reconst'):
        """Set up a synthetic-scene generator over the given CAD models.

        Args:
            models_cad_files: list of model file paths handed to the renderer.
            vertex_tmp_store_folder: cache folder for preprocessed vertices.
            vertex_scale: scale factor applied to the model vertices.
            width, height: output image size in pixels.
            K: camera intrinsic matrix.
            augmenters: augmentation pipeline (stored, applied elsewhere).
            vocdevkit_path: folder scanned for *.jpg / *.png background images.
            min_num_objects_per_scene, max_num_objects_per_scene: object-count
                range per generated scene.
            near_plane, far_plane: clipping planes (same units as ``radius``).
            min_n_views: minimum number of viewpoints sampled on the sphere.
            radius: view-sphere radius.
            obj_ids: optional iterable of object ids; stored as an ndarray,
                or kept as None when not given.
            model_type: 'reconst' -> phong renderer, 'cad' -> plain renderer.

        Raises:
            ValueError: if ``model_type`` is neither 'reconst' nor 'cad'.
        """
        self._models_cad_files = models_cad_files
        self._width = width
        self._height = height
        self._radius = radius
        self._K = K
        self._augmenters = augmenters
        self._min_num_objects_per_scene = min_num_objects_per_scene
        self._max_num_objects_per_scene = max_num_objects_per_scene
        self._near_plane = near_plane
        self._far_plane = far_plane
        # np.array(None) would silently create a 0-d object array; keep None.
        self.obj_ids = np.array(obj_ids) if obj_ids is not None else None

        # pascal_imgs_path = os.path.join(vocdevkit_path, 'VOC2012/JPEGImages')
        self._voc_imgs = glob.glob(os.path.join(
            vocdevkit_path, '*.jpg')) + glob.glob(
                os.path.join(vocdevkit_path, '*.png'))
        # print() form works on both Python 2 and 3 (the old statement form
        # was a SyntaxError under Python 3).
        print(len(self._voc_imgs))
        if model_type == 'reconst':
            self._renderer = mr_phong.Renderer(
                self._models_cad_files,
                1,
                vertex_tmp_store_folder=vertex_tmp_store_folder,
                vertex_scale=vertex_scale)
        elif model_type == 'cad':
            self._renderer = mr.Renderer(
                self._models_cad_files,
                1,
                vertex_tmp_store_folder=vertex_tmp_store_folder,
                vertex_scale=vertex_scale)
        else:
            # Fail loudly instead of print + exit(): library code should not
            # kill the interpreter, and callers can catch ValueError.
            raise ValueError('unknown model_type: %s' % model_type)

        azimuth_range = (0, 2 * math.pi)
        elev_range = (-0.5 * math.pi, 0.5 * math.pi)
        self.all_views, _ = view_sampler.sample_views(min_n_views, radius,
                                                      azimuth_range,
                                                      elev_range)
Пример #2
0
# test pysixd sampler
#
# Samples view poses on a sphere and scatters the transformed point
# (1, 0, 0) in 3D to visualise the sampled view distribution.

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

import pysixd.view_sampler as vSampler
import numpy as np

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# Alternative samplers kept for manual experimentation:
# pts, pts_level = vSampler.hinter_sampling(60)
# pts = vSampler.fibonacci_sampling(49)

# use views to transform pt 1, 0, 0
views, levels = vSampler.sample_views(50)
pts = np.zeros(shape=(len(views), 3))
for i, view in enumerate(views):
    R = view['R']
    t = view['t']
    # np.matrix is deprecated in NumPy; use a plain 2-D column vector.
    test_pt = np.array([[1.0], [0.0], [0.0]])
    transformed = R.dot(test_pt) + t
    pts[i, :] = np.asarray(transformed).reshape(3,)

print(pts.shape)
ax.scatter(xs=pts[:, 0], ys=pts[:, 1], zs=pts[:, 2])
plt.show()

print("end of line for break points")
            # Bind a 1x1 all-zero RGBA placeholder texture (presumably used
            # when the model has no real texture — confirm against the code
            # above this fragment).
            program['u_texture'] = np.zeros((1, 1, 4), np.float32)

        # OpenGL setup: enable depth testing, size the viewport from `shape`
        # (shape[1] = width, shape[0] = height), and disable face culling.
        renderer.gl.glEnable(renderer.gl.GL_DEPTH_TEST)
        renderer.gl.glViewport(0, 0, shape[1], shape[0])
        renderer.gl.glDisable(renderer.gl.GL_CULL_FACE)
        ######################################################

        # in our test, for complex objects fast-train performs badly...
        fast_train = False  # just scale templates
        if fast_train:
            # Sample views

            # with camera tilt: full azimuth tilt range (-pi, pi) in pi/8 steps,
            # at the nearest depth anchor only (templates are scaled for the rest).
            views, views_level = view_sampler.sample_views(min_n_views, dep_anchors[0],
                                                           azimuth_range, elev_range,
                                                           tilt_range=(-math.pi, math.pi),
                                                           tilt_step=math.pi / 8)

            print('Sampled views: ' + str(len(views)))

            # One template-info dict per depth anchor.
            templateInfo_radius = dict()
            for dep in dep_anchors:
                templateInfo_radius[dep] = dict()

            # Render the object model from all the views
            for view_id, view in enumerate(views):

                # Progress logging every 10th view.
                if view_id % 10 == 0:
                    print('obj,radius,view: ' + str(obj_id) +
                          ',' + str(dep_anchors[0]) + ',' + str(view_id) + ', view_id: ', view_id)
Пример #4
0
    # Load the object model mesh (path template filled with the object id).
    model_path = par['model_mpath'].format(obj_id)
    model = inout.load_ply(model_path)

    # Load model texture (optional: a falsy path template means no texture).
    if par['model_texture_mpath']:
        model_texture_path = par['model_texture_mpath'].format(obj_id)
        model_texture = inout.load_im(model_texture_path)
    else:
        model_texture = None

    # Accumulators for per-image metadata and ground truth — presumably keyed
    # by im_id further below; verify against the rest of the loop body.
    obj_info = {}
    obj_gt = {}
    im_id = 0
    for radius in radii:
        # Sample views on a sphere of this radius within the given
        # azimuth/elevation ranges.
        views, views_level = view_sampler.sample_views(min_n_views, radius,
                                                       azimuth_range, elev_range)
        print('Sampled views: ' + str(len(views)))
        view_sampler.save_vis(out_views_vis_mpath.format(str(radius)),
                              views, views_level)

        # Render the object model from all the views
        for view_id, view in enumerate(views):
            # Progress logging every 10th view.
            if view_id % 10 == 0:
                print('obj,radius,view: ' + str(obj_id) +
                      ',' + str(radius) + ',' + str(view_id))

            # Render depth image
            depth = renderer.render(model, par['cam']['im_size'], par['cam']['K'],
                                    view['R'], view['t'],
                                    clip_near, clip_far, mode='depth')
Пример #5
0
# Author: Tomas Hodan ([email protected])
# Center for Machine Perception, Czech Technical University in Prague

# Demo script: sample camera views on a view sphere and save them as a
# PLY point cloud for visual inspection.

import os
import sys

sys.path.append(os.path.abspath('..'))
from pysixd import view_sampler, misc

# Sampling parameters.
min_n_views = 642    # minimum number of sampled viewpoints
radius = 1           # view-sphere radius
hemisphere = False   # restrict sampling to the upper hemisphere?

out_views_vis_path = '../output/view_sphere.ply'
misc.ensure_dir(os.path.dirname(out_views_vis_path))

# Sample views.
# NOTE(review): other call sites pass an azimuth range as the third
# positional argument; passing a bool here may rely on a different
# sample_views signature — confirm against pysixd.view_sampler.
views, views_level = view_sampler.sample_views(min_n_views, radius, hemisphere)
print('Sampled views: ' + str(len(views)))

view_sampler.save_vis(out_views_vis_path, views)
Пример #6
0
        # Load the object model mesh (path template filled with the object id).
        model_path = dp['model_mpath'].format(obj_id)
        model = inout.load_ply(model_path)

        # Load model texture (optional: a falsy path template means no texture).
        if dp['model_texture_mpath']:
            model_texture_path = dp['model_texture_mpath'].format(obj_id)
            model_texture = inout.load_im(model_texture_path)
        else:
            model_texture = None

        for radius in radii:
            # Sample views on a sphere of this radius, including in-plane
            # camera tilt in (-pi/2, pi/2) stepped by 0.2*pi.
            views, views_level = view_sampler.sample_views(min_n_views, radius,
                                                           azimuth_range, elev_range,
                                                           tilt_range=(-math.pi/2, math.pi/2), tilt_step=0.2*math.pi)
            print('Sampled views: ' + str(len(views)))

            # Render the object model from all the views
            for view_id, view in enumerate(views):
                # Progress logging every 10th view.
                if view_id % 10 == 0:
                    print('obj,radius,view: ' + str(obj_id) +
                          ',' + str(radius) + ',' + str(view_id))

                # Render depth image
                depth = render(model, dp['cam']['im_size'], dp['cam']['K'],
                                        view['R'], view['t'],
                                        clip_near, clip_far, mode='depth')

                # Convert depth so it is in the same units as the real test images
Пример #7
0
        # Load the object model mesh (path template filled with the object id).
        model_path = dp['model_mpath'].format(obj_id)
        model = inout.load_ply(model_path)

        # Load model texture (optional: a falsy path template means no texture).
        if dp['model_texture_mpath']:
            model_texture_path = dp['model_texture_mpath'].format(obj_id)
            model_texture = inout.load_im(model_texture_path)
        else:
            model_texture = None

        # Image counter — presumably incremented per rendered view further
        # below; verify against the rest of the loop body.
        im_id = 0
        for radius in radii:
            # Sample views on a sphere of this radius, including full in-plane
            # camera tilt (0, 2*pi) stepped by 0.1*pi.
            views, views_level = view_sampler.sample_views(min_n_views, radius,
                                                           azimuth_range, elev_range,
                                                           tilt_range=(0, 2*math.pi), tilt_step=0.1*math.pi)
            print('Sampled views: ' + str(len(views)))

            # Render the object model from all the views
            for view_id, view in enumerate(views):
                # Progress logging every 10th view.
                if view_id % 10 == 0:
                    print('obj,radius,view: ' + str(obj_id) +
                          ',' + str(radius) + ',' + str(view_id))

                # Render depth image
                depth = render(model, dp['cam']['im_size'], dp['cam']['K'],
                                        view['R'], view['t'],
                                        clip_near, clip_far, mode='depth')

                # Convert depth so it is in the same units as the real test images
Пример #8
0
        # Load the object model mesh (path template filled with the object id).
        model_path = p['model_mpath'].format(obj_id)
        model = inout.load_ply(model_path)

        # Load model texture (optional: a falsy path template means no texture).
        if p['model_texture_mpath']:
            model_texture_path = p['model_texture_mpath'].format(obj_id)
            model_texture = inout.load_im(model_texture_path)
        else:
            model_texture = None

        # Accumulators for per-image metadata and ground truth — presumably
        # keyed by im_id further below; verify against the rest of the loop.
        obj_info = {}
        obj_gt = {}
        im_id = 0
        for radius in radii:
            # Sample views on a sphere of this radius within the given
            # azimuth/elevation ranges.
            views, views_level = view_sampler.sample_views(min_n_views, radius,
                                                           azimuth_range, elev_range)
            print('Sampled views: ' + str(len(views)))
            # view_sampler.save_vis(out_views_vis_mpath.format(str(radius)),
            #                       views, views_level)

            # Render the object model from all the views
            for view_id, view in enumerate(views):
                # Progress logging every 10th view.
                if view_id % 10 == 0:
                    print('obj,radius,view: ' + str(obj_id) +
                          ',' + str(radius) + ',' + str(view_id))

                # Render depth image
                depth = renderer.render(model, p['cam']['im_size'], p['cam']['K'],
                                        view['R'], view['t'],
                                        clip_near, clip_far, mode='depth')