Code example #1
def batch_render_random_camera(filename, cam_dist, num_views, width, height,
                         fovy, focal_length, theta_range=None, phi_range=None,
                         axis=None, angle=None, cam_pos=None, cam_lookat=None,
                         double_sided=False, use_quartic=False, b_shadow=True,
                         tile_size=None, save_image_queue=None):
    rendering_time = []

    obj = load_model(filename)
    # normalize the vertices
    v = obj['v']
    axis_range = np.max(v, axis=0) - np.min(v, axis=0)
    v = (v - np.mean(v, axis=0)) / max(axis_range)  # Normalize to make the largest spread 1
    obj['v'] = v

    scene = copy.deepcopy(SCENE_BASIC)

    scene['camera']['viewport'] = [0, 0, width, height]
    scene['camera']['fovy'] = np.deg2rad(fovy)
    scene['camera']['focal_length'] = focal_length

    mesh = obj_to_triangle_spec(obj)
    faces = mesh['face']
    normals = mesh['normal']
    num_tri = faces.shape[0]

    if 'disk' in scene['objects']:
        del scene['objects']['disk']
    scene['objects'].update({'triangle': {'face': None, 'normal': None, 'material_idx': None}})
    scene['objects']['triangle']['face'] = tch_var_f(faces.tolist())
    scene['objects']['triangle']['normal'] = tch_var_f(normals.tolist())
    scene['objects']['triangle']['material_idx'] = tch_var_l(np.zeros(num_tri, dtype=int).tolist())

    scene['materials']['albedo'] = tch_var_f([[0.6, 0.6, 0.6]])
    scene['tonemap']['gamma'] = tch_var_f([1.0])  # Linear output

    # generate camera positions on a sphere
    if cam_pos is None:
        cam_pos = uniform_sample_sphere(radius=cam_dist, num_samples=num_views,
                                        axis=axis, angle=angle,
                                        theta_range=theta_range, phi_range=phi_range)
    lookat = cam_lookat if cam_lookat is not None else np.mean(v, axis=0)
    scene['camera']['at'] = tch_var_f(lookat)

    for idx in range(cam_pos.shape[0]):
        scene['camera']['eye'] = tch_var_f(cam_pos[idx])

        # main render run
        start_time = time()
        res = render(scene, tile_size=tile_size, tiled=tile_size is not None,
                     shadow=b_shadow, double_sided=double_sided,
                     use_quartic=use_quartic)
        res['suffix'] = '_{}'.format(idx)
        res['camera_far'] = scene['camera']['far']
        if save_image_queue is not None:
            save_image_queue.put_nowait(get_data(res))
        rendering_time.append(time() - start_time)

    # Timing statistics
    print('Rendering time mean: {}s, std: {}s'.format(np.mean(rendering_time), np.std(rendering_time)))
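A minimal driver sketch for the function above, under the assumption that the caller owns the queue and consumes the results itself; the model path and camera values are placeholder choices, not taken from the original code.

import queue

save_image_queue = queue.Queue()
batch_render_random_camera('bunny.obj', cam_dist=3.0, num_views=4,
                           width=128, height=128, fovy=30.0, focal_length=0.1,
                           save_image_queue=save_image_queue)

# Each queued item is whatever get_data(res) returned for one rendered view.
while not save_image_queue.empty():
    res = save_image_queue.get_nowait()
    print(sorted(res.keys()))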
Code example #2
def mesh_to_scene(mesh, **params):
    tri_mesh = obj_to_triangle_spec(mesh)
    if 'camera' in params:
        camera = params['camera']
    else:
        camera = {
            "eye": [0.0, 0.0, 10.0, 1.0],
            "near": 1.0,
            "focal_length": 1.0,
            "far": 1000.0,
            "fovy": 1.57079,
            "viewport": [0, 0, 320, 240],
            "up": [0.0, 1.0, 0.0, 0.0],
            "at": [0.0, 0.0, 0.0, 1.0]
        }
    if 'materials' in params:
        materials = params['materials']
    else:
        materials = {
            "albedo": [[0.0, 0.0, 0.0], [0.1, 0.1, 0.1], [0.2, 0.2, 0.2],
                       [0.5, 0.5, 0.5], [0.9, 0.1, 0.1], [0.1, 0.1, 0.8]]
        }
    if 'colors' in params:
        colors = params['colors']
    else:
        colors = [[0.0, 0.0, 0.0], [0.8, 0.1, 0.1], [0.2, 0.2, 0.2]]

    if 'lights' in params:
        lights = params['lights']
    else:
        lights = {
            'pos': np.array([
                [20., 20., 20., 1.0],
                [-15, 3., 15., 1.0],
            ]),
            'color_idx': np.array([2, 1]),
            # Light attenuation factors have the form (kc, kl, kq) and eq: 1/(kc + kl * d + kq * d^2)
            'attenuation': np.array([[0., 1., 0.], [0., 0., 1.]])
        }

    mesh_scene = {
        'camera': camera,
        'lights': lights,
        'colors': colors,
        'materials': materials,
        'objects': {
            'triangle': tri_mesh
        },
        'tonemap': {
            'type': 'gamma',
            'gamma': 0.8
        },
    }
    return mesh_scene
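A hedged usage sketch for mesh_to_scene: the camera override below follows the shape of the default camera dict, and load_mesh_from_file / write_scene are the helpers used in the test script in code example #8; the concrete values are illustrative only.

mesh = load_mesh_from_file('../../data/bunny.obj')
custom_camera = {
    "eye": [0.0, 2.0, 6.0, 1.0],
    "near": 1.0,
    "focal_length": 1.0,
    "far": 1000.0,
    "fovy": 1.047,  # ~60 degrees, in radians like the default camera
    "viewport": [0, 0, 640, 480],
    "up": [0.0, 1.0, 0.0, 0.0],
    "at": [0.0, 0.0, 0.0, 1.0]
}
scene = mesh_to_scene(mesh, camera=custom_camera)
write_scene(scene, 'bunny_scene_custom_camera.json')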
Code example #3
    def __getitem__(self, idx):
        """Get item."""
        # Get object path
        obj_path = os.path.join(self.opt.root_dir, self.samples[idx])
        #print(obj_path)

        # Load obj model
        obj_model = load_model(obj_path)

        if self.opt.use_mesh:
            # normalize the vertices
            v = obj_model['v']
            axis_range = np.max(v, axis=0) - np.min(v, axis=0)
            v = (v - np.mean(v, axis=0)) / max(
                axis_range)  # Normalize to make the largest spread 1
            obj_model['v'] = v
            mesh = obj_to_triangle_spec(obj_model)
            meshes = {
                'face': mesh['face'].astype(np.float32),
                'normal': mesh['normal'].astype(np.float32)
            }
            sample = {'synset': 0, 'mesh': meshes}
        else:
            # Sample points from the 3D mesh
            v, vn = uniform_sample_mesh(obj_model,
                                        num_samples=self.opt.n_splats)
            # Normalize the vertices
            v = (v - np.mean(v, axis=0)) / (v.max() - v.min())

            # Save the splats
            splats = {
                'pos': v.astype(np.float32),
                'normal': vn.astype(np.float32)
            }

            # Add model and synset to the output dictionary
            sample = {'synset': 0, 'splats': splats}

        # Transform
        if self.transform:
            sample = self.transform(sample)

        return sample
Code example #4
def load_scene(scene_filename):
    """Loads a diffrend scene file
    Args:
        fname:

    Returns:
        scene
    """
    import json
    import os

    with open(scene_filename, 'r') as fid:
        scene = json.load(fid)

    basedir = os.path.dirname(scene_filename)
    objects = scene['objects']['obj']
    mesh = {'face': None, 'normal': None, 'material_idx': None}
    for obj in objects:
        print(obj)
        model_path = os.path.join(basedir, obj['path'])
        print(os.path.exists(model_path))
        obj_model = load_obj(model_path)
        scale = get_param_value('scale', obj, None)
        rotate = get_param_value('rotate', obj, None)
        translate = get_param_value('translate', obj, None)
        obj_model = transform_model(obj_model, scale, rotate, translate)
        meshes = obj_to_triangle_spec(obj_model)
        material_idx = np.ones(meshes['face'].shape[0]) * obj['material_idx']
        if mesh['face'] is None:
            mesh['face'] = meshes['face']
            mesh['normal'] = meshes['normal']
            mesh['material_idx'] = material_idx
        else:
            mesh['face'] = np.concatenate((mesh['face'], meshes['face']))
            mesh['normal'] = np.concatenate((mesh['normal'], meshes['normal']))
            mesh['material_idx'] = np.concatenate(
                (mesh['material_idx'], material_idx))
    scene['objects']['triangle'] = mesh
    del scene['objects']['obj']
    return scene
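A hedged usage sketch for load_scene. The inline comment shows the minimal 'obj' entry layout the loop above reads ('path', 'material_idx', and optional 'scale'/'rotate'/'translate'); real scene files may contain more fields, and the bunny scene path is the one used in code example #8.

# Each scene['objects']['obj'] entry is expected to look roughly like:
#   {"path": "bunny.obj", "material_idx": 0,
#    "scale": [1.0, 1.0, 1.0], "translate": [0.0, 0.0, 0.0]}
scene = load_scene('../../data/bunny_scene.json')
print(scene['objects']['triangle']['face'].shape,
      scene['objects']['triangle']['material_idx'].shape)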
Code example #5
    def __getitem__(self, idx):
        """Get item."""
        # Get object path
        obj_path1 = os.path.join(self.opt.root_dir1, 'cube.obj')
        obj_path2 = os.path.join(self.opt.root_dir2, 'sphere_halfbox.obj')
        obj_path3 = os.path.join(self.opt.root_dir3, 'cone.obj')
        # obj_path4 = os.path.join(self.opt.root_dir4, self.samples[idx])

        if not self.loaded:
            self.fg_obj1 = load_model(obj_path1)
            self.fg_obj2 = load_model(obj_path2)
            self.fg_obj3 = load_model(obj_path3)
            # self.fg_obj4 = load_model(obj_path4)
            self.bg_obj = load_model(self.opt.bg_model)
            self.loaded = True
        offset_id = np.random.permutation(4)
        obj_model1 = self.fg_obj1
        obj_model2 = self.fg_obj2
        obj_model3 = self.fg_obj3
        # obj_model4 = self.fg_obj4
        obj2 = self.bg_obj
        v11 = (obj_model1['v'] - obj_model1['v'].mean()) / (
            obj_model1['v'].max() - obj_model1['v'].min())
        v12 = (obj_model2['v'] - obj_model2['v'].mean()) / (
            obj_model2['v'].max() - obj_model2['v'].min())
        v13 = (obj_model3['v'] - obj_model3['v'].mean()) / (
            obj_model3['v'].max() - obj_model3['v'].min())
        # v14 = (obj_model4['v'] - obj_model4['v'].mean()) / (obj_model4['v'].max() - obj_model4['v'].min())
        v2 = obj2['v']  # / (obj2['v'].max() - obj2['v'].min())
        scale = (obj2['v'].max() - obj2['v'].min()) * 0.22
        offset = np.array([[6.9, 6.9, 7.0],
                           [20.4, 6.7, 6.7],
                           [20.4, 6.7, 20.2],
                           [7.0, 6.7, 20.4]])  # + 2 * np.random.rand(3)  # [8.0, 5.0, 18.0]
        if self.opt.only_background:
            v = v2
            f = obj2['f']
        elif self.opt.only_foreground:
            # Use the three normalized foreground meshes (no background)
            v = np.concatenate((v11, v12, v13))
            f = np.concatenate((obj_model1['f'],
                                obj_model2['f'] + v11.shape[0],
                                obj_model3['f'] + v12.shape[0] + v11.shape[0]))
        else:
            if self.opt.random_rotation:
                random_axis = np_normalize(np.random.rand(3))
                random_angle = np.random.rand(1) * np.pi * 2
                M = axis_angle_matrix(axis=random_axis, angle=random_angle)
                M[:3, 3] = offset[offset_id[0]] + 1.5 * np.random.randn(3)
                v11 = np.matmul(scale * v11,
                                M.transpose(1, 0)[:3, :3]) + M[:3, 3]
            else:
                # random_axis = np_normalize(np.random.rand(3))
                # random_angle = np.random.rand(1) * np.pi * 2
                # M = axis_angle_matrix(axis=random_axis, angle=random_angle)
                # M[:3, 3] = offset[offset_id[0]]#+1.5*np.random.randn(3)
                # v11 = np.matmul(scale * v11, M.transpose(1, 0)[:3, :3]) + M[:3, 3]
                #
                # random_axis2 = np_normalize(np.random.rand(3))
                # random_angle2 = np.random.rand(1) * np.pi * 2
                # M2 = axis_angle_matrix(axis=random_axis2, angle=random_angle2)
                # M2[:3, 3] = offset[offset_id[2]]#+1.5*np.random.randn(3)
                # v13 = np.matmul(scale * v13, M2.transpose(1, 0)[:3, :3]) + M2[:3, 3]
                v11 = (scale * v11 + offset[offset_id[0]]
                       + 1.5 * np.random.randn(3))
                v12 = (scale * v12 + offset[offset_id[1]]
                       + 1.5 * np.random.randn(3))
                v13 = (scale * v13 + offset[offset_id[2]]
                       + 1.5 * np.random.randn(3))
                # v14 = scale * v14 + offset[offset_id[3]]
            # v = np.concatenate((v11,v12,v13,v14, v2))
            # f = np.concatenate((obj_model1['f'],obj_model2['f']+ v11.shape[0],obj_model3['f']+ v12.shape[0],obj_model4['f']+ v13.shape[0],obj2['f'] + v14.shape[0]))
            v = np.concatenate((v11, v12, v13, v2))
            #import ipdb; ipdb.set_trace()
            f = np.concatenate(
                (obj_model1['f'], obj_model2['f'] + v11.shape[0],
                 obj_model3['f'] + v12.shape[0] + v11.shape[0],
                 obj2['f'] + v13.shape[0] + v12.shape[0] + v11.shape[0]))

        obj_model = {'v': v, 'f': f}

        if self.opt.use_mesh:
            # normalize the vertices
            v = obj_model['v']
            axis_range = np.max(v, axis=0) - np.min(v, axis=0)
            v = (v - np.mean(v, axis=0)) / max(
                axis_range)  # Normalize to make the largest spread 1
            obj_model['v'] = v
            mesh = obj_to_triangle_spec(obj_model)
            meshes = {
                'face': mesh['face'].astype(np.float32),
                'normal': mesh['normal'].astype(np.float32)
            }
            sample = {'synset': 0, 'mesh': meshes}
        else:
            # Sample points from the 3D mesh
            v, vn = uniform_sample_mesh(obj_model,
                                        num_samples=self.opt.n_splats)
            # Normalize the vertices
            v = (v - np.mean(v, axis=0)) / (v.max() - v.min())

            # Save the splats
            splats = {
                'pos': v.astype(np.float32),
                'normal': vn.astype(np.float32)
            }

            # Add model and synset to the output dictionary
            sample = {'synset': 0, 'splats': splats}

        # Transform
        if self.transform:
            sample = self.transform(sample)

        return sample
Code example #6
    def __getitem__(self, idx):
        """Get item."""
        # Get object path
        synset, obj = self.samples[idx]
        obj_path = os.path.join(self.opt.root_dir, synset, obj, 'models',
                                'model_normalized.obj')

        # Load obj model
        obj_model = load_model(obj_path)

        # Show loaded model
        # animate_sample_generation(model_name=None, obj=obj_model,
        #                           num_samples=1000, out_dir=None,
        #                           resample=False, rotate_angle=360)
        if self.opt.bg_model is not None:
            # add a background to the shapenet model
            bg_model = load_model(self.opt.bg_model)
            bg_v = bg_model['v']
            scale = (bg_v.max() - bg_v.min()) * 0.25
            offset = np.array([9, 9, 10])  #+ 2 * np.random.rand(3)
            v1 = (obj_model['v'] - obj_model['v'].mean()) / (
                obj_model['v'].max() - obj_model['v'].min())
            v = np.concatenate((scale * v1 + offset, bg_v))
            f = np.concatenate((obj_model['f'], bg_model['f'] + v1.shape[0]))
            obj_model = {'v': v, 'f': f}

        if self.opt.use_mesh:
            # normalize the vertices
            v = obj_model['v']
            axis_range = np.max(v, axis=0) - np.min(v, axis=0)
            v = (v - np.mean(v, axis=0)) / max(
                axis_range)  # Normalize to make the largest spread 1
            obj_model['v'] = v
            mesh = obj_to_triangle_spec(obj_model)
            meshes = {
                'face': mesh['face'].astype(np.float32),
                'normal': mesh['normal'].astype(np.float32)
            }
            sample = {'synset': synset, 'mesh': meshes}
        else:
            # Sample points from the 3D mesh
            v, vn = uniform_sample_mesh(obj_model,
                                        num_samples=self.opt.n_splats)
            # Normalize the vertices
            v = (v - np.mean(v, axis=0)) / (v.max() - v.min())

            # Save the splats
            splats = {
                'pos': v.astype(np.float32),
                'normal': vn.astype(np.float32)
            }

            # Convert model to splats and render views
            # samples = self._generate_samples(obj_model)

            # Add model and synset to the output dictionary
            # sample = {'obj': obj_model, 'synset': synset, 'splats': splats}
            sample = {'synset': synset, 'splats': splats}

        # Transform
        if self.transform:
            sample = self.transform(sample)

        return sample
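The __getitem__ above belongs to a Dataset-style class whose definition is not shown; the sketch below assumes a hypothetical ShapeNetDataset wrapper (the class name and constructor are made up, the option fields mirror the self.opt attributes used above) and uses batch_size=1 to avoid collating meshes with different face counts.

from argparse import Namespace
from torch.utils.data import DataLoader

# Hypothetical options namespace; only fields referenced by __getitem__ are set.
opt = Namespace(root_dir='./shapenet', bg_model=None, use_mesh=True, n_splats=1024)
dataset = ShapeNetDataset(opt, transform=None)  # hypothetical class exposing the __getitem__ above
loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=0)

for sample in loader:
    if 'mesh' in sample:
        faces = sample['mesh']['face']      # batched tensors from the default collate
        normals = sample['mesh']['normal']
    else:
        positions = sample['splats']['pos']
        normals = sample['splats']['normal']
    break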
Code example #7
def setup_mesh(mesh, material, path_prefix=None):
    """Convert a mesh to its triangle spec; `path_prefix` is currently unused."""
    tri_mesh = obj_to_triangle_spec(mesh, material=material)
    return tri_mesh
Code example #8
                'material_idx': [3, 3]
            },
        },
        'tonemap': {
            'type': 'gamma',
            'gamma': 0.8
        },
    }
    write_scene(scene_v0, 'test_scene.json')
    test_scene = load_scene('test_scene.json')
    match = test_scene == scene_v0
    if not match:
        print('Test failed')

    return match


def test_scene_loading(filename):
    pass


if __name__ == '__main__':
    scene = load_scene('../../data/basic_scene.json')
    bunny_scene = load_scene('../../data/bunny_scene.json')

    mesh = load_mesh_from_file('../../data/bunny.obj')
    tri_mesh = obj_to_triangle_spec(mesh)

    mesh_scene = mesh_to_scene(mesh)
    write_scene(mesh_scene, 'bunny_scene.json')
Code example #9
    def __getitem__(self, idx):
        """Get item."""
        # Get object path
        obj_path = os.path.join(self.opt.root_dir, self.samples[idx])

        if not self.loaded:
            self.fg_obj = load_model(obj_path)
            self.bg_obj = load_model(self.opt.bg_model)
            self.loaded = True
        obj_model = self.fg_obj
        obj2 = self.bg_obj
        v1 = (obj_model['v'] - obj_model['v'].mean()) / (obj_model['v'].max() -
                                                         obj_model['v'].min())
        v2 = obj2['v']  # / (obj2['v'].max() - obj2['v'].min())
        scale = (obj2['v'].max() - obj2['v'].min()) * 0.4
        offset = np.array([14.0, 8.0, 12.0])  #+ 2 * np.abs(np.random.randn(3))
        if self.opt.only_background:
            v = v2
            f = obj2['f']
        elif self.opt.only_foreground:
            v = v1
            f = obj_model['f']
        else:
            if self.opt.random_rotation:
                random_axis = np_normalize(self.opt.axis)
                random_angle = np.random.rand(1) * np.pi * 2
                M = axis_angle_matrix(axis=random_axis, angle=random_angle)
                M[:3, 3] = offset
                v1 = np.matmul(scale * v1,
                               M.transpose(1, 0)[:3, :3]) + M[:3, 3]
            else:
                v1 = scale * v1 + offset
            v = np.concatenate((v1, v2))
            f = np.concatenate((obj_model['f'], obj2['f'] + v1.shape[0]))

        obj_model = {'v': v, 'f': f}

        if self.opt.use_mesh:
            # normalize the vertices
            v = obj_model['v']
            axis_range = np.max(v, axis=0) - np.min(v, axis=0)
            v = (v - np.mean(v, axis=0)) / max(
                axis_range)  # Normalize to make the largest spread 1
            obj_model['v'] = v
            mesh = obj_to_triangle_spec(obj_model)
            meshes = {
                'face': mesh['face'].astype(np.float32),
                'normal': mesh['normal'].astype(np.float32)
            }
            sample = {'synset': 0, 'mesh': meshes}
        else:
            # Sample points from the 3D mesh
            v, vn = uniform_sample_mesh(obj_model,
                                        num_samples=self.opt.n_splats)
            # Normalize the vertices
            v = (v - np.mean(v, axis=0)) / (v.max() - v.min())

            # Save the splats
            splats = {
                'pos': v.astype(np.float32),
                'normal': vn.astype(np.float32)
            }

            # Add model and synset to the output dictionary
            sample = {'synset': 0, 'splats': splats}

        # Transform
        if self.transform:
            sample = self.transform(sample)

        return sample
Code example #10
def render_random_camera(filename,
                         out_dir,
                         num_samples,
                         radius,
                         cam_dist,
                         num_views,
                         width,
                         height,
                         fovy,
                         focal_length,
                         norm_depth_image_only,
                         theta_range=None,
                         phi_range=None,
                         axis=None,
                         angle=None,
                         cam_pos=None,
                         cam_lookat=None,
                         use_mesh=False,
                         double_sided=False,
                         use_quartic=False,
                         b_shadow=True,
                         b_display=False,
                         tile_size=None):
    """
    Randomly generate N samples on a surface and render them. The samples
    include position and normal; the radius is set to a constant.
    """
    sampling_time = []
    rendering_time = []

    obj = load_model(filename)
    # normalize the vertices
    v = obj['v']
    axis_range = np.max(v, axis=0) - np.min(v, axis=0)
    v = (v - np.mean(v, axis=0)) / max(
        axis_range)  # Normalize to make the largest spread 1
    obj['v'] = v

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    r = np.ones(num_samples) * radius

    scene = copy.deepcopy(SCENE_BASIC)

    scene['camera']['viewport'] = [0, 0, width, height]
    scene['camera']['fovy'] = np.deg2rad(fovy)
    scene['camera']['focal_length'] = focal_length
    if use_mesh:
        mesh = obj_to_triangle_spec(obj)
        faces = mesh['face']
        normals = mesh['normal']
        num_tri = faces.shape[0]
        # if faces.shape[-1] == 3:
        #     faces = np.concatenate((faces, np.ones((faces.shape[0], faces.shape[1], 1))), axis=-1).tolist()
        # if normals.shape[-1] == 3:
        #     normals = np.concatenate((normals, ))
        if 'disk' in scene['objects']:
            del scene['objects']['disk']
        scene['objects'].update(
            {'triangle': {
                'face': None,
                'normal': None,
                'material_idx': None
            }})
        scene['objects']['triangle']['face'] = tch_var_f(faces.tolist())
        scene['objects']['triangle']['normal'] = tch_var_f(normals.tolist())
        scene['objects']['triangle']['material_idx'] = tch_var_l(
            np.zeros(num_tri, dtype=int).tolist())
    else:
        scene['objects']['disk']['radius'] = tch_var_f(r)
        scene['objects']['disk']['material_idx'] = tch_var_l(
            np.zeros(num_samples, dtype=int).tolist())
    scene['materials']['albedo'] = tch_var_f([[0.6, 0.6, 0.6]])
    scene['tonemap']['gamma'] = tch_var_f([1.0])  # Linear output

    # generate camera positions on a sphere
    if cam_pos is None:
        cam_pos = uniform_sample_sphere(radius=cam_dist,
                                        num_samples=num_views,
                                        axis=axis,
                                        angle=angle,
                                        theta_range=theta_range,
                                        phi_range=phi_range)
    lookat = cam_lookat if cam_lookat is not None else np.mean(v, axis=0)
    scene['camera']['at'] = tch_var_f(lookat)

    if b_display:
        h1 = plt.figure()
        h2 = plt.figure()
    for idx in range(cam_pos.shape[0]):
        if not use_mesh:
            start_time = time()
            v, vn = uniform_sample_mesh(obj, num_samples=num_samples)
            sampling_time.append(time() - start_time)

            scene['objects']['disk']['pos'] = tch_var_f(v)
            scene['objects']['disk']['normal'] = tch_var_f(vn)

        scene['camera']['eye'] = tch_var_f(cam_pos[idx])
        suffix = '_{}'.format(idx)

        # main render run
        start_time = time()
        res = render(scene,
                     tile_size=tile_size,
                     tiled=tile_size is not None,
                     shadow=b_shadow,
                     norm_depth_image_only=norm_depth_image_only,
                     double_sided=double_sided,
                     use_quartic=use_quartic)
        rendering_time.append(time() - start_time)

        im = np.uint8(255. * get_data(res['image']))
        depth = get_data(res['depth'])

        depth[depth >= scene['camera']['far']] = depth.min()
        im_depth = np.uint8(255. * (depth - depth.min()) /
                            (depth.max() - depth.min()))

        if b_display:
            plt.figure(h1.number)
            plt.imshow(im)
            plt.title('Image')
            plt.savefig(out_dir + '/fig_img' + suffix + '.png')

            plt.figure(h2.number)
            plt.imshow(im_depth)
            plt.title('Depth Image')
            plt.savefig(out_dir + '/fig_depth' + suffix + '.png')

        imsave(out_dir + '/img' + suffix + '.png', im)
        imsave(out_dir + '/depth' + suffix + '.png', im_depth)

    # Timing statistics
    if not use_mesh:
        print('Sampling time mean: {}s, std: {}s'.format(
            np.mean(sampling_time), np.std(sampling_time)))
    print('Rendering time mean: {}s, std: {}s'.format(np.mean(rendering_time),
                                                      np.std(rendering_time)))
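A hedged call sketch for render_random_camera; all concrete values below (model path, output directory, image size, camera distance) are placeholders, and fovy is given in degrees because the function converts it with np.deg2rad.

render_random_camera('bunny.obj',
                     out_dir='./renders',
                     num_samples=5000,
                     radius=0.01,
                     cam_dist=3.0,
                     num_views=8,
                     width=128,
                     height=128,
                     fovy=30.0,
                     focal_length=0.1,
                     norm_depth_image_only=False,
                     use_mesh=True,
                     b_display=False)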