Example #1
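A round-trip test: save the loaded teapot mesh with save_obj, reload the written file, and check that vertices and faces come back unchanged.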
 def test_save_obj(self):
     vertices, faces = neural_renderer.load_obj('./tests/data/teapot.obj')
     neural_renderer.save_obj('./tests/data/teapot2.obj', vertices, faces)
     vertices2, faces2 = neural_renderer.load_obj('./tests/data/teapot2.obj')
     os.remove('./tests/data/teapot2.obj')
     assert np.allclose(vertices, vertices2)
     assert np.allclose(faces, faces2)
Example #2
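A fork that models the viewpoint with explicit Camera objects (position_rotation_from_angles and nr.Camera appear to be fork-specific); it renders two textured ShapeNet-style models to PNG.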
    def test_texture(self):
        position, rotation = position_rotation_from_angles(2, 15, 30)
        camera = nr.Camera(position=position, rotation=rotation)
        renderer = nr.Renderer(camera=camera)

        vertices, faces, textures = nr.load_obj(os.path.join(
            data_dir, '1cde62b063e14777c9152a706245d48/model.obj'),
                                                load_texture=True)

        images = renderer(vertices[None, :, :], faces[None, :, :],
                          textures[None, :, :, :, :, :]).permute(
                              0, 2, 3, 1).detach().cpu().numpy()
        image = (images[0] * 255).astype(np.uint8)
        imsave(os.path.join(data_dir, 'car.png'), image)

        vertices, faces, textures = nr.load_obj(os.path.join(
            data_dir, '4e49873292196f02574b5684eaec43e9/model.obj'),
                                                load_texture=True,
                                                texture_size=16)
        position, rotation = position_rotation_from_angles(2, 15, -90)
        renderer.camera.position = position
        renderer.camera.rotation = rotation
        images = renderer(vertices[None, :, :], faces[None, :, :],
                          textures[None, :, :, :, :, :]).permute(
                              0, 2, 3, 1).detach().cpu().numpy()
        image = (images[0] * 255).astype(np.uint8)
        imsave(os.path.join(data_dir, 'display.png'), image)
Example #3
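The Chainer version of the texture test: arrays are moved to the GPU by hand and the viewpoint is set through renderer.eye.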
    def test_texture(self):
        renderer = neural_renderer.Renderer()

        vertices, faces, textures = neural_renderer.load_obj(
            './tests/data/1cde62b063e14777c9152a706245d48/model.obj',
            load_texture=True)

        vertices = chainer.cuda.to_gpu(vertices)
        faces = chainer.cuda.to_gpu(faces)
        textures = chainer.cuda.to_gpu(textures)
        renderer.eye = neural_renderer.get_points_from_angles(2, 15, 30)
        images = renderer.render(vertices[None, :, :], faces[None, :, :],
                                 textures[None, :, :, :, :, :]).data.get()
        scipy.misc.imsave('./tests/data/car.png',
                          scipy.misc.toimage(images[0]))

        vertices, faces, textures = neural_renderer.load_obj(
            './tests/data/4e49873292196f02574b5684eaec43e9/model.obj',
            load_texture=True,
            texture_size=16)
        vertices = chainer.cuda.to_gpu(vertices)
        faces = chainer.cuda.to_gpu(faces)
        textures = chainer.cuda.to_gpu(textures)
        renderer.eye = neural_renderer.get_points_from_angles(2, 15, -90)
        images = renderer.render(vertices[None, :, :], faces[None, :, :],
                                 textures[None, :, :, :, :, :]).data.get()
        scipy.misc.imsave('./tests/data/display.png',
                          scipy.misc.toimage(images[0]))
Example #4
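A PyTorch variant that also returns depth and silhouette maps and previews them with OpenCV (ColorizeDepth is presumably a project-local helper).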
    def test_texture(self):
        renderer = nr.Renderer(camera_mode='look_at')

        vertices, faces, textures = nr.load_obj(os.path.join(
            data_dir, '1cde62b063e14777c9152a706245d48/model.obj'),
                                                load_texture=True)

        renderer.eye = nr.get_points_from_angles(2, 15, 30)
        images, depth, silhouette = renderer.render(
            vertices[None, :, :], faces[None, :, :],
            textures[None, :, :, :, :, :])
        images = images.permute(0, 2, 3, 1).detach().cpu().numpy()
        silhouette = silhouette.detach().cpu().numpy()
        depth = depth.detach().cpu().numpy()
        imsave(os.path.join(data_dir, 'car.png'), images[0])
        cv2.imshow("r", images[0, :, :, ::-1])
        cv2.imshow("d", ColorizeDepth(depth[0], 1.5, 2.5))
        cv2.imshow("s", silhouette[0])
        cv2.waitKey()

        vertices, faces, textures = nr.load_obj(os.path.join(
            data_dir, '4e49873292196f02574b5684eaec43e9/model.obj'),
                                                load_texture=True,
                                                texture_size=16)
        renderer.eye = nr.get_points_from_angles(2, 15, -90)
        images, _, _ = renderer.render(vertices[None, :, :], faces[None, :, :],
                                       textures[None, :, :, :, :, :])
        images = images.permute(0, 2, 3, 1).detach().cpu().numpy()
        imsave(os.path.join(data_dir, 'display.png'), images[0])
Example #6
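The PyTorch round-trip test for save_obj, comparing the reloaded tensors with torch.allclose.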
 def test_save_obj(self):
     teapot = os.path.join(data_dir, 'teapot.obj')
     teapot2 = os.path.join(data_dir, 'teapot2.obj')
     vertices, faces = nr.load_obj(teapot)
     nr.save_obj(teapot2, vertices, faces)
     vertices2, faces2 = nr.load_obj(teapot2)
     os.remove(teapot2)
     assert torch.allclose(vertices, vertices2)
     assert torch.allclose(faces, faces2)
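Example #7
Loads a sphere or cuboid template mesh as the base geometry of a GAN primitive; the cuboid is loaded with normalization disabled (the positional False).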
 def __init__(self, render_type, n_channel):
     super().__init__()
     primitive_type = render_type
     self.n_channel = n_channel
     if primitive_type == 'sphere':
         v, f = nr.load_obj('controllable_gan/templates/sphere_114.obj')
     elif primitive_type == 'cuboid':
         v, f = nr.load_obj('controllable_gan/templates/cube.obj', False)
     else:
         raise AttributeError('unknown render_type: %s' % render_type)
     self.n_latent = 128
     self.v = v
     self.f = f
Example #8
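Checks load_obj against a hand-written tetrahedron, with and without normalization; for this unit-cube mesh, normalization amounts to v * 2 - 1.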
    def test_tetrahedron(self):
        vertices_ref = np.array(
            [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [0., 0., 0.]],
            'float32')
        faces_ref = np.array([[1, 3, 2], [3, 1, 0], [2, 0, 1], [0, 2, 3]],
                             'int32')

        obj_file = os.path.join(data_dir, 'tetrahedron.obj')
        vertices, faces = nr.load_obj(obj_file, False)
        assert (np.allclose(vertices_ref, vertices))
        assert (np.allclose(faces_ref, faces))
        vertices, faces = nr.load_obj(obj_file, True)
        assert (np.allclose(vertices_ref * 2 - 1.0, vertices))
        assert (np.allclose(faces_ref, faces))
Example #9
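The same tetrahedron test with hard-coded paths.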
    def test_tetrahedron(self):
        vertices_ref = np.array(
            [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [0., 0., 0.]],
            'float32')
        faces_ref = np.array([[1, 3, 2], [3, 1, 0], [2, 0, 1], [0, 2, 3]],
                             'int32')

        vertices, faces = neural_renderer.load_obj(
            './tests/data/tetrahedron.obj', False)
        assert (np.allclose(vertices_ref, vertices))
        assert (np.allclose(faces_ref, faces))
        vertices, faces = neural_renderer.load_obj(
            './tests/data/tetrahedron.obj', True)
        assert (np.allclose(vertices_ref * 2 - 1.0, vertices))
        assert (np.allclose(faces_ref, faces))
Example #10
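The stock PyTorch example: orbit the camera around the teapot in 4° steps and append each rendered frame to a GIF.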
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename_input', type=str, default=os.path.join(data_dir, 'teapot.obj'))
    parser.add_argument('-o', '--filename_output', type=str, default=os.path.join(data_dir, 'example1.gif'))
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    # other settings
    camera_distance = 2.732
    elevation = 30
    texture_size = 2

    # load .obj
    vertices, faces = nr.load_obj(args.filename_input)
    vertices = vertices[None, :, :]  # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
    faces = faces[None, :, :]  # [num_faces, 3] -> [batch_size=1, num_faces, 3]

    # create texture [batch_size=1, num_faces, texture_size, texture_size, texture_size, RGB]
    textures = torch.ones(1, faces.shape[1], texture_size, texture_size, texture_size, 3, dtype=torch.float32).cuda()

    # to gpu: nothing to do here; load_obj in the PyTorch port already returns CUDA tensors

    # create renderer
    renderer = nr.Renderer(camera_mode='look_at')

    # draw object
    loop = tqdm.tqdm(range(0, 360, 4))
    writer = imageio.get_writer(args.filename_output, mode='I')
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.eye = nr.get_points_from_angles(camera_distance, elevation, azimuth)
        images = renderer(vertices, faces, textures)  # [batch_size, RGB, image_size, image_size]
        image = images.detach().cpu().numpy()[0].transpose((1, 2, 0))  # [image_size, image_size, RGB]
        writer.append_data((255*image).astype(np.uint8))
    writer.close()
Example #11
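A texture-optimization model: the textures tensor is the only nn.Parameter, and lighting is made purely ambient so shading does not confound the fit.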
    def __init__(self, filename_obj, filename_ref):
        super(Model, self).__init__()
        vertices, faces = nr.load_obj(filename_obj)
        self.register_buffer('vertices', vertices[None, :, :])
        self.register_buffer('faces', faces[None, :, :])

        # create textures
        texture_size = 4
        textures = torch.zeros(1,
                               self.faces.shape[1],
                               texture_size,
                               texture_size,
                               texture_size,
                               3,
                               dtype=torch.float32)
        self.textures = nn.Parameter(textures)

        # load reference image
        image_ref = torch.from_numpy(
            imread(filename_ref).astype('float32') / 255.).permute(2, 0,
                                                                   1)[None, ::]
        self.register_buffer('image_ref', image_ref)

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at')
        renderer.perspective = False
        renderer.light_intensity_directional = 0.0
        renderer.light_intensity_ambient = 1.0
        self.renderer = renderer
Example #12
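A Chainer shape decoder that deforms a sphere template (sphere_642.obj); load_obj supplies the base vertices and the faces used to build the graph Laplacian.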
    def __init__(self,
                 dim_in=512,
                 scaling=1.,
                 filename_obj='./data/obj/sphere_642.obj'):
        super(BasicShapeDecoder, self).__init__()

        with self.init_scope():
            self.vertices_base, self.faces = neural_renderer.load_obj(
                filename_obj)
            self.num_vertices = self.vertices_base.shape[0]
            self.num_faces = self.faces.shape[0]
            self.obj_scale = 0.5
            self.object_size = 1.0
            self.scaling = scaling

            dim_hidden = [4096, 4096]
            init = chainer.initializers.HeNormal()
            self.linear1 = cl.Linear(dim_in, dim_hidden[0], initialW=init)
            self.linear2 = cl.Linear(dim_hidden[0],
                                     dim_hidden[1],
                                     initialW=init)
            self.linear_bias = cl.Linear(dim_hidden[1],
                                         self.num_vertices * 3,
                                         initialW=init)

            self.laplacian = get_graph_laplacian(self.faces, self.num_vertices)
Example #13
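A symmetric variant of the decoder: a vertex matrix ties mirrored vertices together so the network predicts only 337 of the 642 vertices, per the shape comment.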
    def __init__(self,
                 dim_in=512,
                 scaling=1.,
                 filename_obj='./data/obj/sphere_642.obj'):
        super(BasicSymmetricShapeDecoder, self).__init__()

        with self.init_scope():
            self.vertices_base, self.faces = neural_renderer.load_obj(
                filename_obj)
            self.num_vertices = self.vertices_base.shape[0]
            self.num_faces = self.faces.shape[0]
            self.obj_scale = 0.5
            self.object_size = 1.0
            self.scaling = scaling

            self.vertices_base, self.vertices_matrix = \
                self.compute_vertices_matrix()  # [642 * 3, 337 * 3]
            dim_out = self.vertices_matrix.shape[1]

            dim_hidden = [4096, 4096]
            init = chainer.initializers.HeNormal()
            self.linear1 = cl.Linear(dim_in, dim_hidden[0], initialW=init)
            self.linear2 = cl.Linear(dim_hidden[0],
                                     dim_hidden[1],
                                     initialW=init)
            self.linear_bias = cl.Linear(dim_hidden[1], dim_out, initialW=init)

            self.laplacian = get_graph_laplacian(self.faces, self.num_vertices)
            self.degrees = self.xp.histogram(
                self.faces,
                self.xp.arange(self.num_vertices + 1))[0].astype('int32')
Example #14
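A forward test (the stest_ prefix presumably keeps it disabled) that renders two textured models against reference images; this fork's load_obj also returns texture coordinates (vertices_t, faces_t).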
    def stest_forward_case2(self):
        data = [
            [
                './tests/data/4e49873292196f02574b5684eaec43e9/model.obj',
                neural_renderer.get_points_from_angles(2.5, 10, -90),
                './tests/data/4e49873292196f02574b5684eaec43e9.png',
            ],
            [
                './tests/data/1cde62b063e14777c9152a706245d48/model.obj',
                neural_renderer.get_points_from_angles(2.5, 10, 60),
                './tests/data/1cde62b063e14777c9152a706245d48.png',
            ],
        ]

        renderer = neural_renderer.Renderer()
        renderer.draw_backside = False
        for i, (filename, viewpoint, reference) in enumerate(data):
            renderer.viewpoints = viewpoint
            ref = neural_renderer.imread(reference)

            vertices, faces, vertices_t, faces_t, textures = neural_renderer.load_obj(
                filename, load_textures=True)
            vertices, faces, vertices_t, faces_t, textures = neural_renderer.to_gpu(
                (vertices[None, :, :], faces, vertices_t[None, :, :], faces_t,
                 textures[None, :, :, :]))

            images = renderer.render(vertices, faces, vertices_t, faces_t,
                                     textures).data
            image = images[0].transpose((1, 2, 0))
            # imageio.toimage(image.get(), cmin=0, cmax=1).save(reference)

            chainer.testing.assert_allclose(ref, image, atol=1e-2)
Example #15
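A Mesh.fromobj constructor (presumably decorated with @classmethod) that forwards load_obj options and leaves textures as None when none are requested.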
 def fromobj(cls, filename_obj, normalization=True, load_texture=False, texture_size=4):
     '''
     Create a Mesh object from a .obj file
     '''
     if load_texture:
         vertices, faces, textures = nr.load_obj(filename_obj,
                                                 normalization=normalization,
                                                 texture_size=texture_size,
                                                 load_texture=True)
     else:
         vertices, faces = nr.load_obj(filename_obj,
                                       normalization=normalization,
                                       texture_size=texture_size,
                                       load_texture=False)
         textures = None
     return cls(vertices, faces, textures, texture_size)
Example #16
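Silhouette-fitting setup: the vertices are the trainable parameter and the reference image is reduced to a grayscale map.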
    def __init__(self, filename_obj, filename_ref):
        super(Model, self).__init__()

        # load .obj
        vertices, faces = nr.load_obj(filename_obj)
        self.vertices = nn.Parameter(vertices[None, :, :])
        self.register_buffer('faces', faces[None, :, :])

        # create textures
        texture_size = 2
        textures = torch.ones(1,
                              self.faces.shape[1],
                              texture_size,
                              texture_size,
                              texture_size,
                              3,
                              dtype=torch.float32)
        self.register_buffer('textures', textures)

        # load reference image
        image_ref = torch.from_numpy(
            imread(filename_ref).astype(np.float32).mean(-1) / 255.)[None, ::]
        self.register_buffer('image_ref', image_ref)

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at')
        self.renderer = renderer
Example #17
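Exercises both the raw NMR wrapper and its Torch API: render masks, backpropagate a uniform gradient, and inspect the vertex gradients.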
def exec_main():
    obj_file = 'birds3d/external/neural_renderer/examples/data/teapot.obj'
    vertices, faces = neural_renderer.load_obj(obj_file)

    renderer = NMR()
    renderer.to_gpu(device=0)

    masks = renderer.forward_mask(vertices[None, :, :], faces[None, :, :])
    print(np.sum(masks))
    print(masks.shape)

    grad_masks = masks * 0 + 1
    vert_grad = renderer.backward_mask(grad_masks)
    print(np.sum(vert_grad))
    print(vert_grad.shape)

    # Torch API
    mask_renderer = NeuralRenderer()
    vertices_var = torch.autograd.Variable(torch.from_numpy(
        vertices[None, :, :]).cuda(device=0),
                                           requires_grad=True)
    faces_var = torch.autograd.Variable(
        torch.from_numpy(faces[None, :, :]).cuda(device=0))

    for ix in range(100):
        masks_torch = mask_renderer.forward(vertices_var, faces_var)
        vertices_var.grad = None
        masks_torch.backward(torch.from_numpy(grad_masks).cuda(device=0))

    print(torch.sum(masks_torch))
    print(masks_torch.shape)
    print(torch.sum(vertices_var.grad))
Example #18
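Chainer camera-optimization model: the camera position is a chainer.Parameter wired into renderer.eye.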
    def __init__(self, filename_obj, filename_ref=None):
        super(Model, self).__init__()

        with self.init_scope():
            # load .obj
            vertices, faces = neural_renderer.load_obj(filename_obj)
            self.vertices = vertices[None, :, :]
            self.faces = faces[None, :, :]

            # create textures
            texture_size = 2
            textures = np.ones((1, self.faces.shape[1], texture_size, texture_size, texture_size, 3), 'float32')
            self.textures = textures

            # load reference image
            if filename_ref is not None:
                self.image_ref = (scipy.misc.imread(filename_ref).max(-1) != 0).astype('float32')
            else:
                self.image_ref = None

            # camera parameters
            self.camera_position = chainer.Parameter(np.array([6, 10, -14], 'float32'))

            # setup renderer
            renderer = neural_renderer.Renderer()
            renderer.eye = self.camera_position
            self.renderer = renderer
Example #19
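Chainer texture-optimization model with an orthographic projection and ambient-only lighting.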
    def __init__(self, filename_obj, filename_ref):
        super(Model, self).__init__()

        with self.init_scope():
            # load .obj
            vertices, faces = neural_renderer.load_obj(filename_obj)
            self.vertices = vertices[None, :, :]
            self.faces = faces[None, :, :]

            # create textures
            texture_size = 4
            textures = np.zeros((1, self.faces.shape[1], texture_size,
                                 texture_size, texture_size, 3), 'float32')
            self.textures = chainer.Parameter(textures)

            # load reference image
            self.image_ref = scipy.misc.imread(filename_ref).astype(
                'float32') / 255.

            # setup renderer
            renderer = neural_renderer.Renderer()
            renderer.perspective = False
            renderer.light_intensity_directional = 0.0
            renderer.light_intensity_ambient = 1.0
            self.renderer = renderer
Example #20
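A dataset loader that optionally pre-buffers one mesh per frame via load_obj(..., normalization=False, use_cuda=False).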
    def buffer_all(self):
        # if preset_uv_path:
        #    self.v_attr, self.f_attr = nr.load_obj(cur_obj_fp, normalization = False)

        if self.cfg.DATASET_FVV.PRELOAD_MESHS:
            print(" Buffering meshs...")
            self.objs = {}            
            for keep_idx in self.keep_idxs:
                frame_idx = self.frame_idxs[keep_idx]
                if frame_idx in self.objs:
                    continue
                cur_obj_fp = self.cfg.DATASET_FVV.MESH_DIR%(frame_idx)
                obj_data = {}
                obj_data['v_attr'], obj_data['f_attr'] = nr.load_obj(cur_obj_fp, normalization=False, use_cuda=False)
                self.objs[frame_idx] = obj_data
                if self.cfg.VERBOSE:
                    print(' Loading mesh: ' + str(int(frame_idx)) + ' ' + cur_obj_fp)

        if self.cfg.DATASET_FVV.PRELOAD_VIEWS:
            # Buffer files
            print("Buffering files...")
            self.views_all = []
            for i in range(self.__len__()):
                if not i % 50:
                    print('Data', i)
                self.views_all.append(self.read_view(i))
Example #21
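A vertex-optimization model that center-crops the reference image to 256×256 before fitting.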
    def __init__(self, filename_obj, filename_ref):
        super(Model, self).__init__()

        with self.init_scope():
            # load .obj
            vertices, faces = neural_renderer.load_obj(filename_obj)
            self.vertices = chainer.Parameter(vertices[None, :, :])
            self.faces = faces[None, :, :]

            # create textures
            texture_size = 2
            textures = np.ones((1, self.faces.shape[1], texture_size,
                                texture_size, texture_size, 3), 'float32')
            self.textures = textures

            # load reference image
            #self.image_ref = scipy.misc.imread(filename_ref).astype('float32').mean(-1) / 255.
            image_ref = scipy.misc.imread(filename_ref).astype('float32').mean(
                -1) / 255.
            h = image_ref.shape[0]
            w = image_ref.shape[1]
            hcrop = wcrop = self.image_size = 256  #np.min(image_ref.shape[:1])
            top = int(math.floor((h - hcrop) / 2.0))
            bottom = int(math.ceil((h + hcrop) / 2.0))
            left = int(math.floor((w - wcrop) / 2.0))
            right = int(math.ceil((w + wcrop) / 2.0))
            if len(image_ref.shape) > 2:
                self.image_ref = image_ref[top:bottom, left:right, :]
            else:
                self.image_ref = image_ref[top:bottom, left:right]
            # setup renderer
            renderer = neural_renderer.Renderer()
            self.renderer = renderer
Example #22
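The PyTorch port of the camera-optimization model from Example #18.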
    def __init__(self, filename_obj, filename_ref=None):
        super(Model, self).__init__()
        # load .obj
        vertices, faces = nr.load_obj(filename_obj)
        self.register_buffer('vertices', vertices[None, :, :])
        self.register_buffer('faces', faces[None, :, :])

        # create textures
        texture_size = 2
        textures = torch.ones(1,
                              self.faces.shape[1],
                              texture_size,
                              texture_size,
                              texture_size,
                              3,
                              dtype=torch.float32)
        self.register_buffer('textures', textures)

        # load reference image
        image_ref = torch.from_numpy(
            (imread(filename_ref).max(-1) != 0).astype(np.float32))
        self.register_buffer('image_ref', image_ref)

        # camera parameters
        self.camera_position = nn.Parameter(
            torch.from_numpy(np.array([6, 10, -14], dtype=np.float32)))

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at')
        renderer.eye = self.camera_position
        self.renderer = renderer
Example #23
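A Chainer mesh decoder predicting a centroid and per-vertex bias, with separate learning rates for the two output heads.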
    def __init__(self,
                 filename_obj,
                 dim_in=512,
                 centroid_scale=0.1,
                 bias_scale=1.0,
                 centroid_lr=0.1,
                 bias_lr=1.0):
        super(Decoder, self).__init__()

        with self.init_scope():
            self.vertices_base, self.faces = neural_renderer.load_obj(
                filename_obj)
            self.num_vertices = self.vertices_base.shape[0]
            self.num_faces = self.faces.shape[0]
            self.centroid_scale = centroid_scale
            self.bias_scale = bias_scale
            self.obj_scale = 0.5

            dim = 1024
            dim_hidden = [dim, dim * 2]
            self.linear1 = cl.Linear(dim_in, dim_hidden[0])
            self.linear2 = cl.Linear(dim_hidden[0], dim_hidden[1])
            self.linear_centroids = cl.Linear(dim_hidden[1], 3)
            self.linear_bias = cl.Linear(dim_hidden[1], self.num_vertices * 3)
            self.linear_centroids.W.lr = centroid_lr
            self.linear_centroids.b.lr = centroid_lr
            self.linear_bias.W.lr = bias_lr
            self.linear_bias.b.lr = bias_lr
Example #25
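A PyTorch decoder that also builds a Laplacian smoothness loss from the template returned by load_obj.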
    def __init__(self,
                 filename_obj,
                 dim_in=512,
                 centroid_scale=0.1,
                 bias_scale=1.0,
                 centroid_lr=0.1,
                 bias_lr=1.0):
        super(Decoder, self).__init__()
        # load .obj
        vertices_base, faces = nr.load_obj(filename_obj)
        # faces = faces[:, list(reversed(list(range(faces.shape[-1]))))]
        self.register_buffer('vertices_base', vertices_base)
        self.register_buffer('faces', faces)

        self.laplacian_loss = LaplacianFaceLoss(vertices_base, faces)

        self.nv = self.vertices_base.size(0)
        self.nf = self.faces.size(0)
        self.centroid_scale = centroid_scale
        self.bias_scale = bias_scale
        self.obj_scale = 0.5

        dim = 1024
        dim_hidden = [dim, dim * 2]
        self.fc1 = nn.Linear(dim_in, dim_hidden[0])
        self.fc2 = nn.Linear(dim_hidden[0], dim_hidden[1])
        self.fc_centroid = nn.Linear(dim_hidden[1], 3)
        self.fc_bias = nn.Linear(dim_hidden[1], self.nv * 3)
Example #26
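Another PyTorch port of the decoder, moving the template onto an externally defined device.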
    def __init__(self,
                 filename_obj,
                 dim_in=512,
                 centroid_scale=0.1,
                 bias_scale=1.0,
                 centroid_lr=0.1,
                 bias_lr=1.0):
        super(Decoder, self).__init__()
        self.vertices_base, self.faces = nr.load_obj(filename_obj)
        self.vertices_base = self.vertices_base.to(device)
        self.faces = self.faces.to(device)

        self.num_vertices = self.vertices_base.shape[0]
        self.num_faces = self.faces.shape[0]  # TODO: add centroid_lr and bias_lr
        self.centroid_scale = centroid_scale
        self.bias_scale = bias_scale
        self.obj_scale = 0.5

        dim = 1024
        dim_hidden = [dim, dim * 2]
        self.linear1 = nn.Linear(dim_in, dim_hidden[0])
        self.linear2 = nn.Linear(dim_hidden[0], dim_hidden[1])
        self.linear_centroids = nn.Linear(dim_hidden[1], 3)
        self.linear_bias = nn.Linear(dim_hidden[1], self.num_vertices * 3)
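Example #27
The Chainer GIF example: render the teapot every 4°, dump the frames as PNGs, and assemble them with ImageMagick's convert.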
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--filename_input',
                        type=str,
                        default='./examples/data/teapot.obj')
    parser.add_argument('-o',
                        '--filename_output',
                        type=str,
                        default='./examples/data/example1.gif')
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    working_directory = os.path.dirname(args.filename_output)

    # other settings
    camera_distance = 2.732
    elevation = 30
    texture_size = 2

    # load .obj
    vertices, faces = neural_renderer.load_obj(args.filename_input)
    vertices = vertices[
        None, :, :]  # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
    faces = faces[None, :, :]  # [num_faces, 3] -> [batch_size=1, num_faces, 3]

    # create texture [batch_size=1, num_faces, texture_size, texture_size, texture_size, RGB]
    textures = np.ones(
        (1, faces.shape[1], texture_size, texture_size, texture_size, 3),
        'float32')

    # to gpu
    chainer.cuda.get_device_from_id(args.gpu).use()
    vertices = chainer.cuda.to_gpu(vertices)
    faces = chainer.cuda.to_gpu(faces)
    textures = chainer.cuda.to_gpu(textures)

    # create renderer
    renderer = neural_renderer.Renderer()

    # draw object
    for num, azimuth in enumerate(range(0, 360, 4)):
        renderer.eye = neural_renderer.get_points_from_angles(
            camera_distance, elevation, azimuth)
        images = renderer.render(
            vertices, faces,
            textures)  # [batch_size, RGB, image_size, image_size]
        image = images.data.get()[0].transpose(
            (1, 2, 0))  # [image_size, image_size, RGB]
        scipy.misc.imsave('%s/_tmp_%04d.png' % (working_directory, num), image)

    # generate gif (need ImageMagick)
    options = '-delay 8 -loop 0 -layers optimize'
    subprocess.call('convert %s %s/_tmp_*.png %s' %
                    (options, working_directory, args.filename_output),
                    shell=True)

    # remove temporary files
    for filename in glob.glob('%s/_tmp_*.png' % working_directory):
        os.remove(filename)
Example #28
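A debugging variant of the PyTorch GIF example that replaces the loaded mesh with a single hard-coded triangle and renders it in 'canonical' camera mode.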
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--filename_input',
                        type=str,
                        default=os.path.join(data_dir, 'teapot.obj'))
    parser.add_argument('-o',
                        '--filename_output',
                        type=str,
                        default=os.path.join(data_dir, 'example1.gif'))
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    # other settings
    camera_distance = 2.732
    elevation = 30
    texture_size = 2

    # load .obj
    vertices, faces = nr.load_obj(args.filename_input)

    vertices = torch.FloatTensor([[0, 0, 0], [0, 1.5, 0], [1, 0, 0]]).cuda()
    faces = torch.IntTensor([[0, 1, 2]]).cuda()

    # self.register_buffer('vertices', vertices[None, :, :])
    # self.register_buffer('faces', faces[None, :, :])

    vertices = vertices[
        None, :, :]  # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
    faces = faces[None, :, :]  # [num_faces, 3] -> [batch_size=1, num_faces, 3]

    # create texture [batch_size=1, num_faces, texture_size, texture_size, texture_size, RGB]
    textures = torch.ones(1,
                          faces.shape[1],
                          texture_size,
                          texture_size,
                          texture_size,
                          3,
                          dtype=torch.float32).cuda()

    # to gpu: nothing to do here; the tensors above were created with .cuda()

    # create renderer
    renderer = nr.Renderer(camera_mode='canonical')

    # draw object
    loop = tqdm.tqdm(range(0, 360, 4))
    writer = imageio.get_writer(args.filename_output, mode='I')
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.eye = [0, 0, -5]
        images, _, _ = renderer(
            vertices, faces,
            textures)  # [batch_size, RGB, image_size, image_size]
        image = images.detach().cpu().numpy()[0].transpose(
            (1, 2, 0))  # [image_size, image_size, RGB]
        # imsave('./examples/tmp/_tmp_test.png', image)
        writer.append_data((255 * image).astype(np.uint8))
    writer.close()
Example #29
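A test helper that loads the teapot, attaches constant textures, tiles everything into a minibatch, and moves it to the GPU.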
def load_teapot_batch(batch_size=4, target_num=2):
    vertices, faces = neural_renderer.load_obj('./tests/data/teapot.obj')
    textures = np.ones((faces.shape[0], 4, 4, 4, 3), 'float32')
    vertices, faces, textures = to_minibatch((vertices, faces, textures), batch_size, target_num)
    vertices = chainer.cuda.to_gpu(vertices)
    faces = chainer.cuda.to_gpu(faces)
    textures = chainer.cuda.to_gpu(textures)
    return vertices, faces, textures
Example #30
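Builds a rasterizer from a single OBJ file, loaded on the CPU without normalization.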
 def init_rasterizer(self, obj_path, global_RT):
     self.cur_obj_path = obj_path
     obj_data = {}
     obj_data['v_attr'], obj_data['f_attr'] = nr.load_obj(obj_path, normalization=False, use_cuda=False)
     self.rasterizer = network.Rasterizer(self.cfg,
                         obj_data = obj_data,
                         # preset_uv_path = cfg.DATASET_FVV.UV_PATH,
                         global_RT = global_RT)
     self.rasterizer.cuda()
Example #32
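A vertex predictor that flips the mesh's Y axis and tiles it across the batch, exposing the vertices as a trainable parameter.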
 def __init__(self, filename_obj, batch_size=10, image_size=256):
     super(VertexPredictor, self).__init__()
     vertices, faces = nr.load_obj(filename_obj)
     vertices[:, 1] = -vertices[:, 1]
     vertices = vertices[None, :, :]
     vertices = np.tile(vertices, (batch_size,1,1))
     faces = faces[None, :, :]
     faces = np.tile(faces, (batch_size,1,1))
     self.vertices = nn.Parameter(torch.from_numpy(vertices))
     self.register_buffer('faces', torch.from_numpy(faces))
Example #33
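The silhouette-only Chainer GIF example, using render_silhouettes instead of the textured renderer.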
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--filename_input',
                        type=str,
                        default='./examples/data/teapot.obj')
    parser.add_argument('-o',
                        '--filename_output',
                        type=str,
                        default='./examples/data/example1.gif')
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    working_directory = os.path.dirname(args.filename_output)

    # other settings
    camera_distance = 2.732
    elevation = 30

    # load .obj
    # vertices: [num_vertices, XYZ]
    # faces: # [num_faces, 3]
    vertices, faces = neural_renderer.load_obj(args.filename_input)
    vertices = vertices[None, :, :]  #  -> [batch_size=1, num_vertices, XYZ]

    # to gpu
    chainer.cuda.get_device_from_id(args.gpu).use()
    vertices = chainer.cuda.to_gpu(vertices)
    faces = chainer.cuda.to_gpu(faces)

    # create renderer
    renderer = neural_renderer.Renderer()

    # draw object
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.viewpoints = neural_renderer.get_points_from_angles(
            camera_distance, elevation, azimuth)
        images = renderer.render_silhouettes(
            vertices, faces)  # [batch_size, image_size, image_size]
        image = images.data.get()[0]  # [image_size, image_size]
        scipy.misc.toimage(image, cmin=0, cmax=1).save(
            '%s/_tmp_%04d.png' % (working_directory, num))

    # generate gif (need ImageMagick)
    options = '-delay 8 -loop 0 -layers optimize'
    subprocess.call('convert %s %s/_tmp_*.png %s' %
                    (options, working_directory, args.filename_output),
                    shell=True)

    # remove temporary files
    for filename in glob.glob('%s/_tmp_*.png' % working_directory):
        os.remove(filename)
Example #36
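A trainable Mesh module: vertices come from load_obj and the per-face textures are normally initialized parameters.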
    def __init__(self, filename_obj, texture_size=4, normalization=True):
        super(Mesh, self).__init__()

        with self.init_scope():
            # load .obj
            vertices, faces = neural_renderer.load_obj(filename_obj, normalization)
            self.vertices = chainer.Parameter(vertices)
            self.faces = faces
            self.num_vertices = self.vertices.shape[0]
            self.num_faces = self.faces.shape[0]

            # create textures
            init = chainer.initializers.Normal()
            shape = (self.num_faces, texture_size, texture_size, texture_size, 3)
            self.textures = chainer.Parameter(init, shape)
            self.texture_size = texture_size
Example #37
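The textured Chainer GIF example, saving frames with scipy.misc.toimage before invoking ImageMagick.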
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename_input', type=str, default='./examples/data/teapot.obj')
    parser.add_argument('-o', '--filename_output', type=str, default='./examples/data/example1.gif')
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    working_directory = os.path.dirname(args.filename_output)

    # other settings
    camera_distance = 2.732
    elevation = 30
    texture_size = 2

    # load .obj
    vertices, faces = neural_renderer.load_obj(args.filename_input)
    vertices = vertices[None, :, :]  # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
    faces = faces[None, :, :]  # [num_faces, 3] -> [batch_size=1, num_faces, 3]

    # create texture [batch_size=1, num_faces, texture_size, texture_size, texture_size, RGB]
    textures = np.ones((1, faces.shape[1], texture_size, texture_size, texture_size, 3), 'float32')

    # to gpu
    chainer.cuda.get_device_from_id(args.gpu).use()
    vertices = chainer.cuda.to_gpu(vertices)
    faces = chainer.cuda.to_gpu(faces)
    textures = chainer.cuda.to_gpu(textures)

    # create renderer
    renderer = neural_renderer.Renderer()

    # draw object
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.eye = neural_renderer.get_points_from_angles(camera_distance, elevation, azimuth)
        images = renderer.render(vertices, faces, textures)  # [batch_size, RGB, image_size, image_size]
        image = images.data.get()[0].transpose((1, 2, 0))  # [image_size, image_size, RGB]
        scipy.misc.toimage(image, cmin=0, cmax=1).save('%s/_tmp_%04d.png' % (working_directory, num))

    # generate gif (need ImageMagick)
    options = '-delay 8 -loop 0 -layers optimize'
    subprocess.call('convert %s %s/_tmp_*.png %s' % (options, working_directory, args.filename_output), shell=True)

    # remove temporary files
    for filename in glob.glob('%s/_tmp_*.png' % working_directory):
        os.remove(filename)
Example #39
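A sanity check on the vertex and face counts of the bundled teapot mesh.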
 def test_teapot(self):
     vertices, faces = neural_renderer.load_obj('./tests/data/teapot.obj')
     assert (faces.shape[0] == 2464)
     assert (vertices.shape[0] == 1292)
Example #40
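A benchmark that times forward and backward passes for silhouette and textured rendering over a sweep of viewpoints.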
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename_input', type=str, default='./examples/data/teapot.obj')
    parser.add_argument('-bs', '--batch_size', type=int, default=1)
    parser.add_argument('-is', '--image_size', type=int, default=256)
    parser.add_argument('-us', '--unsafe', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    # other settings
    camera_distance = 2.732
    elevation = 30
    texture_size = 2

    # load .obj
    vertices, faces = neural_renderer.load_obj(args.filename_input)
    vertices = vertices[None, :, :]  # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
    faces = faces[None, :, :]  # [num_faces, 3] -> [batch_size=1, num_faces, 3]

    # create texture [batch_size=1, num_faces, texture_size, texture_size, texture_size, RGB]
    textures = np.ones((1, faces.shape[1], texture_size, texture_size, texture_size, 3), 'float32')

    # tile to minibatch
    vertices = np.tile(vertices, (args.batch_size, 1, 1))
    faces = np.tile(faces, (args.batch_size, 1, 1))
    textures = np.tile(textures, (args.batch_size, 1, 1, 1, 1, 1))

    # to gpu
    chainer.cuda.get_device_from_id(args.gpu).use()
    vertices = chainer.Variable(chainer.cuda.to_gpu(vertices))
    faces = chainer.cuda.to_gpu(faces)
    textures = chainer.Variable(chainer.cuda.to_gpu(textures))

    # create renderer
    renderer = neural_renderer.Renderer()
    renderer.image_size = args.image_size

    # draw object
    times_forward = []
    times_backward = []
    loop = tqdm.tqdm(range(0, 360, 15))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.eye = neural_renderer.get_points_from_angles(camera_distance, elevation, azimuth)
        time_start = time.time()
        images = renderer.render_silhouettes(vertices, faces)  # [batch_size, image_size, image_size]
        _ = images.data[0, 0, 0].get()
        time_end = time.time()
        times_forward.append(time_end - time_start)
        loss = chainer.functions.sum(images)
        _ = loss.data.get()
        time_start = time.time()
        loss.backward()
        time_end = time.time()
        times_backward.append(time_end - time_start)

    print('silhouette forward time: %.3f ms' % (1000 * np.sum(times_forward[1:]) / len(times_forward[1:])))
    print('silhouette backward time: %.3f ms' % (1000 * np.sum(times_backward[1:]) / len(times_backward[1:])))

    # draw object
    times_forward = []
    times_backward = []
    loop = tqdm.tqdm(range(0, 360, 15))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.eye = neural_renderer.get_points_from_angles(camera_distance, elevation, azimuth)
        time_start = time.time()
        images = renderer.render(vertices, faces, textures)  # [batch_size, RGB, image_size, image_size]
        _ = images.data[0, 0, 0, 0].get()
        time_end = time.time()
        times_forward.append(time_end - time_start)
        loss = chainer.functions.sum(images)
        _ = loss.data.get()
        time_start = time.time()
        loss.backward()
        time_end = time.time()
        times_backward.append(time_end - time_start)

    print('texture forward time: %.3f ms' % (1000 * np.sum(times_forward[1:]) / len(times_forward[1:])))
    print('texture backward time: %.3f ms' % (1000 * np.sum(times_backward[1:]) / len(times_backward[1:])))