def __init__(self, input_obj_file, input_ref_file):
        """Build the model: mesh geometry, learnable textures, target image, renderer."""
        super(Model, self).__init__()

        # Read mesh geometry from disk and add a leading batch axis to the vertices.
        obj_vertices, obj_faces = neural_renderer.load_obj(input_obj_file)
        self.vertices = torch.as_tensor(obj_vertices[None, :, :])
        self.faces = torch.as_tensor(obj_faces)

        # Initialize a texture atlas; the texture tensor is the only
        # quantity exposed to the optimizer (requires_grad=True).
        tex_vertices, tex_faces, tex_values = neural_renderer.create_textures(
            self.faces.shape[0], texture_size=4)
        self.vertices_t = torch.as_tensor(tex_vertices[None, :, :])
        self.faces_t = torch.as_tensor(tex_faces)
        self.textures = torch.nn.Parameter(
            torch.as_tensor(tex_values[None, :, :, :]), requires_grad=True)

        # Target image the rendered output should be optimized towards.
        self.image_ref = torch.as_tensor(neural_renderer.imread(input_ref_file))

        # Renderer configured for orthographic projection.
        self.renderer = neural_renderer.Renderer()
        self.renderer.perspective = False
    def __init__(self, filename_obj, filename_ref):
        """Set up mesh, trainable textures, reference image, and renderer."""
        super(Model, self).__init__()

        # Mesh from the .obj file; vertices get a batch dimension.
        mesh_vertices, mesh_faces = neural_renderer.load_obj(filename_obj)
        self.vertices = torch.as_tensor(mesh_vertices[None, :, :])
        self.faces = torch.as_tensor(mesh_faces)

        # Fresh texture atlas; only the texture values are learnable.
        uv_vertices, uv_faces, texture_values = neural_renderer.create_textures(
            self.faces.shape[0], texture_size=4)
        self.vertices_t = torch.as_tensor(uv_vertices[None, :, :])
        self.faces_t = torch.as_tensor(uv_faces)
        self.textures = torch.nn.Parameter(
            torch.as_tensor(texture_values[None, :, :, :]), requires_grad=True)

        # Reference image used as the optimization target.
        self.image_ref = torch.as_tensor(neural_renderer.imread(filename_ref))

        # Renderer with perspective projection turned off.
        self.renderer = neural_renderer.Renderer()
        self.renderer.perspective = False
    def test_backward_case1(self):
        """Optimize a quad's vertices so its rendered silhouette matches a target image."""
        quad = [
            [0.1, 0.1, 1.],
            [-0.1, 0.1, 1.],
            [-0.1, -0.1, 1.],
            [0.1, -0.1, 1.],
        ]
        tris = [[0, 1, 2], [0, 2, 3]]

        # Target silhouette: inverted first channel of the gradient image, on GPU.
        ref = neural_renderer_torch.imread('./tests_torch/data/gradient.png')
        ref = torch.as_tensor((1 - ref)[:, :, 0]).cuda()

        # Move geometry to the GPU; only the vertices are optimized.
        verts, tri_idx = neural_renderer_torch.to_gpu(
            (np.array(quad, 'float32'), np.array(tris, 'int32')))
        verts = Parameter(verts, True)
        tri_idx = torch.as_tensor(tri_idx)

        optimizer = torch.optim.Adam([verts], lr=0.005)
        ref = torch.as_tensor(ref)

        os.makedirs("tmp", exist_ok=True)

        for step in range(350):
            params = RasterizeParam()
            hyperparams = RasterizeHyperparam(image_size=256,
                                              anti_aliasing=False)
            silhouettes = neural_renderer_torch.rasterize_silhouettes(
                verts[None, :, :], tri_idx, params, hyperparams)
            image = silhouettes[0]

            # Loss is 1 - IoU between the rendered and reference silhouettes.
            iou = torch.sum(image * ref) / torch.sum(image + ref - image * ref)
            iou = torch.as_tensor(1, dtype=torch.float32,
                                  device=iou.device) - iou
            loss = iou

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Dump each intermediate frame for visual inspection.
            pylab.imsave('tmp/t%d.png' % step, image.data.cpu().numpy())
            print(step, loss.data, iou.data)

            # Converged: silhouettes overlap almost perfectly.
            if float(iou.data) < 0.01:
                return
        raise Exception
# ---- Example #4 ----
    def test_case1(self):
        """Round-trip two textured meshes through save_obj/load_obj and check the render."""
        data = [
            [
                './tests_chainer/data/4e49873292196f02574b5684eaec43e9/model.obj',
                neural_renderer_torch.get_points_from_angles(2.5, 10, -90),
                './tests_chainer/data/4e49873292196f02574b5684eaec43e9.png',
            ],
            [
                './tests_chainer/data/1cde62b063e14777c9152a706245d48/model.obj',
                neural_renderer_torch.get_points_from_angles(2.5, 10, 60),
                './tests_chainer/data/1cde62b063e14777c9152a706245d48.png',
            ]
        ]
        filename_tmp = './tests_chainer/data/tmp.obj'

        renderer = neural_renderer_torch.Renderer()
        renderer.draw_backside = False
        for filename, viewpoint, reference in data:
            renderer.viewpoints = viewpoint
            ref = neural_renderer_torch.imread(reference)

            # Load the mesh, write it back out, then reload the copy so the
            # comparison exercises both save_obj and load_obj.
            loaded = neural_renderer_torch.load_obj(filename, load_textures=True)
            vertices, faces, vertices_t, faces_t, textures = loaded
            neural_renderer_torch.save_obj(filename_tmp, vertices, faces,
                                           vertices_t, faces_t, textures)
            vertices, faces, vertices_t, faces_t, textures = neural_renderer_torch.load_obj(
                filename_tmp, load_textures=True)
            vertices, faces, vertices_t, faces_t, textures = neural_renderer_torch.to_gpu(
                (vertices[None, :, :], faces, vertices_t[None, :, :], faces_t,
                 textures[None, :, :, :]))

            # Render the round-tripped mesh and compare to the reference image.
            rendered = renderer.render(vertices, faces, vertices_t, faces_t,
                                       textures).data
            image = rendered[0].transpose((1, 2, 0))

            chainer.testing.assert_allclose(ref, image, atol=1e-2, rtol=1e-2)

        # Clean up every temporary file created by the round trip.
        for f in glob.glob('./tests_chainer/data/tmp*'):
            os.remove(f)
# ---- Example #5 ----
    def __init__(self, input_obj_file, input_ref_file=None):
        """Set up mesh, optional reference image, learnable camera, and renderer."""
        super(Model, self).__init__()

        # Mesh from the .obj file; vertices gain a batch dimension.
        mesh_vertices, mesh_faces = neural_renderer_torch.load_obj(input_obj_file)
        self.vertices = torch.as_tensor(mesh_vertices[None, :, :])
        self.faces = torch.as_tensor(mesh_faces)

        # Reference image is optional; None means no optimization target.
        if input_ref_file is None:
            self.image_ref = None
        else:
            self.image_ref = torch.as_tensor(
                neural_renderer_torch.imread(input_ref_file))

        # The camera position is the learnable quantity.
        self.camera_position = torch.nn.Parameter(
            torch.tensor([6, 10, -14], dtype=torch.float32))

        # Renderer views the scene from the (trainable) camera position.
        self.renderer = neural_renderer_torch.Renderer()
        self.renderer.viewpoints = self.camera_position