    def test_forward_case1(self):
        # Render with Chainer
        target_num = 2

        vertices, faces = neural_renderer_chainer.load_obj(
            './tests_torch/data/teapot.obj')
        vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
        vertices_batch[target_num] = vertices
        vertices, faces = neural_renderer_chainer.to_gpu(
            (vertices_batch, faces))
        renderer = neural_renderer_chainer.Renderer()
        renderer.anti_aliasing = False
        renderer.viewpoints = neural_renderer_chainer.get_points_from_angles(
            2.732, 0, 0)
        images_chainer = renderer.render_silhouettes(vertices,
                                                     faces).data[target_num]

        # Render with PyTorch
        target_num = 2
        vertices, faces = neural_renderer_torch.load_obj(
            './tests_torch/data/teapot.obj')
        vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
        vertices_batch[target_num] = vertices
        vertices, faces = neural_renderer_torch.to_gpu((vertices_batch, faces))

        renderer = neural_renderer_torch.Renderer()
        renderer.anti_aliasing = False
        renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
            2.732, 0, 0)
        images_pytorch = renderer.render_silhouettes(vertices,
                                                     faces).data[target_num]

        np.testing.assert_allclose(images_pytorch.cpu().numpy(),
                                   images_chainer.get(),
                                   atol=2e-3)
    def __call__(self, textures):
        self.renderer.viewpoints = neural_renderer.get_points_from_angles(
            2.732, 0, np.random.uniform(0, 360))
        images = self.renderer.render_rgb(self.vertices, self.faces,
                                          self.vertices_t, self.faces_t,
                                          torch.tanh(textures))
        loss = torch.sum((images[0] - self.image_ref.permute((2, 0, 1)))**2)
        return loss
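# For context, a minimal sketch of the Model that the __call__ above belongs to.
# The attribute names mirror its usage; the initialisation (load_obj with
# load_textures=True, imread, Renderer) follows the API seen in the other
# snippets, and the rest is an illustrative assumption, not the original class.
import torch
import neural_renderer_torch as neural_renderer


class Model(torch.nn.Module):

    def __init__(self, filename_obj, filename_ref):
        super().__init__()
        # Fixed geometry and texture coordinates; only textures are optimized.
        vertices, faces, vertices_t, faces_t, textures = neural_renderer.load_obj(
            filename_obj, load_textures=True)
        self.register_buffer('vertices', torch.as_tensor(vertices[None, :, :]))
        self.register_buffer('faces', torch.as_tensor(faces))
        self.register_buffer('vertices_t', torch.as_tensor(vertices_t[None, :, :]))
        self.register_buffer('faces_t', torch.as_tensor(faces_t))
        self.textures = torch.nn.Parameter(torch.as_tensor(textures[None]))

        # Reference image (HWC) that __call__ compares the rendered view against.
        image_ref = neural_renderer.imread(filename_ref)
        self.register_buffer('image_ref', torch.as_tensor(image_ref))

        self.renderer = neural_renderer.Renderer()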
    def test_forward_case2(self):
        data = [
            [
                './tests_torch/data/4e49873292196f02574b5684eaec43e9/model.obj',
                neural_renderer_torch.get_points_from_angles(2.5, 10, -90),
                './tests_torch/data/4e49873292196f02574b5684eaec43e9.png',
            ],
            [
                './tests_torch/data/1cde62b063e14777c9152a706245d48/model.obj',
                neural_renderer_torch.get_points_from_angles(2.5, 10, 60),
                './tests_torch/data/1cde62b063e14777c9152a706245d48.png',
            ]
        ]

        for i, (filename, viewpoint, reference) in enumerate(data):
            # Render with Chainer
            renderer = neural_renderer_chainer.Renderer()
            renderer.draw_backside = False
            renderer.viewpoints = viewpoint
            vertices, faces, vertices_t, faces_t, textures = neural_renderer_chainer.load_obj(
                filename, load_textures=True)
            vertices, faces, vertices_t, faces_t, textures = neural_renderer_chainer.to_gpu(
                (vertices[None, :, :], faces, vertices_t[None, :, :], faces_t,
                 textures[None, :, :, :]))
            images = renderer.render(vertices, faces, vertices_t, faces_t,
                                     textures).data
            image_chainer = images[0].transpose((1, 2, 0)).get()

            # Render with PyTorch
            renderer = neural_renderer_torch.Renderer()
            renderer.draw_backside = False
            renderer.viewpoints = viewpoint
            vertices, faces, vertices_t, faces_t, textures = neural_renderer_torch.load_obj(
                filename, load_textures=True)
            vertices, faces, vertices_t, faces_t, textures = neural_renderer_torch.to_gpu(
                (vertices[None, :, :], faces, vertices_t[None, :, :], faces_t,
                 textures[None, :, :, :]))

            images = renderer.render(vertices, faces, vertices_t, faces_t,
                                     textures)
            images = images.cpu().numpy()
            image_pytorch = images[0].transpose((1, 2, 0))

            assert np.mean(np.abs(image_chainer - image_pytorch)) < 1e-4
def run():
    args = parse_arguments()
    working_dir = os.path.dirname(args.output_res_file)

    # Currently, only .obj files are supported.
    if not args.input_obj_file.endswith('.obj'):
        raise RuntimeError('Only .obj files are currently supported as input.')

    model = Model(args.input_obj_file, args.input_ref_file)
    model.to(args.gpu)

    # Create the optimizer object.
    optimizer = torch.optim.Adam(model.parameters())

    # Run the optimization loop.
    loop = tqdm.tqdm(range(300))
    for i in loop:
        loop.set_description('Optimizing')
        optimizer.zero_grad()
        loss = model()
        loss.backward()
        optimizer.step()

        # Scale each frame to the [0, 255] interval.
        image = model.renderer.render_silhouettes(model.vertices, model.faces)
        image = image.detach()[0].cpu().numpy()
        min_val, max_val = image.min(), image.max()
        image = (image - min_val) / (max_val - min_val) * 255

        # Save each frame to the working directory.
        image = Image.fromarray(image.astype(np.uint8))
        image.save('%s/_tmp_%04d.png' % (working_dir, i))

    make_gif(working_dir, args.output_opt_file)

    # Run the rendering loop.
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Rendering')
        model.renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
            CAMERA_DISTANCE, ELEVATION, azimuth)

        # Scale each frame to the [0, 255] interval.
        image = model.renderer.render_silhouettes(model.vertices, model.faces)
        image = image.detach()[0].cpu().numpy()
        min_val, max_val = image.min(), image.max()
        image = (image - min_val) / (max_val - min_val) * 255

        # Save each frame to the working directory.
        image = Image.fromarray(image.astype(np.uint8))
        image.save('%s/_tmp_%04d.png' % (working_dir, num))

    make_gif(working_dir, args.output_res_file)
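# make_gif is not defined in these snippets. A minimal sketch using imageio
# (an assumption; the original helper may differ) that assembles the
# _tmp_*.png frames written above and removes them afterwards:
import glob
import os

import imageio


def make_gif(working_dir, output_file):
    # Collect the temporary frames in the order they were written
    # (zero-padded names sort correctly).
    frames = sorted(glob.glob(os.path.join(working_dir, '_tmp_*.png')))
    with imageio.get_writer(output_file, mode='I') as writer:
        for filename in frames:
            writer.append_data(imageio.imread(filename))
            os.remove(filename)  # clean up the temporary frame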
def make_ref_image(input_ref_file, input_obj_file, gpu):
    model = Model(input_obj_file)
    model.to(gpu)

    model.renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
        2.732, 30, -15)
    images = model.renderer.render_silhouettes(model.vertices, model.faces)
    image = images.detach()[0].cpu().numpy()
    min_val, max_val = image.min(), image.max()
    image = (image - min_val) / (max_val - min_val) * 255

    image = Image.fromarray(image.astype(np.uint8))
    image.save(input_ref_file)
    def test_case1(self):
        data = [
            [
                './tests_chainer/data/4e49873292196f02574b5684eaec43e9/model.obj',
                neural_renderer_torch.get_points_from_angles(2.5, 10, -90),
                './tests_chainer/data/4e49873292196f02574b5684eaec43e9.png',
            ],
            [
                './tests_chainer/data/1cde62b063e14777c9152a706245d48/model.obj',
                neural_renderer_torch.get_points_from_angles(2.5, 10, 60),
                './tests_chainer/data/1cde62b063e14777c9152a706245d48.png',
            ]
        ]
        filename_tmp = './tests_chainer/data/tmp.obj'

        renderer = neural_renderer_torch.Renderer()
        renderer.draw_backside = False
        for i, (filename, viewpoint, reference) in enumerate(data):
            renderer.viewpoints = viewpoint
            ref = neural_renderer_torch.imread(reference)

            vertices, faces, vertices_t, faces_t, textures = neural_renderer_torch.load_obj(
                filename, load_textures=True)
            neural_renderer_torch.save_obj(filename_tmp, vertices, faces,
                                           vertices_t, faces_t, textures)
            vertices, faces, vertices_t, faces_t, textures = neural_renderer_torch.load_obj(
                filename_tmp, load_textures=True)
            vertices, faces, vertices_t, faces_t, textures = neural_renderer_torch.to_gpu(
                (vertices[None, :, :], faces, vertices_t[None, :, :], faces_t,
                 textures[None, :, :, :]))

            images = renderer.render(vertices, faces, vertices_t, faces_t,
                                     textures).data
            image = images[0].transpose((1, 2, 0))

            chainer.testing.assert_allclose(ref, image, atol=1e-2, rtol=1e-2)

        for f in glob.glob('./tests_chainer/data/tmp*'):
            os.remove(f)
def run():
    args = parse_arguments()
    working_dir = os.path.dirname(args.output_file)

    # Currently, only .obj files are supported.
    if not args.input_file.endswith('.obj'):
        raise RuntimeError('Only .obj files are currently supported as input.')

    # Load the input data:
    #    vertices: [num_vertices, 3]
    #    faces: [num_faces, 3]
    vertices, faces = neural_renderer_torch.load_obj(args.input_file)

    # Add a batch size of 1:
    #    vertices: [1, num_vertices, 3]
    vertices = vertices[None, :, :]

    # Upload the data to the GPU.
    device = torch.device('cuda:' + str(args.gpu))
    torch.cuda.set_device(device)

    vertices = torch.tensor(vertices, device=device)
    faces = torch.tensor(faces, device=device)

    # Create the renderer object.
    renderer = neural_renderer_torch.Renderer()

    # Run the rendering loop.
    loop = tqdm.tqdm(range(0, 360, 4))

    for num, azimuth in enumerate(loop):
        loop.set_description('Rendering')
        renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
            CAMERA_DISTANCE, ELEVATION, azimuth)

        # Scale each frame to the [0, 255] interval.
        image = renderer.render_silhouettes(vertices, faces)[0].cpu().numpy()
        min_val, max_val = image.min(), image.max()
        image = (image - min_val) / (max_val - min_val) * 255

        # Save each frame to the working directory.
        image = Image.fromarray(image.astype(np.uint8))
        image.save('%s/_tmp_%04d.png' % (working_dir, num))

    make_gif(working_dir, args.output_file)
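# parse_arguments is likewise not shown. A plausible sketch for this script:
# the attribute names (input_file, output_file, gpu) are taken from the usage
# above, while the flags and defaults are illustrative assumptions.
import argparse


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_file', type=str,
                        default='./examples_torch/data/teapot.obj')  # assumed default
    parser.add_argument('-o', '--output_file', type=str,
                        default='./examples_torch/data/example1.gif')  # assumed default
    parser.add_argument('-g', '--gpu', type=int, default=0)
    return parser.parse_args()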
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('-io',
                        '--filename_obj',
                        type=str,
                        default='./examples_chainer/data/teapot.obj')
    parser.add_argument('-ir',
                        '--filename_ref',
                        type=str,
                        default='./examples_chainer/data/example3_ref.png')
    parser.add_argument('-or',
                        '--filename_output',
                        type=str,
                        default='./examples_chainer/data/example3_result.gif')
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    working_directory = os.path.dirname(args.filename_output)

    model = Model(args.filename_obj, args.filename_ref)
    model.to(args.gpu)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    loop = tqdm.tqdm(range(300))
    for _ in loop:
        loop.set_description('Optimizing')
        optimizer.zero_grad()
        loss = model(model.textures)
        loss.backward()
        optimizer.step()

    # draw object
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        model.renderer.viewpoints = neural_renderer.get_points_from_angles(
            2.732, 0, azimuth)
        images = model.renderer.render_rgb(model.vertices, model.faces,
                                           model.vertices_t, model.faces_t,
                                           torch.tanh(model.textures))
        image = images.detach()[0].cpu().numpy().transpose((1, 2, 0))
        image = np.maximum(image, 0) * 255  # clip negative values
        image = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_RGB2BGR)
        cv2.imwrite('%s/_tmp_%04d.png' % (working_directory, num), image)
    make_gif(working_directory, args.filename_output)
def run():
    args = parse_arguments()
    working_dir = os.path.dirname(args.output_res_file)

    # Currently, only .obj files are supported.
    if not args.input_obj_file.endswith('.obj'):
        raise RuntimeError('Only .obj files are currently supported as input.')

    model = Model(args.input_obj_file, args.input_ref_file)
    model.to(args.gpu)

    # Create the optimizer object.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    # Run the optimization loop.
    loop = tqdm.tqdm(range(300))
    for _ in loop:
        loop.set_description('Optimizing')
        optimizer.zero_grad()
        loss = model(model.textures)
        loss.backward()
        optimizer.step()

    # Run the rendering loop.
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Rendering')
        model.renderer.viewpoints = neural_renderer.get_points_from_angles(
            CAMERA_DISTANCE, ELEVATION, azimuth)

        images = model.renderer.render_rgb(model.vertices, model.faces,
                                           model.vertices_t, model.faces_t,
                                           torch.tanh(model.textures))
        image = images.detach()[0].cpu().numpy().transpose((1, 2, 0))
        image = np.maximum(image, 0) * 255  # clip negative values

        # Save each frame to the working directory.
        image = Image.fromarray(image.astype(np.uint8))
        image.save('%s/_tmp_%04d.png' % (working_dir, num))

    make_gif(working_dir, args.output_res_file)
    def test_forward_case3(self):
        # Render with Chainer
        target_num = 2
        vertices, faces = neural_renderer_chainer.load_obj(
            './tests_torch/data/teapot.obj')
        vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
        vertices_batch[target_num] = vertices
        vertices, faces = neural_renderer_chainer.to_gpu(
            (vertices_batch, faces))

        renderer = neural_renderer_chainer.Renderer()
        renderer.anti_aliasing = False
        renderer.viewpoints = neural_renderer_chainer.get_points_from_angles(
            2, 30., 0)
        images = renderer.render_depth(vertices, faces).data[target_num]
        images_chainer = (images - images.min()) / (images.max() -
                                                    images.min())
        images_chainer = images_chainer.get()

        # Render with PyTorch
        target_num = 2
        vertices, faces = neural_renderer_torch.load_obj(
            './tests_torch/data/teapot.obj')
        vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
        vertices_batch[target_num] = vertices
        vertices, faces = neural_renderer_torch.to_gpu((vertices_batch, faces))

        renderer = neural_renderer_torch.Renderer()
        renderer.anti_aliasing = False
        renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
            2, 30., 0)
        images = renderer.render_depth(vertices, faces).data[target_num]
        images_pytorch = (images - images.min()) / (images.max() -
                                                    images.min())
        images_pytorch = images_pytorch.cpu().numpy()

        assert np.mean(np.abs(images_pytorch - images_chainer)) < 1e-4
    def test_forward_case4(self):
        # Render with Chainer

        target_num = 2
        vertices, faces = neural_renderer_chainer.load_obj(
            './tests_torch/data/teapot.obj')
        vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
        vertices_batch[target_num] = vertices
        vertices, faces = neural_renderer_chainer.to_gpu(
            (vertices_batch, faces))
        vertices_t, faces_t, textures = neural_renderer_chainer.create_textures(
            faces.shape[0])
        vertices_t = chainer.functions.tile(vertices_t[None, :, :],
                                            (4, 1, 1)).data
        textures = chainer.functions.tile(textures[None, :, :, :],
                                          (4, 1, 1, 1)).data
        vertices_t = chainer.cuda.to_gpu(vertices_t)
        faces_t = chainer.cuda.to_gpu(faces_t)
        textures = chainer.cuda.to_gpu(textures)

        lights = []
        light_color = cp.asarray([[0.47481096, 0.7131511, 0.4510043],
                                  [0.49120015, 0.161955, 0.71638113],
                                  [0.32655084, 0.7805874, 0.7682426],
                                  [0.42193118, 0.90416473,
                                   0.5267034]]).astype(cp.float32)
        light_direction = cp.asarray([[0.328245, 0.8916046, 0.31189483],
                                      [0.99824226, 0.05838178, 0.00867782],
                                      [0.35747865, 0.61983925, 0.6985467],
                                      [0.0393897, 0.6937492,
                                       0.7191179]]).astype(cp.float32)
        lights.append(
            neural_renderer_chainer.DirectionalLight(light_color,
                                                     light_direction))

        light_color = cp.asarray([[0.2732121, 0.09439224, 0.38380036],
                                  [0.06487979, 0.02794903, 0.261018],
                                  [0.28739947, 0.2996951, 0.42412606],
                                  [0.10019363, 0.26517034,
                                   0.07372955]]).astype(cp.float32)
        lights.append(neural_renderer_chainer.AmbientLight(light_color))

        light_color = cp.asarray([[0.32410273, 0.24369295, 0.3126097],
                                  [0.3456873, 0.24514836, 0.21663068],
                                  [0.33004418, 0.25533527, 0.48039845],
                                  [0.29468802, 0.44377372,
                                   0.10724097]]).astype(cp.float32)
        lights.append(neural_renderer_chainer.SpecularLight(light_color))

        renderer = neural_renderer_chainer.Renderer()
        renderer.viewpoints = neural_renderer_chainer.get_points_from_angles(
            2.732, 30, 30)
        renderer.draw_backside = False
        images_chainer = renderer.render_rgb(vertices,
                                             faces,
                                             vertices_t,
                                             faces_t,
                                             textures,
                                             lights=lights).data[target_num]
        images_chainer = images_chainer.get()

        # Render with PyTorch
        target_num = 2
        vertices, faces = neural_renderer_torch.load_obj(
            './tests_torch/data/teapot.obj')
        vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
        vertices_batch[target_num] = vertices
        vertices, faces = neural_renderer_torch.to_gpu((vertices_batch, faces))
        vertices_t, faces_t, textures = neural_renderer_torch.create_textures(
            faces.shape[0])
        vertices_t = torch.as_tensor(vertices_t[None, :, :]).expand(
            (4, *vertices_t.shape))
        faces_t = torch.as_tensor(faces_t)
        textures = torch.as_tensor(textures[None, :, :, :]).expand(
            (4, *textures.shape))
        vertices_t = vertices_t.cuda()
        faces_t = faces_t.cuda()
        textures = textures.cuda()

        lights = []

        light_color = torch.as_tensor([[0.47481096, 0.7131511, 0.4510043],
                                       [0.49120015, 0.161955, 0.71638113],
                                       [0.32655084, 0.7805874, 0.7682426],
                                       [0.42193118, 0.90416473,
                                        0.5267034]]).type(torch.float32)
        light_direction = torch.as_tensor(
            [[0.328245, 0.8916046, 0.31189483],
             [0.99824226, 0.05838178, 0.00867782],
             [0.35747865, 0.61983925, 0.6985467],
             [0.0393897, 0.6937492, 0.7191179]]).type(torch.float32)
        lights.append(
            neural_renderer_torch.DirectionalLight(light_color,
                                                   light_direction))

        light_color = torch.as_tensor([[0.2732121, 0.09439224, 0.38380036],
                                       [0.06487979, 0.02794903, 0.261018],
                                       [0.28739947, 0.2996951, 0.42412606],
                                       [0.10019363, 0.26517034,
                                        0.07372955]]).type(torch.float32)
        lights.append(neural_renderer_torch.AmbientLight(light_color))

        light_color = torch.as_tensor([[0.32410273, 0.24369295, 0.3126097],
                                       [0.3456873, 0.24514836, 0.21663068],
                                       [0.33004418, 0.25533527, 0.48039845],
                                       [0.29468802, 0.44377372,
                                        0.10724097]]).type(torch.float32)
        lights.append(neural_renderer_torch.SpecularLight(light_color))

        renderer = neural_renderer_torch.Renderer()
        renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
            2.732, 30, 30)
        renderer.draw_backside = False
        images = renderer.render_rgb(vertices,
                                     faces,
                                     vertices_t,
                                     faces_t,
                                     textures,
                                     lights=lights).data[target_num]
        images_pytorch = images.cpu().numpy()

        assert np.mean(np.abs(images_pytorch - images_chainer)) < 1e-4
    def __call__(self):
        self.renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
            CAMERA_DISTANCE, ELEVATION, AZIMUTH)
        image = self.renderer.render_silhouettes(self.vertices, self.faces)
        loss = torch.sum(torch.pow(image - self.image_ref[None, :, :], 2))
        return loss
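# The __call__ above references module-level constants that do not appear in
# the excerpt. CAMERA_DISTANCE and ELEVATION mirror the 2.732 / 30 values used
# elsewhere in these examples; AZIMUTH is an illustrative guess.
CAMERA_DISTANCE = 2.732
ELEVATION = 30
AZIMUTH = 0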
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--filename_input',
                        type=str,
                        default='./examples/data/teapot.obj')
    parser.add_argument('-bs', '--batch_size', type=int, default=1)
    parser.add_argument('-is', '--image_size', type=int, default=256)
    parser.add_argument('-us', '--unsafe', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    # other settings
    camera_distance = 2.732
    elevation = 30
    texture_size = 2

    # load .obj
    vertices, faces = neural_renderer_chainer.load_obj(args.filename_input)
    vertices = vertices[
        None, :, :]  # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]

    # create texture [batch_size=1, num_faces, texture_size, texture_size, texture_size, RGB]
    vertices_t, faces_t, textures = neural_renderer_chainer.create_textures(
        faces.shape[0])

    # tile to minibatch
    vertices = np.tile(vertices, (args.batch_size, 1, 1))
    vertices_t = np.tile(vertices_t, (args.batch_size, 1, 1))
    textures = np.tile(textures, (args.batch_size, 1, 1, 1))

    # to gpu
    chainer.cuda.get_device_from_id(args.gpu).use()
    vertices = chainer.Variable(chainer.cuda.to_gpu(vertices))
    faces = chainer.cuda.to_gpu(faces)
    vertices_t = chainer.Variable(chainer.cuda.to_gpu(vertices_t))
    faces_t = chainer.cuda.to_gpu(faces_t)
    textures = chainer.Variable(chainer.cuda.to_gpu(textures))

    # create renderer
    renderer = neural_renderer_chainer.Renderer()
    renderer.image_size = args.image_size

    # draw object
    times_forward = []
    times_backward = []
    loop = tqdm.tqdm(range(0, 360, 15))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.viewpoints = neural_renderer_chainer.get_points_from_angles(
            camera_distance, elevation, azimuth)
        time_start = time.time()
        images = renderer.render_silhouettes(
            vertices, faces)  # [batch_size, image_size, image_size]
        _ = images.data[0, 0, 0].get()
        time_end = time.time()
        times_forward.append(time_end - time_start)
        loss = chainer.functions.sum(images)
        _ = loss.data.get()
        time_start = time.time()
        loss.backward()
        time_end = time.time()
        times_backward.append(time_end - time_start)

    print('silhouette forward time: %.3f ms' %
          (np.mean(times_forward[1:]) * 1000))
    print('silhouette backward time: %.3f ms' %
          (np.mean(times_backward[1:]) * 1000))

    # draw object
    times_forward = []
    times_backward = []
    loop = tqdm.tqdm(range(0, 360, 15))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.viewpoints = neural_renderer_chainer.get_points_from_angles(
            camera_distance, elevation, azimuth)
        time_start = time.time()
        images = renderer.render(
            vertices, faces, vertices_t, faces_t,
            textures)  # [batch_size, RGB, image_size, image_size]
        _ = images.data[0, 0, 0, 0].get()
        time_end = time.time()
        times_forward.append(time_end - time_start)
        loss = chainer.functions.sum(images)
        _ = loss.data.get()
        time_start = time.time()
        loss.backward()
        time_end = time.time()
        times_backward.append(time_end - time_start)

    print('texture forward time: %.3f ms' %
          (np.mean(times_forward[1:]) * 1000))
    print('texture backward time: %.3f ms' %
          (np.mean(times_backward[1:]) * 1000))