def __init__(self, input_obj_file, input_ref_file):
    super(Model, self).__init__()

    # Load the OBJ file.
    vertices, faces = neural_renderer.load_obj(input_obj_file)
    self.vertices = torch.as_tensor(vertices[None, :, :])
    self.faces = torch.as_tensor(faces)

    # Create the textures.
    vertices_t, faces_t, textures = neural_renderer.create_textures(
        self.faces.shape[0], texture_size=4)
    self.vertices_t = torch.as_tensor(vertices_t[None, :, :])
    self.faces_t = torch.as_tensor(faces_t)
    self.textures = torch.nn.Parameter(
        torch.as_tensor(textures[None, :, :, :]), requires_grad=True)

    # Load the reference image.
    self.image_ref = torch.as_tensor(neural_renderer.imread(input_ref_file))

    # Set up the renderer.
    renderer = neural_renderer.Renderer()
    renderer.perspective = False
    self.renderer = renderer
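The forward pass for this texture-optimization model is not shown here. A minimal sketch of what it might look like, assuming `renderer.render` takes `(vertices, faces, vertices_t, faces_t, textures)` as in the tests below and returns images in [batch, RGB, H, W] layout; the HWC comparison against `image_ref` is likewise an assumption:

def forward(self):
    # Render with the current (learnable) textures and penalize the
    # squared difference to the reference image. The permute assumes
    # the renderer outputs CHW images while image_ref is stored HWC.
    images = self.renderer.render(
        self.vertices, self.faces, self.vertices_t, self.faces_t,
        self.textures)
    loss = torch.sum((images[0].permute(1, 2, 0) - self.image_ref) ** 2)
    return loss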
def test_forward_case1(self):
    # Render with Chainer.
    target_num = 2
    vertices, faces = neural_renderer_chainer.load_obj(
        './tests_torch/data/teapot.obj')
    # Only the target batch element holds real geometry.
    vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
    vertices_batch[target_num] = vertices
    vertices, faces = neural_renderer_chainer.to_gpu((vertices_batch, faces))
    renderer = neural_renderer_chainer.Renderer()
    renderer.anti_aliasing = False
    renderer.viewpoints = neural_renderer_chainer.get_points_from_angles(
        2.732, 0, 0)
    images_chainer = renderer.render_silhouettes(
        vertices, faces).data[target_num]

    # Render with PyTorch.
    target_num = 2
    vertices, faces = neural_renderer_torch.load_obj(
        './tests_torch/data/teapot.obj')
    vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
    vertices_batch[target_num] = vertices
    vertices, faces = neural_renderer_torch.to_gpu((vertices_batch, faces))
    renderer = neural_renderer_torch.Renderer()
    renderer.anti_aliasing = False
    renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
        2.732, 0, 0)
    images_pytorch = renderer.render_silhouettes(
        vertices, faces).data[target_num]

    np.testing.assert_allclose(
        images_pytorch.cpu().numpy(), images_chainer.get(), atol=2e-3)
def __init__(self, input_obj_file, input_ref_file):
    super(Model, self).__init__()

    # Load the OBJ file. The vertex positions are the parameters
    # being optimized.
    vertices, faces = neural_renderer_torch.load_obj(input_obj_file)
    self.vertices = torch.nn.Parameter(torch.as_tensor(vertices[None, :, :]))
    self.faces = torch.as_tensor(faces)

    # Load the reference image, averaged over channels to a
    # single-channel silhouette.
    self.image_ref = torch.as_tensor(imread(input_ref_file).mean(-1))

    # Set up the renderer.
    renderer = neural_renderer_torch.Renderer()
    self.renderer = renderer
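The matching forward pass is omitted above; a minimal sketch for this silhouette-fitting model, assuming `render_silhouettes` returns a [batch, H, W] tensor and that `image_ref` has already been scaled to the renderer's [0, 1] value range:

def forward(self):
    # Render the current (learnable) geometry as a silhouette and
    # compare it to the reference with a squared-error loss.
    image = self.renderer.render_silhouettes(self.vertices, self.faces)
    loss = torch.sum((image - self.image_ref[None, :, :]) ** 2)
    return loss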
def run():
    args = parse_arguments()
    working_dir = os.path.dirname(args.output_file)

    # Currently, only .obj files are supported.
    if not args.input_file.endswith('.obj'):
        raise RuntimeError('Only .obj files are currently supported as input.')

    # Load the input data:
    #   vertices: [num_vertices, 3]
    #   faces:    [num_faces, 3]
    vertices, faces = neural_renderer_torch.load_obj(args.input_file)

    # Add a batch size of 1:
    #   vertices: [1, num_vertices, 3]
    vertices = vertices[None, :, :]

    # Upload the data to the GPU.
    device = torch.device('cuda:' + str(args.gpu))
    torch.cuda.set_device(device)
    vertices = torch.tensor(vertices, device=device)
    faces = torch.tensor(faces, device=device)

    # Create the renderer object.
    renderer = neural_renderer_torch.Renderer()

    # Run the rendering loop.
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Rendering')
        renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
            CAMERA_DISTANCE, ELEVATION, azimuth)

        # Scale each frame to the [0, 255] interval.
        image = renderer.render_silhouettes(vertices, faces)[0].cpu().numpy()
        min_val, max_val = image.min(), image.max()
        image = (image - min_val) / (max_val - min_val) * 255

        # Save each frame to the working directory.
        image = Image.fromarray(image.astype(np.uint8))
        image.save('%s/_tmp_%04d.png' % (working_dir, num))

    make_gif(working_dir, args.output_file)
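`parse_arguments` and `make_gif` are referenced but not defined in this snippet. A minimal sketch of both, with hypothetical flag names chosen to match the `args.input_file`, `args.output_file`, and `args.gpu` accesses above, and using imageio for the GIF assembly:

import argparse
import glob
import os

import imageio


def parse_arguments():
    # Hypothetical flag names and defaults; the original script may differ.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_file', type=str,
                        default='./examples/data/teapot.obj')
    parser.add_argument('-o', '--output_file', type=str,
                        default='./examples/data/example1.gif')
    parser.add_argument('-g', '--gpu', type=int, default=0)
    return parser.parse_args()


def make_gif(working_dir, output_file):
    # Collect the frames written by run() in order, assemble a GIF,
    # then delete the temporary files.
    frames = sorted(glob.glob(os.path.join(working_dir, '_tmp_*.png')))
    imageio.mimsave(output_file, [imageio.imread(f) for f in frames])
    for f in frames:
        os.remove(f)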
def test_forward_case2(self):
    data = [
        [
            './tests_torch/data/4e49873292196f02574b5684eaec43e9/model.obj',
            neural_renderer_torch.get_points_from_angles(2.5, 10, -90),
            './tests_torch/data/4e49873292196f02574b5684eaec43e9.png',
        ],
        [
            './tests_torch/data/1cde62b063e14777c9152a706245d48/model.obj',
            neural_renderer_torch.get_points_from_angles(2.5, 10, 60),
            './tests_torch/data/1cde62b063e14777c9152a706245d48.png',
        ],
    ]

    for i, (filename, viewpoint, reference) in enumerate(data):
        # Render with Chainer.
        renderer = neural_renderer_chainer.Renderer()
        renderer.draw_backside = False
        renderer.viewpoints = viewpoint
        vertices, faces, vertices_t, faces_t, textures = (
            neural_renderer_chainer.load_obj(filename, load_textures=True))
        vertices, faces, vertices_t, faces_t, textures = (
            neural_renderer_chainer.to_gpu(
                (vertices[None, :, :], faces, vertices_t[None, :, :],
                 faces_t, textures[None, :, :, :])))
        images = renderer.render(
            vertices, faces, vertices_t, faces_t, textures).data
        image_chainer = images[0].transpose((1, 2, 0)).get()

        # Render with PyTorch.
        renderer = neural_renderer_torch.Renderer()
        renderer.draw_backside = False
        renderer.viewpoints = viewpoint
        vertices, faces, vertices_t, faces_t, textures = (
            neural_renderer_torch.load_obj(filename, load_textures=True))
        vertices, faces, vertices_t, faces_t, textures = (
            neural_renderer_torch.to_gpu(
                (vertices[None, :, :], faces, vertices_t[None, :, :],
                 faces_t, textures[None, :, :, :])))
        images = renderer.render(vertices, faces, vertices_t, faces_t, textures)
        images = images.cpu().numpy()
        image_pytorch = images[0].transpose((1, 2, 0))

        assert np.mean(np.abs(image_chainer - image_pytorch)) < 1e-4
def test_case1(self):
    data = [
        [
            './tests_chainer/data/4e49873292196f02574b5684eaec43e9/model.obj',
            neural_renderer_torch.get_points_from_angles(2.5, 10, -90),
            './tests_chainer/data/4e49873292196f02574b5684eaec43e9.png',
        ],
        [
            './tests_chainer/data/1cde62b063e14777c9152a706245d48/model.obj',
            neural_renderer_torch.get_points_from_angles(2.5, 10, 60),
            './tests_chainer/data/1cde62b063e14777c9152a706245d48.png',
        ],
    ]
    filename_tmp = './tests_chainer/data/tmp.obj'

    renderer = neural_renderer_torch.Renderer()
    renderer.draw_backside = False

    for i, (filename, viewpoint, reference) in enumerate(data):
        renderer.viewpoints = viewpoint
        ref = neural_renderer_torch.imread(reference)

        # Save and reload the model to exercise the OBJ writer.
        vertices, faces, vertices_t, faces_t, textures = (
            neural_renderer_torch.load_obj(filename, load_textures=True))
        neural_renderer_torch.save_obj(
            filename_tmp, vertices, faces, vertices_t, faces_t, textures)
        vertices, faces, vertices_t, faces_t, textures = (
            neural_renderer_torch.load_obj(filename_tmp, load_textures=True))

        vertices, faces, vertices_t, faces_t, textures = (
            neural_renderer_torch.to_gpu(
                (vertices[None, :, :], faces, vertices_t[None, :, :],
                 faces_t, textures[None, :, :, :])))
        images = renderer.render(
            vertices, faces, vertices_t, faces_t, textures).data
        # Move to numpy before reordering the channels; a torch tensor's
        # transpose() does not accept an axis tuple.
        image = images[0].cpu().numpy().transpose((1, 2, 0))
        chainer.testing.assert_allclose(ref, image, atol=1e-2, rtol=1e-2)

    for f in glob.glob('./tests_chainer/data/tmp*'):
        os.remove(f)
def __init__(self, input_obj_file, input_ref_file=None):
    super(Model, self).__init__()

    # Load the OBJ file.
    vertices, faces = neural_renderer_torch.load_obj(input_obj_file)
    self.vertices = torch.as_tensor(vertices[None, :, :])
    self.faces = torch.as_tensor(faces)

    # Load the reference image.
    if input_ref_file is not None:
        self.image_ref = torch.as_tensor(
            neural_renderer_torch.imread(input_ref_file))
    else:
        self.image_ref = None

    # Set up the camera parameters.
    self.camera_position = torch.nn.Parameter(
        torch.tensor([6, 10, -14], dtype=torch.float32))

    # Set up the renderer.
    renderer = neural_renderer_torch.Renderer()
    renderer.viewpoints = self.camera_position
    self.renderer = renderer
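No forward pass accompanies this camera-optimization model either; a minimal sketch, assuming `image_ref` is a single-channel silhouette in the renderer's value range (an RGB reference would need reducing to one channel first):

def forward(self):
    # renderer.viewpoints was bound to self.camera_position in __init__,
    # so each render uses the current (learnable) camera position.
    image = self.renderer.render_silhouettes(self.vertices, self.faces)
    loss = torch.sum((image - self.image_ref[None, :, :]) ** 2)
    return loss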
def test_forward_case3(self):
    # Render with Chainer.
    target_num = 2
    vertices, faces = neural_renderer_chainer.load_obj(
        './tests_torch/data/teapot.obj')
    vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
    vertices_batch[target_num] = vertices
    vertices, faces = neural_renderer_chainer.to_gpu((vertices_batch, faces))
    renderer = neural_renderer_chainer.Renderer()
    renderer.anti_aliasing = False
    renderer.viewpoints = neural_renderer_chainer.get_points_from_angles(
        2, 30., 0)
    images = renderer.render_depth(vertices, faces).data[target_num]
    images_chainer = (images - images.min()) / (images.max() - images.min())
    images_chainer = images_chainer.get()

    # Render with PyTorch.
    target_num = 2
    vertices, faces = neural_renderer_torch.load_obj(
        './tests_torch/data/teapot.obj')
    vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
    vertices_batch[target_num] = vertices
    vertices, faces = neural_renderer_torch.to_gpu((vertices_batch, faces))
    renderer = neural_renderer_torch.Renderer()
    renderer.anti_aliasing = False
    renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
        2, 30., 0)
    images = renderer.render_depth(vertices, faces).data[target_num]
    images_pytorch = (images - images.min()) / (images.max() - images.min())
    images_pytorch = images_pytorch.cpu().numpy()

    assert np.mean(np.abs(images_pytorch - images_chainer)) < 1e-4
def test_forward_case4(self):
    # Render with Chainer.
    target_num = 2
    vertices, faces = neural_renderer_chainer.load_obj(
        './tests_torch/data/teapot.obj')
    vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
    vertices_batch[target_num] = vertices
    vertices, faces = neural_renderer_chainer.to_gpu((vertices_batch, faces))
    vertices_t, faces_t, textures = neural_renderer_chainer.create_textures(
        faces.shape[0])
    vertices_t = chainer.functions.tile(
        vertices_t[None, :, :], (4, 1, 1)).data
    textures = chainer.functions.tile(
        textures[None, :, :, :], (4, 1, 1, 1)).data
    vertices_t = chainer.cuda.to_gpu(vertices_t)
    faces_t = chainer.cuda.to_gpu(faces_t)
    textures = chainer.cuda.to_gpu(textures)

    lights = []
    light_color = cp.asarray(
        [[0.47481096, 0.7131511, 0.4510043],
         [0.49120015, 0.161955, 0.71638113],
         [0.32655084, 0.7805874, 0.7682426],
         [0.42193118, 0.90416473, 0.5267034]]).astype(cp.float32)
    light_direction = cp.asarray(
        [[0.328245, 0.8916046, 0.31189483],
         [0.99824226, 0.05838178, 0.00867782],
         [0.35747865, 0.61983925, 0.6985467],
         [0.0393897, 0.6937492, 0.7191179]]).astype(cp.float32)
    lights.append(
        neural_renderer_chainer.DirectionalLight(light_color, light_direction))
    light_color = cp.asarray(
        [[0.2732121, 0.09439224, 0.38380036],
         [0.06487979, 0.02794903, 0.261018],
         [0.28739947, 0.2996951, 0.42412606],
         [0.10019363, 0.26517034, 0.07372955]]).astype(cp.float32)
    lights.append(neural_renderer_chainer.AmbientLight(light_color))
    light_color = cp.asarray(
        [[0.32410273, 0.24369295, 0.3126097],
         [0.3456873, 0.24514836, 0.21663068],
         [0.33004418, 0.25533527, 0.48039845],
         [0.29468802, 0.44377372, 0.10724097]]).astype(cp.float32)
    lights.append(neural_renderer_chainer.SpecularLight(light_color))

    renderer = neural_renderer_chainer.Renderer()
    renderer.viewpoints = neural_renderer_chainer.get_points_from_angles(
        2.732, 30, 30)
    renderer.draw_backside = False
    images_chainer = renderer.render_rgb(
        vertices, faces, vertices_t, faces_t, textures,
        lights=lights).data[target_num]
    images_chainer = images_chainer.get()

    # Render with PyTorch.
    target_num = 2
    vertices, faces = neural_renderer_torch.load_obj(
        './tests_torch/data/teapot.obj')
    vertices_batch = np.tile(vertices[None, :, :], (4, 1, 1)) * 0
    vertices_batch[target_num] = vertices
    vertices, faces = neural_renderer_torch.to_gpu((vertices_batch, faces))
    vertices_t, faces_t, textures = neural_renderer_torch.create_textures(
        faces.shape[0])
    vertices_t = torch.as_tensor(vertices_t[None, :, :]).expand(
        (4, *vertices_t.shape))
    faces_t = torch.as_tensor(faces_t)
    textures = torch.as_tensor(textures[None, :, :, :]).expand(
        (4, *textures.shape))
    vertices_t = vertices_t.cuda()
    faces_t = faces_t.cuda()
    textures = textures.cuda()

    lights = []
    light_color = torch.as_tensor(
        [[0.47481096, 0.7131511, 0.4510043],
         [0.49120015, 0.161955, 0.71638113],
         [0.32655084, 0.7805874, 0.7682426],
         [0.42193118, 0.90416473, 0.5267034]]).type(torch.float32)
    light_direction = torch.as_tensor(
        [[0.328245, 0.8916046, 0.31189483],
         [0.99824226, 0.05838178, 0.00867782],
         [0.35747865, 0.61983925, 0.6985467],
         [0.0393897, 0.6937492, 0.7191179]]).type(torch.float32)
    lights.append(
        neural_renderer_torch.DirectionalLight(light_color, light_direction))
    light_color = torch.as_tensor(
        [[0.2732121, 0.09439224, 0.38380036],
         [0.06487979, 0.02794903, 0.261018],
         [0.28739947, 0.2996951, 0.42412606],
         [0.10019363, 0.26517034, 0.07372955]]).type(torch.float32)
    lights.append(neural_renderer_torch.AmbientLight(light_color))
    light_color = torch.as_tensor(
        [[0.32410273, 0.24369295, 0.3126097],
         [0.3456873, 0.24514836, 0.21663068],
         [0.33004418, 0.25533527, 0.48039845],
         [0.29468802, 0.44377372, 0.10724097]]).type(torch.float32)
    lights.append(neural_renderer_torch.SpecularLight(light_color))

    renderer = neural_renderer_torch.Renderer()
    renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
        2.732, 30, 30)
    renderer.draw_backside = False
    images = renderer.render_rgb(
        vertices, faces, vertices_t, faces_t, textures,
        lights=lights).data[target_num]
    images_pytorch = images.cpu().numpy()

    assert np.mean(np.abs(images_pytorch - images_chainer)) < 1e-4
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename_input', type=str,
                        default='./examples/data/teapot.obj')
    parser.add_argument('-bs', '--batch_size', type=int, default=1)
    parser.add_argument('-is', '--image_size', type=int, default=256)
    parser.add_argument('-us', '--unsafe', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    # Other settings.
    camera_distance = 2.732
    elevation = 30
    texture_size = 2

    # Load the .obj file.
    # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
    vertices, faces = neural_renderer_torch.load_obj(args.filename_input)
    vertices = vertices[None, :, :]

    # Create the textures:
    # [batch_size=1, num_faces, texture_size, texture_size, texture_size, RGB]
    vertices_t, faces_t, textures = neural_renderer_torch.create_textures(
        faces.shape[0])

    # Tile to the minibatch size.
    vertices = np.tile(vertices, (args.batch_size, 1, 1))
    vertices_t = np.tile(vertices_t, (args.batch_size, 1, 1))
    textures = np.tile(textures, (args.batch_size, 1, 1, 1))

    # Upload to the GPU.
    chainer.cuda.get_device_from_id(args.gpu).use()
    vertices = chainer.Variable(chainer.cuda.to_gpu(vertices))
    faces = chainer.cuda.to_gpu(faces)
    vertices_t = chainer.Variable(chainer.cuda.to_gpu(vertices_t))
    faces_t = chainer.cuda.to_gpu(faces_t)
    textures = chainer.Variable(chainer.cuda.to_gpu(textures))

    # Create the renderer.
    renderer = neural_renderer_torch.Renderer()
    renderer.image_size = args.image_size

    # Benchmark silhouette rendering.
    times_forward = []
    times_backward = []
    loop = tqdm.tqdm(range(0, 360, 15))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
            camera_distance, elevation, azimuth)

        time_start = time.time()
        # images: [batch_size, image_size, image_size]
        images = renderer.render_silhouettes(vertices, faces)
        _ = images.data[0, 0, 0].get()  # Synchronize with the GPU.
        time_end = time.time()
        times_forward.append(time_end - time_start)

        loss = chainer.functions.sum(images)
        _ = loss.data.get()
        time_start = time.time()
        loss.backward()
        time_end = time.time()
        times_backward.append(time_end - time_start)

    # Skip the first iteration, which includes kernel compilation;
    # convert seconds to milliseconds to match the label.
    print('silhouette forward time: %.3f ms' %
          (1000 * np.mean(times_forward[1:])))
    print('silhouette backward time: %.3f ms' %
          (1000 * np.mean(times_backward[1:])))

    # Benchmark textured rendering.
    times_forward = []
    times_backward = []
    loop = tqdm.tqdm(range(0, 360, 15))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.viewpoints = neural_renderer_torch.get_points_from_angles(
            camera_distance, elevation, azimuth)

        time_start = time.time()
        # images: [batch_size, RGB, image_size, image_size]
        images = renderer.render(vertices, faces, vertices_t, faces_t, textures)
        _ = images.data[0, 0, 0, 0].get()  # Synchronize with the GPU.
        time_end = time.time()
        times_forward.append(time_end - time_start)

        loss = chainer.functions.sum(images)
        _ = loss.data.get()
        time_start = time.time()
        loss.backward()
        time_end = time.time()
        times_backward.append(time_end - time_start)

    print('texture forward time: %.3f ms' %
          (1000 * np.mean(times_forward[1:])))
    print('texture backward time: %.3f ms' %
          (1000 * np.mean(times_backward[1:])))
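The `.get()` calls above force a device-to-host copy so that the wall-clock timings include the asynchronous GPU work. If the same benchmark were driven with PyTorch tensors instead of Chainer Variables, the equivalent would be explicit synchronization; a minimal, generic timing helper sketched under that assumption:

import time

import torch


def time_gpu(fn, warmup=1, iters=10):
    # CUDA kernels launch asynchronously, so synchronize before reading
    # the clock on either side of the measured region.
    for _ in range(warmup):
        fn()
    torch.cuda.synchronize()
    start = time.time()
    for _ in range(iters):
        fn()
    torch.cuda.synchronize()
    return (time.time() - start) / iters * 1000  # milliseconds per call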