    def __init__(self, filename_obj, filename_ref):
        super(Model, self).__init__()

        # set template mesh
        texture_size = 4
        self.template_mesh = jr.Mesh.from_obj(filename_obj,
                                              texture_res=texture_size,
                                              load_texture=True,
                                              dr_type='softras')
        self.vertices = (self.template_mesh.vertices).stop_grad()
        self.faces = self.template_mesh.faces.stop_grad()
        self.textures = self.template_mesh.textures.stop_grad()
        self.metallic_textures = jt.zeros(
            (1, self.faces.shape[1], texture_size * texture_size,
             1)).float32() + 0.4
        self.metallic_textures = self.metallic_textures.stop_grad()
        self.roughness_textures = jt.ones(
            (1, self.faces.shape[1], texture_size * texture_size,
             1)).float32()
        # load reference image
        self.image_ref = jt.array(
            imread(filename_ref).astype('float32') / 255.).permute(
                2, 0, 1).unsqueeze(0).stop_grad()
        # setup renderer
        self.renderer = jr.Renderer(dr_type='softras')
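
    def execute(self):
        # Hedged sketch, not part of the original snippet: render the template
        # mesh with its PBR textures and score it against the reference image
        # with a plain L2 loss. The camera is assumed to be set elsewhere via
        # self.renderer.transform.set_eyes_from_angles(...), and the rendered
        # image is assumed to share the channel layout of self.image_ref.
        image = self.renderer(self.vertices, self.faces,
                              textures=self.textures,
                              metallic_textures=self.metallic_textures,
                              roughness_textures=self.roughness_textures)
        loss = ((image - self.image_ref) ** 2).sum()
        return loss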
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename-input', type=str, 
        default=os.path.join(data_dir, 'obj/spot/spot_triangulated.obj'))
    parser.add_argument('-o', '--output-dir', type=str, 
        default=os.path.join(data_dir, 'results/output_render'))
    args = parser.parse_args()

    # other settings
    camera_distance = 2.732
    elevation = 30
    azimuth = 0

    # load from Wavefront .obj file
    mesh = jr.Mesh.from_obj(args.filename_input, load_texture=True, texture_res=5,
                            texture_type='surface', dr_type='softras')

    # create renderer with SoftRas
    renderer = jr.Renderer(dr_type='softras')

    os.makedirs(args.output_dir, exist_ok=True)
   
    # uniform per-face PBR maps: metallic = 0.5, roughness = 0.4
    metallic_textures = jt.zeros((1, mesh.faces.shape[1], 5 * 5, 1)).float32() + 0.5
    roughness_textures = jt.zeros((1, mesh.faces.shape[1], 5 * 5, 1)).float32() + 0.4

    # draw the object from different viewpoints
    loop = tqdm.tqdm(list(range(0, 360, 4)))
    writer = imageio.get_writer(os.path.join(args.output_dir, 'rotation.gif'), mode='I')
    for num, azimuth in enumerate(loop):
        # reset mesh to initial state
        mesh.reset_()
        loop.set_description('Drawing rotation')
        renderer.transform.set_eyes_from_angles(camera_distance, elevation, azimuth)
        rgb = renderer(mesh.vertices, mesh.faces, textures=mesh.textures, metallic_textures=metallic_textures, roughness_textures=roughness_textures)
        image = rgb.numpy()[0].transpose((1, 2, 0))
        writer.append_data((255*image).astype(np.uint8))
    writer.close()

    # draw the object with different sigma and gamma values
    loop = tqdm.tqdm(list(np.arange(-4, -2, 0.2)))
    renderer.transform.set_eyes_from_angles(camera_distance, elevation, 45)
    writer = imageio.get_writer(os.path.join(args.output_dir, 'blurring.gif'), mode='I')
    for num, gamma_pow in enumerate(loop):
        # reset mesh to initial state
        mesh.reset_()
        renderer.set_gamma(10**gamma_pow)
        renderer.set_sigma(10**(gamma_pow - 1))
        loop.set_description('Drawing blurring')
        images = renderer(mesh.vertices, mesh.faces, textures=mesh.textures, metallic_textures=metallic_textures, roughness_textures=roughness_textures)
        image = images.numpy()[0].transpose((1, 2, 0))  # [image_size, image_size, RGB]
        writer.append_data((255*image).astype(np.uint8))
    writer.close()

    # save to textured obj
    mesh.reset_()
    mesh.save_obj(os.path.join(args.output_dir, 'saved_spot.obj'))
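
    # Hedged add-on sketch, not in the original example: render one extra frame
    # with a shinier material to see the effect of the metallic/roughness maps.
    # Tensor shapes follow the (1, n_faces, 5 * 5, 1) per-face layout used above;
    # the renderer keeps the last sigma/gamma values set in the loop above.
    shiny_metallic = jt.zeros((1, mesh.faces.shape[1], 5 * 5, 1)).float32() + 0.9
    shiny_roughness = jt.zeros((1, mesh.faces.shape[1], 5 * 5, 1)).float32() + 0.1
    renderer.transform.set_eyes_from_angles(camera_distance, elevation, 45)
    rgb = renderer(mesh.vertices, mesh.faces, textures=mesh.textures,
                   metallic_textures=shiny_metallic, roughness_textures=shiny_roughness)
    imageio.imsave(os.path.join(args.output_dir, 'shiny_check.png'),
                   (255 * rgb.numpy()[0].transpose((1, 2, 0))).astype(np.uint8))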
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename-input', type=str, 
        default=os.path.join(data_dir, 'source.npy'))
    parser.add_argument('-c', '--camera-input', type=str, 
        default=os.path.join(data_dir, 'camera.npy'))
    parser.add_argument('-t', '--template-mesh', type=str, 
        default=os.path.join(data_dir, 'obj/sphere/sphere_1352.obj'))
    parser.add_argument('-o', '--output-dir', type=str, 
        default=os.path.join(data_dir, 'results/output_deform'))
    parser.add_argument('-b', '--batch-size', type=int,
        default=120)
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    model = Model(args.template_mesh)

    renderer = jr.Renderer(image_size=64, sigma_val=1e-4, aggr_func_rgb='hard', camera_mode='look_at', viewing_angle=15, dr_type='softras')

    # read training images and camera poses
    images = np.load(args.filename_input).astype('float32') / 255.
    cameras = np.load(args.camera_input).astype('float32')
    optimizer = nn.Adam(model.parameters(), 0.01, betas=(0.5, 0.99))
    
    camera_distances = jt.array(cameras[:, 0])
    elevations = jt.array(cameras[:, 1])
    viewpoints = jt.array(cameras[:, 2])
    renderer.transform.set_eyes_from_angles(camera_distances, elevations, viewpoints)

    import time
    sta = time.time()
    loop = tqdm.tqdm(list(range(0, 1000)))
    writer = imageio.get_writer(os.path.join(args.output_dir, 'deform.gif'), mode='I')
    for i in loop:
        images_gt = jt.array(images)

        mesh, laplacian_loss, flatten_loss = model(args.batch_size)
        images_pred = renderer.render_mesh(mesh, mode='silhouettes')

        # optimize mesh with silhouette reprojection error and 
        # geometry constraints
        loss = neg_iou_loss(images_pred, images_gt[:, 3]) + \
               0.03 * laplacian_loss + \
               0.0003 * flatten_loss
            
        loop.set_description('Loss: %.4f'%(loss.item()))
        optimizer.step(loss)
        
        if i % 100 == 0:
            image = images_pred.numpy()[0]  # silhouette of the first sample
            imageio.imsave(os.path.join(args.output_dir, 'deform_%05d.png'%i), (255*image).astype(np.uint8))
            writer.append_data((255*image).astype(np.uint8))

    # save optimized mesh
    model(1)[0].save_obj(os.path.join(args.output_dir, 'plane.obj'), save_texture=False)
    print(f"Cost {time.time() - sta} secs.")
Example #4
    def __init__(self, filename_obj, args):
        super(Model, self).__init__()

        self.encoder = Encoder(im_size=args.image_size)
        self.decoder = Decoder(filename_obj)
        self.renderer = jr.Renderer(image_size=args.image_size,
                                    sigma_val=args.sigma_val,
                                    aggr_func_rgb='hard',
                                    camera_mode='look_at',
                                    viewing_angle=15,
                                    dist_eps=1e-10,
                                    dr_type='softras')
        self.laplacian_loss = jr.LaplacianLoss(self.decoder.vertices_base,
                                               self.decoder.faces)
        self.flatten_loss = jr.FlattenLoss(self.decoder.faces)
Example #5
    def __init__(self, cfgs):
        self.image_size = cfgs.get('image_size', 64)
        self.min_depth = cfgs.get('min_depth', 0.9)
        self.max_depth = cfgs.get('max_depth', 1.1)
        self.rot_center_depth = cfgs.get('rot_center_depth',
                                         (self.min_depth + self.max_depth) / 2)
        self.fov = cfgs.get('fov', 10)
        self.tex_cube_size = cfgs.get('tex_cube_size', 2)
        self.renderer_min_depth = cfgs.get('renderer_min_depth', 0.1)
        self.renderer_max_depth = cfgs.get('renderer_max_depth', 10.)

        #### camera intrinsics
        #             (u)   (x)
        #    d * K^-1 (v) = (y)
        #             (1)   (z)

        ## renderer for visualization
        R = [[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]]
        R = jt.array(R).float32()
        t = jt.zeros((1, 3)).float32()
        fx = (self.image_size - 1) / 2 / (math.tan(
            self.fov / 2 * math.pi / 180))
        fy = (self.image_size - 1) / 2 / (math.tan(
            self.fov / 2 * math.pi / 180))
        cx = (self.image_size - 1) / 2
        cy = (self.image_size - 1) / 2
        K = [[fx, 0., cx], [0., fy, cy], [0., 0., 1.]]
        self.inv_K = jt.array(np.linalg.inv(
            np.array(K))).unsqueeze(0).float32()
        K = jt.array(K).float32()
        self.K = K.unsqueeze(0)
        self.renderer = jr.Renderer(camera_mode='projection',
                                    light_intensity_ambient=1.0,
                                    light_intensity_directionals=0.,
                                    K=self.K,
                                    R=R,
                                    t=t,
                                    near=self.renderer_min_depth,
                                    far=self.renderer_max_depth,
                                    image_size=self.image_size,
                                    orig_size=self.image_size,
                                    fill_back=True,
                                    background_color=[1, 1, 1],
                                    dr_type='n3mr')
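
As a hedged illustration of the pinhole model sketched in the comments above (not part of the original code): the unprojection d * K^-1 [u, v, 1]^T yields a camera-space point that K projects back to the same pixel. In plain NumPy, with the defaults image_size=64 and fov=10:

import math
import numpy as np

image_size, fov = 64, 10
fx = fy = (image_size - 1) / 2 / math.tan(fov / 2 * math.pi / 180)
cx = cy = (image_size - 1) / 2
K = np.array([[fx, 0., cx], [0., fy, cy], [0., 0., 1.]])

u, v, d = 40.0, 20.0, 1.0                            # pixel coordinates and depth
xyz = d * np.linalg.inv(K) @ np.array([u, v, 1.0])   # unproject to camera space
uvw = K @ xyz                                        # project back
assert np.allclose(uvw / uvw[2], [u, v, 1.0])        # recovers the original pixel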
Example #6
    def __init__(self, filename_obj, filename_ref):
        super(Model, self).__init__()

        # set template mesh
        self.template_mesh = jr.Mesh.from_obj(filename_obj, dr_type='softras')
        self.vertices = (self.template_mesh.vertices * 0.6).stop_grad()
        self.faces = self.template_mesh.faces.stop_grad()
        # self.textures = self.template_mesh.textures
        texture_size = 4
        self.textures = jt.zeros((1, self.faces.shape[1], texture_size,
                                  texture_size, texture_size, 3)).float32()

        # load reference image
        self.image_ref = jt.array(
            imread(filename_ref).astype('float32') / 255.).permute(
                2, 0, 1).unsqueeze(0).stop_grad()

        # setup renderer
        self.renderer = jr.Renderer(camera_mode='look_at',
                                    perspective=False,
                                    light_intensity_directionals=0.0,
                                    light_intensity_ambient=1.0,
                                    dr_type='softras')
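
A hedged usage sketch for this model (file paths are placeholders and the original optimization loop is not shown): instantiate it, aim the look_at camera, and render once with the zero-initialized learnable textures.

import imageio
import numpy as np

model = Model('obj/sphere/sphere_1352.obj', 'ref_silhouette.png')  # placeholder paths
model.renderer.transform.set_eyes_from_angles(2.732, 30, 45)
images = model.renderer(model.vertices, model.faces, textures=model.textures)
imageio.imsave('init_render.png',
               (255 * images.numpy()[0].transpose((1, 2, 0))).astype(np.uint8))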