Example 1
    def __init__(self, image_size=256, background_color=[0,0,0], near=1, far=100, 
                 anti_aliasing=False, fill_back=True, eps=1e-3,
                 sigma_val=1e-5, dist_func='euclidean', dist_eps=1e-4,
                 gamma_val=1e-4, aggr_func_rgb='softmax', aggr_func_alpha='prod',
                 texture_type='surface',
                 camera_mode='projection',
                 P=None, dist_coeffs=None, orig_size=512,
                 perspective=True, viewing_angle=30, viewing_scale=1.0, 
                 eye=None, camera_direction=[0,0,1],
                 light_mode='surface',
                 light_intensity_ambient=0.5, light_color_ambient=[1,1,1],
                 light_intensity_directionals=0.5, light_color_directionals=[1,1,1],
                 light_directions=[0,1,0]):
        super(SoftRenderer, self).__init__()

        # light
        self.lighting = sr.Lighting(light_mode,
                                    light_intensity_ambient, light_color_ambient,
                                    light_intensity_directionals, light_color_directionals,
                                    light_directions)

        # camera
        self.transform = sr.Transform(camera_mode, 
                                      P, dist_coeffs, orig_size,
                                      perspective, viewing_angle, viewing_scale, 
                                      eye, camera_direction)

        # rasterization
        self.rasterizer = sr.SoftRasterizer(image_size, background_color, near, far, 
                                            anti_aliasing, fill_back, eps,
                                            sigma_val, dist_func, dist_eps,
                                            gamma_val, aggr_func_rgb, aggr_func_alpha,
                                            texture_type)
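
The excerpt ends with the constructor; the forward pass is not shown. A minimal sketch of how the three sub-modules could be chained, following the lighting -> transform -> rasterizer order used in the training loop of Example 4 (the method name and the single mesh argument are assumptions, not part of the excerpt):

    def forward(self, mesh):
        # Sketch (assumption): same module order as Example 4's training loop.
        mesh = self.lighting(mesh)       # shade the mesh
        mesh = self.transform(mesh)      # apply the camera transform / projection
        return self.rasterizer(mesh)     # rasterize to an image batch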
Example 2
    def __init__(self, filename_obj, args):
        super(Model, self).__init__()

        # auto-encoder
        self.encoder = Encoder(im_size=args.image_size)
        self.decoder = Decoder(filename_obj)

        # renderer
        self.transform = sr.LookAt(viewing_angle=15)
        self.lighting = sr.Lighting()
        self.rasterizer = sr.SoftRasterizer(image_size=args.image_size, sigma_val=args.sigma_val,
                                            aggr_func_rgb='hard', aggr_func_alpha='prod', dist_eps=1e-10)

        # mesh regularizer
        self.laplacian_loss = sr.LaplacianLoss(self.decoder.vertices_base, self.decoder.faces)
        self.flatten_loss = sr.FlattenLoss(self.decoder.faces)
Example 3
    def __init__(self,
                 image_size=256,
                 background_color=[0, 0, 0],
                 near=1,
                 far=100,
                 anti_aliasing=True,
                 fill_back=True,
                 eps=1e-6,
                 camera_mode='projection',
                 P=None,
                 dist_coeffs=None,
                 orig_size=512,
                 K=None,
                 perspective=True,
                 viewing_angle=30,
                 viewing_scale=1.0,
                 eye=None,
                 camera_direction=[0, 0, 1],
                 light_mode='surface',
                 light_intensity_ambient=0.5,
                 light_color_ambient=[1, 1, 1],
                 light_intensity_directionals=0.5,
                 light_color_directionals=[1, 1, 1],
                 light_directions=[0, 1, 0]):
        super(Renderer, self).__init__()

        # light
        self.lighting = sr.Lighting(light_mode, light_intensity_ambient,
                                    light_color_ambient,
                                    light_intensity_directionals,
                                    light_color_directionals, light_directions)

        # camera
        self.transform = sr.Transform(camera_mode, P, K, dist_coeffs,
                                      orig_size, perspective, viewing_angle,
                                      viewing_scale, eye, camera_direction)

        # rasterization
        self.rasterizer = sr.SoftRasterizer(image_size, background_color, near,
                                            far, anti_aliasing, fill_back, eps)
Example 4
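The excerpt begins at main(); the imports it relies on are not shown. An assumed preamble (data_dir, Model, and neg_iou_loss are defined elsewhere in the original file, and sr is assumed to be the soft_renderer package):

import argparse
import os

import imageio
import numpy as np
import torch
import tqdm

import soft_renderer as sr  # assumption: sr refers to the soft_renderer package
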
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--filename-input',
                        type=str,
                        default=os.path.join(data_dir, 'source.npy'))
    parser.add_argument('-c',
                        '--camera-input',
                        type=str,
                        default=os.path.join(data_dir, 'camera.npy'))
    parser.add_argument('-t',
                        '--template-mesh',
                        type=str,
                        default=os.path.join(data_dir,
                                             'obj/sphere/sphere_1352.obj'))
    parser.add_argument('-o',
                        '--output-dir',
                        type=str,
                        default=os.path.join(data_dir,
                                             'results/output_deform'))
    parser.add_argument('-b', '--batch-size', type=int, default=120)
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    model = Model(args.template_mesh).cuda()
    transform = sr.LookAt(viewing_angle=15)
    lighting = sr.Lighting()
    rasterizer = sr.SoftRasterizer(image_size=64,
                                   sigma_val=1e-4,
                                   aggr_func_rgb='hard')

    # read training images and camera poses
    images = np.load(args.filename_input).astype('float32') / 255.
    cameras = np.load(args.camera_input).astype('float32')
    optimizer = torch.optim.Adam(model.parameters(), 0.01, betas=(0.5, 0.99))

    camera_distances = torch.from_numpy(cameras[:, 0])
    elevations = torch.from_numpy(cameras[:, 1])
    viewpoints = torch.from_numpy(cameras[:, 2])
    transform.set_eyes_from_angles(camera_distances, elevations, viewpoints)

    loop = tqdm.tqdm(list(range(0, 2000)))
    writer = imageio.get_writer(os.path.join(args.output_dir, 'deform.gif'),
                                mode='I')
    for i in loop:
        images_gt = torch.from_numpy(images).cuda()

        mesh, laplacian_loss, flatten_loss = model(args.batch_size)

        # render
        mesh = lighting(mesh)
        mesh = transform(mesh)
        images_pred = rasterizer(mesh)

        # optimize mesh with silhouette reprojection error and
        # geometry constraints
        loss = neg_iou_loss(images_pred[:, 3], images_gt[:, 3]) + \
            0.03 * laplacian_loss + \
            0.0003 * flatten_loss

        loop.set_description('Loss: %.4f' % (loss.item()))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 100 == 0:
            image = images_pred.detach().cpu().numpy()[0].transpose((1, 2, 0))
            writer.append_data((255 * image).astype(np.uint8))
            imageio.imsave(
                os.path.join(args.output_dir, 'deform_%05d.png' % i),
                (255 * image[..., -1]).astype(np.uint8))

    # save optimized mesh
    model(1)[0].save_obj(os.path.join(args.output_dir, 'plane.obj'),
                         save_texture=False)
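
The loop above calls neg_iou_loss on the silhouette (alpha) channels, but the helper itself is outside the excerpt. A hedged sketch of a negative intersection-over-union loss that matches that call signature (an assumption, not the original helper):

def neg_iou_loss(predict, target):
    # Sketch (assumption): 1 - IoU between predicted and target silhouettes,
    # averaged over the batch; predict and target are (B, H, W) in [0, 1].
    dims = tuple(range(1, predict.ndimension()))
    intersect = (predict * target).sum(dims)
    union = (predict + target - predict * target).sum(dims) + 1e-6
    return 1.0 - (intersect / union).mean()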
Example 5
    def __init__(self,
                 image_size=256,
                 background_color=[0, 0, 0],
                 near=1,
                 far=100,
                 anti_aliasing=False,
                 fill_back=True,
                 eps=1e-3,
                 sigma_val=1e-5,
                 dist_func='euclidean',
                 dist_eps=1e-4,
                 gamma_val=1e-4,
                 aggr_func_rgb='softmax',
                 aggr_func_alpha='prod',
                 texture_type='surface',
                 camera_mode='projection',
                 P=None,
                 dist_coeffs=None,
                 orig_size=512,
                 perspective=True,
                 viewing_angle=30,
                 viewing_scale=1.0,
                 eye=None,
                 camera_direction=[0, 0, 1],
                 light_mode='surface',
                 light_intensity_ambient=0.5,
                 light_color_ambient=[1, 1, 1],
                 light_intensity_directionals=0.5,
                 light_color_directionals=[1, 1, 1],
                 light_directions=[0, 1, 0],
                 shadow=True,
                 light_width=2,
                 softmin_scale=10):
        super(SoftRenderer, self).__init__()

        # light
        self.lighting = sr.Lighting(light_mode, light_intensity_ambient,
                                    light_color_ambient,
                                    light_intensity_directionals,
                                    light_color_directionals, light_directions)

        # camera
        self.transform = sr.Transform(camera_mode, P, dist_coeffs, orig_size,
                                      perspective, viewing_angle,
                                      viewing_scale, eye, camera_direction)

        if not isinstance(light_directions, (torch.Tensor, np.ndarray)):
            light_directions = torch.FloatTensor(light_directions)
        self.light_space_tform = None
        if shadow:
            # TODO: Allow specifying the light distance
            # TODO: the light should really get its own viewing_scale instead of
            #       sharing with the main camera
            self.light_space_tform = sr.Transform('look_at',
                                                  perspective=False,
                                                  eye=30 * light_directions,
                                                  viewing_scale=viewing_scale)
        self.viewing_scale = viewing_scale

        # rasterization
        self.depth_rasterizer = sr.SoftRasterizer(image_size, background_color,
                                                  near, far, False, fill_back,
                                                  eps, sigma_val, dist_func,
                                                  dist_eps, gamma_val, 'depth',
                                                  aggr_func_alpha,
                                                  texture_type)
        self.rasterizer = sr.SoftRasterizer(image_size, background_color, near,
                                            far, anti_aliasing, fill_back, eps,
                                            sigma_val, dist_func, dist_eps,
                                            gamma_val, aggr_func_rgb,
                                            aggr_func_alpha, texture_type,
                                            light_width, softmin_scale)
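
The constructor only builds the shadow-related modules (light_space_tform and the depth rasterizer); their use is outside the excerpt. A hedged sketch of the intended pattern, reusing the callable-module convention from Example 4 (the method name and the mesh argument are assumptions):

    def render_light_space_depth(self, mesh):
        # Sketch (assumption): rasterize depth as seen from the light, e.g. for
        # use as a shadow map, using the orthographic light-space transform
        # set up in the constructor.
        mesh = self.light_space_tform(mesh)
        return self.depth_rasterizer(mesh)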
Example 6
    def __init__(
            self,
            image_size=256,
            background_color=[0, 0, 0],
            near=1,
            far=100,
            is_transform=True,
            anti_aliasing=False,
            fill_back=True,
            eps=1e-3,
            sigma_val=1e-5,
            dist_func='euclidean',
            dist_eps=1e-4,
            gamma_val=1e-4,
            aggr_func_rgb='softmax',
            aggr_func_alpha='prod',
            texture_type='vertex',
            camera_mode='projection',
            K=None,
            R=None,
            t=None,
            dist_coeffs=None,
            orig_size=512,
            perspective=True,
            viewing_angle=30,
            viewing_scale=1.0,
            eye=None,
            camera_direction=[0, 0, 1],
            light_mode='directional',  # directional, spherical
            shading_mode='Gouraud',  # flat, Gouraud, Phong
            light_intensity_ambient=0.5,
            light_color_ambient=[1, 1, 1],
            light_intensity_directionals=0.5,
            light_color_directionals=[1, 1, 1],
            light_directions=[0, 1, 0]):
        super(TexSoftRenderer, self).__init__()

        # light
        self.light_mode = light_mode
        if light_mode == 'directional':
            self.lighting = sr.Lighting(light_mode, light_intensity_ambient,
                                        light_color_ambient,
                                        light_intensity_directionals,
                                        light_color_directionals,
                                        light_directions)
        elif light_mode == 'spherical':
            self.lighting = sr.SHLighting()
        else:
            print('no lighting')
        # camera
        if is_transform:
            self.transform = sr.Transform(camera_mode, K, R, t, dist_coeffs,
                                          orig_size, perspective,
                                          viewing_angle, viewing_scale, eye,
                                          camera_direction)

        # rasterization
        self.texture_type = texture_type
        anti_aliasing = False  # anti-aliasing is force-disabled here regardless of the argument
        self.rasterizer = sr.SoftRasterizer(image_size, background_color, near,
                                            far, anti_aliasing, fill_back, eps,
                                            sigma_val, dist_func, dist_eps,
                                            gamma_val, aggr_func_rgb,
                                            aggr_func_alpha, texture_type)
        self.is_transform = is_transform
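
For reference, a hedged construction sketch for this variant, using only keyword arguments and values that appear in the excerpt (whether the remaining defaults compose into a usable camera is not verified here):

# Sketch (assumption): textured renderer with spherical-harmonics lighting and
# per-vertex textures; every other argument keeps the default shown above.
renderer = TexSoftRenderer(image_size=256,
                           light_mode='spherical',
                           texture_type='vertex')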