Example no. 1
    def __init__(self, camera_mode='projection', K=None, R=None, t=None,
                 smpl_model_path="/hdd/zen/dev/ActMix/actmix/DataGen/MotionRender/ActmixTorchGenerator/smpl_models/",
                 texture_path="/hdd/zen/data/SURREAL/smpl_data/",
                 dtype=torch.float32, gender="male", image_size=256,
                 device=(torch.device("cuda", index=0) if torch.cuda.is_available() else torch.device("cpu")),
                 background_color=[0, 0, 0],
                 ):

        self.dtype = dtype
        self.camera_mode = camera_mode
        self.image_size = image_size
            
        r = sciR.from_euler('zxy', [0, 0, 0], degrees=True)
        vet_rot = r.as_matrix()
        fx = 500.0
        fy = 500.0
        cx = 512.0
        cy = 512.0
        # intrinsics: fx, fy focal lengths; cx, cy principal point
        def_K = np.array([[fx, 0., cx],
                          [0., fy, cy],
                          [0., 0., 1.]])

        def_K = def_K[None, :]
        def_R = vet_rot[None, :]
        def_t = np.array([0, 0, 1.5])

        if K is None:
            K = def_K
        if R is None:
            R = def_R
        if t is None:
            t = def_t

        
        if camera_mode == "look_at":
            self.renderer = nr.Renderer(camera_mode='look_at', viewing_angle = 30, image_size = image_size, background_color= background_color)
            self.set_render_angles(2.5, 0, 0)
        elif camera_mode == "projection":
            self.renderer = nr.Renderer(K = K, R = R, t = t, camera_mode='projection', image_size = image_size, light_intensity_ambient=1, background_color= background_color)
        

        self.renderer = self.renderer.to(device)
        
        self.gender = gender
        
        self.faces = np.load(os.path.join(smpl_model_path, "smpl_faces.npy"))[np.newaxis, :,:]
        self.device = device
        self.faces_uv = np.load(os.path.join(texture_path, 'final_faces_uv_mapping.npy'))
        self.uv_sampler = torch.from_numpy(self.faces_uv.reshape(-1, 2, 2, 2)).to(device).type(self.dtype)
        self.uv_sampler = self.uv_sampler.view(-1, 13776, 2*2, 2)


        self.male_texture_dataset = TextureDataset(texture_path, "male")
        self.male_smpl_p = SMPL_Parser(device = device, gender = "male")

        self.female_texture_dataset = TextureDataset(texture_path, "female")
        self.female_smpl_p = SMPL_Parser(device = device, gender = "female")

        self.smpl_p = self.male_smpl_p if self.gender == 'male' else self.female_smpl_p
        self.texture_dataset = self.male_texture_dataset if self.gender == 'male' else self.female_texture_dataset
Example no. 2
    def __init__(self, filename_obj, filename_ref=None):
        super(Model, self).__init__()
        # Load mesh vertices and faces
        vertices, faces = load_off(filename_obj)

        self.register_buffer('vertices', vertices[None, :, :])
        self.register_buffer('faces', faces[None, :, :])

        # create textures
        texture_size = 2
        textures = torch.ones(1,
                              self.faces.shape[1],
                              texture_size,
                              texture_size,
                              texture_size,
                              3,
                              dtype=torch.float32)
        self.register_buffer('textures', textures)

        # load reference image
        image_ref = torch.from_numpy(
            (imread(filename_ref).max(-1)).astype(np.float32))
        # print(image_ref.unique())
        # TODO: try it without thresholding, just normalize
        self.register_buffer('image_ref', image_ref)

        # camera parameters
        self.camera_position = nn.Parameter(
            torch.from_numpy(np.array([6, 10, -14], dtype=np.float32)))

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at')
        renderer.eye = self.camera_position
        self.renderer = renderer
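
The model above only defines __init__; in the neural_renderer examples a model like this is driven by a short loop that renders a silhouette from the learnable camera_position and backpropagates its difference to image_ref. A minimal sketch under that assumption (the silhouette_loss helper, file names, and optimizer settings below are illustrative, not part of the original snippet):

# Hypothetical companion loop for the camera-position model above.
import torch
import tqdm

def silhouette_loss(model):
    # Render a silhouette from the current camera position (renderer.eye
    # references the learnable camera_position parameter).
    image = model.renderer(model.vertices, model.faces, mode='silhouettes')
    return torch.sum((image - model.image_ref[None, :, :]) ** 2)

model = Model('mesh.off', 'ref_silhouette.png').cuda()  # placeholder paths
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
for _ in tqdm.tqdm(range(300)):
    optimizer.zero_grad()
    loss = silhouette_loss(model)
    loss.backward()
    optimizer.step()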
Example no. 3
    def __init__(self,
                 img_size=256,
                 face_path='models/smpl_faces.npy',
                 t_size=1):

        self.renderer = nr.Renderer(img_size,
                                    camera_mode='look_at',
                                    perspective=False)
        self.set_light_dir([1, .5, -1], int_dir=0.3, int_amb=0.7)
        self.set_bgcolor([1, 1, 1.])
        self.img_size = img_size

        self.faces_np = np.load(face_path).astype(np.int32)  # np.int was removed from NumPy
        self.faces = to_variable(torch.IntTensor(self.faces_np).cuda())
        if self.faces.dim() == 2:
            self.faces = torch.unsqueeze(self.faces, 0)

        # Default color:
        default_tex = np.ones(
            (1, self.faces.shape[1], t_size, t_size, t_size, 3))
        self.default_tex = to_variable(torch.FloatTensor(default_tex).cuda())

        # Default camera:
        cam = np.hstack([0.9, 0, 0])
        default_cam = to_variable(torch.FloatTensor(cam).cuda())
        self.default_cam = torch.unsqueeze(default_cam, 0)

        # Setup proj fn:
        self.proj_fn = orthographic_proj_withz_idrot
Example no. 4
    def __init__(self, filename_obj, filename_ref):
        super(Model, self).__init__()
        vertices, faces = nr.load_obj(filename_obj)
        self.register_buffer('vertices', vertices[None, :, :])
        self.register_buffer('faces', faces[None, :, :])

        # create textures
        texture_size = 4
        textures = torch.zeros(1,
                               self.faces.shape[1],
                               texture_size,
                               texture_size,
                               texture_size,
                               3,
                               dtype=torch.float32)
        self.textures = nn.Parameter(textures)

        # load reference image
        image_ref = torch.from_numpy(
            imread(filename_ref).astype('float32') / 255.).permute(2, 0,
                                                                   1)[None, ::]
        self.register_buffer('image_ref', image_ref)

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at')
        renderer.perspective = False
        renderer.light_intensity_directional = 0.0
        renderer.light_intensity_ambient = 1.0
        self.renderer = renderer
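
As with the previous models, only construction is shown; the textures nn.Parameter is what the surrounding script would optimize against image_ref. A rough fitting loop, with the sigmoid squashing, paths, and optimizer settings as assumptions rather than parts of the snippet:

# Hypothetical fitting loop for the texture model above.
import torch

model = Model('mesh.obj', 'ref_texture.png').cuda()  # placeholder paths
optimizer = torch.optim.Adam(model.parameters(), lr=0.1, betas=(0.5, 0.999))
for _ in range(300):
    optimizer.zero_grad()
    # mode='rgb' asks the renderer for the color image only
    images = model.renderer(model.vertices, model.faces,
                            torch.sigmoid(model.textures), mode='rgb')
    loss = torch.sum((images - model.image_ref) ** 2)
    loss.backward()
    optimizer.step()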
Example no. 5
    def test_backward_case1(self):
        """Backward if non-zero gradient is out of a face."""

        vertices = [
            [0.8, 0.8, 1.],
            [0.0, -0.5, 1.],
            [0.2, -0.4, 1.]]
        faces = [[0, 1, 2]]
        pxi = 35
        pyi = 25
        grad_ref = [
            [1.6725862, -0.26021874, 0.],
            [1.41986704, -1.64284933, 0.],
            [0., 0., 0.],
        ]

        renderer = nr.Renderer(camera_mode='look_at')
        renderer.image_size = 64
        renderer.anti_aliasing = False
        renderer.perspective = False

        vertices = torch.from_numpy(np.array(vertices, np.float32)).cuda()
        faces = torch.from_numpy(np.array(faces, np.int32)).cuda()
        grad_ref = torch.from_numpy(np.array(grad_ref, np.float32)).cuda()
        vertices, faces, grad_ref = utils.to_minibatch((vertices, faces, grad_ref))
        vertices.requires_grad = True
        images = renderer(vertices, faces, mode='silhouettes')
        loss = torch.sum(torch.abs(images[:, pyi, pxi] - 1))
        loss.backward()

        assert(torch.allclose(vertices.grad, grad_ref, rtol=1e-2))
Example no. 6
    def __init__(self, filename_obj, filename_ref):
        super(Model, self).__init__()

        with self.init_scope():
            # load .obj
            vertices, faces = neural_renderer.load_obj(filename_obj)
            self.vertices = chainer.Parameter(vertices[None, :, :])
            self.faces = faces[None, :, :]

            # create textures
            texture_size = 2
            textures = np.ones((1, self.faces.shape[1], texture_size,
                                texture_size, texture_size, 3), 'float32')
            self.textures = textures

            # load reference image
            image_ref = scipy.misc.imread(filename_ref).astype('float32').mean(
                -1) / 255.
            h = image_ref.shape[0]
            w = image_ref.shape[1]
            hcrop = wcrop = self.image_size = 256  #np.min(image_ref.shape[:1])
            top = int(math.floor((h - hcrop) / 2.0))
            bottom = int(math.ceil((h + hcrop) / 2.0))
            left = int(math.floor((w - wcrop) / 2.0))
            right = int(math.ceil((w + wcrop) / 2.0))
            if len(image_ref.shape) > 2:
                self.image_ref = image_ref[top:bottom, left:right, :]
            else:
                self.image_ref = image_ref[top:bottom, left:right]
            # setup renderer
            renderer = neural_renderer.Renderer()
            self.renderer = renderer
Example no. 7
    def __init__(self, vertices, faces, filename_ref, camera_distance, camera_elevation, camera_azimuth,
                 base_textures=None):
        super(ModelTextures, self).__init__()

        self.register_buffer('vertices', vertices)
        self.register_buffer('faces', faces)

        # Create textures
        # If no base textures are specified they are initialized to white
        if base_textures is None:
            texture_size = 2
            textures = torch.ones(1, self.faces.shape[1], texture_size, texture_size, texture_size, 3,
                                  dtype=torch.float32).cuda()
            self.textures = nn.Parameter(textures)
        else:
            self.textures = nn.Parameter(base_textures)

        # load reference image
        image_ref = torch.from_numpy(imread(filename_ref).astype('float32') / 255.).permute(2, 0, 1)[None, ::]
        image_ref = image_ref.cuda()
        self.register_buffer('image_ref', image_ref)

        # save camera parameters (distance / elevation / azimuth)
        self.camera_distance = camera_distance
        self.camera_elevation = camera_elevation
        self.camera_azimuth = camera_azimuth

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at', far=200)
        renderer.light_intensity_directional = 0
        renderer.light_intensity_ambient = 1
        # convert the angles to an eye position instead of using them as raw xyz
        renderer.eye = nr.get_points_from_angles(
            self.camera_distance, self.camera_elevation, self.camera_azimuth)
        self.renderer = renderer
Example no. 8
    def __init__(self, filename_obj, filename_ref):
        super(Model, self).__init__()

        with self.init_scope():
            # load .obj
            vertices, faces = neural_renderer.load_obj(filename_obj)
            self.vertices = vertices[None, :, :]
            self.faces = faces[None, :, :]

            # create textures
            texture_size = 4
            textures = np.zeros((1, self.faces.shape[1], texture_size,
                                 texture_size, texture_size, 3), 'float32')
            self.textures = chainer.Parameter(textures)

            # load reference image
            self.image_ref = scipy.misc.imread(filename_ref).astype(
                'float32') / 255.

            # setup renderer
            renderer = neural_renderer.Renderer()
            renderer.perspective = False
            renderer.light_intensity_directional = 0.0
            renderer.light_intensity_ambient = 1.0
            self.renderer = renderer
Example no. 9
    def test_forward_case3(self):
        """Whether a silhouette by neural renderer matches that by Blender."""

        # load teapot
        vertices, faces, textures = utils.load_teapot_batch()
        vertices = vertices.cuda()
        faces = faces.cuda()
        textures = textures.cuda()

        # create renderer
        renderer = nr.Renderer()
        renderer.camera.position = self.position
        renderer.camera.image_size = 256
        renderer.anti_aliasing = False
        renderer.light_intensity_ambient = 1.0
        renderer.light_intensity_directional = 0.0

        images = renderer(vertices, faces, textures)
        images = images.detach().cpu().numpy()
        image = images[2].mean(0)

        # load reference image by blender
        ref = imread(os.path.join(data_dir, 'teapot_blender.png'))
        ref = (ref.min(axis=-1) != 255).astype(np.float32)

        assert (np.allclose(ref, image))
Example no. 10
    def __init__(self, img_size, camera_data, vis3d):
        super(SMPLModel, self).__init__()

        # camera
        self.camera_t = torch.load('runner_srcipts/camera.pt')['camera_t'][None, :]
        self.camera_f = torch.FloatTensor(camera_data['camera_f']).cuda()[None, :]
        self.camera_c = torch.FloatTensor(camera_data['camera_c']).cuda()[None, :]
        self.camera_rt = torch.FloatTensor(camera_data['camera_rt']).cuda()[None, :]
        self.dist_coeffs = torch.FloatTensor([[0., 0., 0., 0., 0.]]).repeat(self.camera_f.shape[0], 1).cuda()

        self.img_size = img_size
        self.orig_size = camera_data['height']
        self.vis3d = vis3d

        # SMPL
        self.smpl = SMPL('hmr/models/neutral_smpl_with_cocoplus_reg.pkl', 'cuda', 'coco').cuda()
        initial_thetas = torch.FloatTensor(np.concatenate([[np.pi, 0, 0], MEAN_POSE])).cuda()
        initial_betas = torch.randn(10).cuda()
        self.faces = self.smpl.faces[None, :, :]
        self.theta = nn.Parameter(initial_thetas[None, :])
        self.betas = nn.Parameter(initial_betas[None, :])

        # renderer
        self.renderer = neural_renderer.Renderer(
            image_size=self.img_size,
            camera_f=self.camera_f,
            camera_c=self.camera_c,
            camera_rt=self.camera_rt,
            camera_t=self.camera_t,
            orig_size=self.orig_size,
            near=1.0,
            far=100.0,
            camera_mode='projection_by_params'
        )
Example no. 11
def render_img_from_mesh(mesh, e, a, d, file_name):
    renderer = nr.Renderer(camera_mode='look_at',
                           image_size=512,
                           background_color=[1, 1, 1],
                           light_intensity_ambient=0.3)
    renderer = renderer.to(device)
    renderer.eye = nr.get_points_from_angles(d, e, a)
    texture_size = 2
    v = torch.Tensor(mesh.vertices)[None, :, :]
    f = torch.Tensor(mesh.faces)[None, :, :]

    v = v.to(device)
    f = f.int().to(device)

    textures = torch.ones(1,
                          f.shape[1],
                          texture_size,
                          texture_size,
                          texture_size,
                          3,
                          dtype=torch.float32).cuda()
    textures[:] = 0.9
    images, _, _ = renderer(v, f, textures)
    image1 = images.detach().cpu().numpy()[0].transpose(1, 2, 0)
    image2 = (image1 * 255).astype(np.uint8)
    scipy.misc.imsave(file_name, image2)
    return image1
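
A possible invocation of render_img_from_mesh; loading with trimesh and the file names are assumptions, since the function only needs an object exposing vertices and faces arrays:

# Hypothetical usage: render at elevation 30, azimuth 45, distance 2.5.
import trimesh

mesh = trimesh.load('mesh.obj')  # placeholder path
image = render_img_from_mesh(mesh, e=30, a=45, d=2.5, file_name='render.png')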
Example no. 12
    def __init__(self, opt):
        self.opt = opt
        self.render_size = opt.crop_size
        d = './3ddfa/train.configs'
        w_shp = _load(osp.join(d, 'w_shp_sim.npy'))
        w_exp = _load(osp.join(d, 'w_exp_sim.npy'))  # simplified version
        u_shp = _load(osp.join(d, 'u_shp.npy'))
        u_exp = _load(osp.join(d, 'u_exp.npy'))
        self.keypoints = _load(osp.join(d, 'keypoints_sim.npy'))
        self.texMU = _load(osp.join(d, 'texMU.npy'))  # 159645 * 1
        self.texPC = _load(osp.join(d, 'texPC.npy'))  # 159645 * 199
        self.texEV = _load(osp.join(d, 'texEV.npy'))  # 199 * 1
        self.n_tex_para = len(self.texEV)
        self.pose_noise = opt.pose_noise
        self.large_pose = opt.large_pose
        u = u_shp + u_exp
        tri = sio.loadmat('./3ddfa/visualize/tri.mat')['tri']  # 3 * 53215
        faces_np = np.expand_dims(tri.T, axis=0).astype(np.int32) - 1

        self.std_size = 120

        self.current_gpu = opt.gpu_ids
        with torch.cuda.device(self.current_gpu):
            self.faces = torch.from_numpy(faces_np).cuda()
            self.renderer = nr.Renderer(camera_mode='look',
                                        image_size=self.render_size,
                                        perspective=False,
                                        light_intensity_directional=0,
                                        light_intensity_ambient=1)
            self.u_cuda = torch.from_numpy(u.astype(np.float32)).cuda()
            self.w_shp_cuda = torch.from_numpy(w_shp.astype(np.float32)).cuda()
            self.w_exp_cuda = torch.from_numpy(w_exp.astype(np.float32)).cuda()
Example no. 13
    def __init__(self,
                 vertices,
                 faces,
                 img_size=64,
                 mode='silhouettes',
                 dataset='CVPR18'):
        super(Mesh_Renderer, self).__init__()
        if dataset == 'CVPR18':
            self.elevation = 30.
            self.distance = 2.732
            vAngle = 15
        elif dataset == 'NIPS17':
            vAngle = 14.9314
        else:
            raise ValueError('unknown dataset: %s' % dataset)
        self.vertices = vertices
        self.register_buffer('faces', faces)
        self.img_size = img_size
        # create textures
        texture_size = 2
        textures = torch.ones(self.faces.shape[0],
                              self.faces.shape[1],
                              texture_size,
                              texture_size,
                              texture_size,
                              3,
                              dtype=torch.float32)
        self.register_buffer('textures', textures)
        self.mode = mode
        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at',
                               image_size=self.img_size,
                               viewing_angle=vAngle)
        self.renderer = renderer
Example no. 14
    def __init__(self, image_size, anti_aliasing, camera_mode, perspective):
        renderer = neural_renderer.Renderer(image_size=image_size,
                                            anti_aliasing=anti_aliasing,
                                            camera_mode=camera_mode,
                                            perspective=perspective,
                                            background_color=[0, 0, 0])
        self.renderer = renderer
Example no. 15
    def __init__(self, cfg):
        super(DepthRenderer, self).__init__()
        min_depth = cfg.MODEL.MVSNET.MIN_DEPTH
        max_depth = min_depth + (cfg.MODEL.MVSNET.DEPTH_INTERVAL \
                                    * cfg.MODEL.MVSNET.NUM_DEPTHS)
        self.renderer = nr.Renderer(
            camera_mode='projection',
            near=min_depth, far=max_depth,
            anti_aliasing=False
        )
        fx, fy = cfg.MODEL.MVSNET.FOCAL_LENGTH
        cx, cy = cfg.MODEL.MVSNET.PRINCIPAL_POINT
        self.camera_k = torch.tensor(
            [[fx, 0, cx],
             [0, fy, cy],
             [0, 0, 1]],
            dtype=torch.float32
        )
        self.dist_coeffs = torch.zeros(5, dtype=torch.float32)

        # conversion from shapenet convention (East-Up-South)
        # to renderer convention (East-Down-North)
        # final rotation: R_renderer_shapenet * extrinsics
        # inverse y and z, equivalent to inverse x, but gives positive z
        rvec = np.array([np.pi, 0., 0.], dtype=np.float32)
        R = cv2.Rodrigues(rvec)[0]
        T = np.eye(4, dtype=np.float32)
        T[:3, :3] = R
        self.T_renderer_shapenet = torch.from_numpy(T)
        self.T_shapenet_renderer = torch.inverse(self.T_renderer_shapenet)
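
DepthRenderer stops after building the camera; a depth pass would expand the stored intrinsics to the batch and hand the convention-corrected extrinsics to the projection renderer. A sketch assuming this neural_renderer version accepts K/R/t/dist_coeffs at call time (the method name and argument shapes are assumptions; R is Bx3x3 and t is Bx1x3 in nr's projection convention):

    # Hypothetical method for the DepthRenderer above.
    def render_depth(self, vertices, faces, R, t):
        batch = vertices.shape[0]
        K = self.camera_k[None].expand(batch, -1, -1).to(vertices.device)
        dist = self.dist_coeffs[None].expand(batch, -1).to(vertices.device)
        # rotate from the ShapeNet frame into the renderer frame
        R_fix = self.T_renderer_shapenet[:3, :3].to(vertices.device)
        R = torch.matmul(R_fix[None], R)
        return self.renderer(vertices, faces, mode='depth',
                             K=K, R=R, t=t, dist_coeffs=dist)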
Example no. 16
    def test_texture(self):
        position, rotation = position_rotation_from_angles(2, 15, 30)
        camera = nr.Camera(position=position, rotation=rotation)
        renderer = nr.Renderer(camera=camera)

        vertices, faces, textures = nr.load_obj(os.path.join(
            data_dir, '1cde62b063e14777c9152a706245d48/model.obj'),
                                                load_texture=True)

        images = renderer(vertices[None, :, :], faces[None, :, :],
                          textures[None, :, :, :, :, :]).permute(
                              0, 2, 3, 1).detach().cpu().numpy()
        image = (images[0] * 255).astype(np.uint8)
        imsave(os.path.join(data_dir, 'car.png'), image)

        vertices, faces, textures = nr.load_obj(os.path.join(
            data_dir, '4e49873292196f02574b5684eaec43e9/model.obj'),
                                                load_texture=True,
                                                texture_size=16)
        position, rotation = position_rotation_from_angles(2, 15, -90)
        renderer.camera.position = position
        renderer.camera.rotation = rotation
        images = renderer(vertices[None, :, :], faces[None, :, :],
                          textures[None, :, :, :, :, :]).permute(
                              0, 2, 3, 1).detach().cpu().numpy()
        image = (images[0] * 255).astype(np.uint8)
        imsave(os.path.join(data_dir, 'display.png'), image)
Example no. 17
    def __init__(self, filename_obj, filename_ref=None):
        super(Model, self).__init__()

        with self.init_scope():
            # load .obj
            vertices, faces = neural_renderer.load_obj(filename_obj)
            self.vertices = vertices[None, :, :]
            self.faces = faces[None, :, :]

            # create textures
            texture_size = 2
            textures = np.ones((1, self.faces.shape[1], texture_size,
                                texture_size, texture_size, 3), 'float32')
            self.textures = textures

            # load reference image
            if filename_ref is not None:
                self.image_ref = (scipy.misc.imread(filename_ref).max(-1) !=
                                  0).astype('float32')
            else:
                self.image_ref = None

            # camera parameters
            self.camera_position = chainer.Parameter(
                np.array([6, 10, -14], 'float32'))

            # setup renderer
            renderer = neural_renderer.Renderer()
            renderer.eye = self.camera_position
            self.renderer = renderer
Example no. 18
    def stest_forward_case2(self):
        data = [[
            './tests/data/4e49873292196f02574b5684eaec43e9/model.obj',
            neural_renderer.get_points_from_angles(2.5, 10, -90),
            './tests/data/4e49873292196f02574b5684eaec43e9.png',
        ],
                [
                    './tests/data/1cde62b063e14777c9152a706245d48/model.obj',
                    neural_renderer.get_points_from_angles(2.5, 10, 60),
                    './tests/data/1cde62b063e14777c9152a706245d48.png',
                ]]

        renderer = neural_renderer.Renderer()
        renderer.draw_backside = False
        for i, (filename, viewpoint, reference) in enumerate(data):
            renderer.viewpoints = viewpoint
            ref = neural_renderer.imread(reference)

            vertices, faces, vertices_t, faces_t, textures = neural_renderer.load_obj(
                filename, load_textures=True)
            vertices, faces, vertices_t, faces_t, textures = neural_renderer.to_gpu(
                (vertices[None, :, :], faces, vertices_t[None, :, :], faces_t,
                 textures[None, :, :, :]))

            images = renderer.render(vertices, faces, vertices_t, faces_t,
                                     textures).data
            image = images[0].transpose((1, 2, 0))
            # imageio.toimage(image.get(), cmin=0, cmax=1).save(reference)

            chainer.testing.assert_allclose(ref, image, atol=1e-2)
Example no. 19
    def test_backward_case2(self):
        vertices = [[0.8, 0.8, 1.], [-0.5, -0.8, 1.], [0.8, -0.8, 1.]]
        faces = [[0, 1, 2]]
        pyi = 40
        pxi = 50

        renderer = neural_renderer.Renderer()
        renderer.image_size = 64
        renderer.anti_aliasing = False
        renderer.perspective = False

        vertices = chainer.Variable(cp.array(vertices, 'float32'))
        faces = cp.array(faces, 'int32')
        images = renderer.render_silhouettes(vertices[None, :, :],
                                             faces[None, :, :])
        loss = cf.sum(cf.absolute(images[:, pyi, pxi]))
        loss.backward()

        for i in range(3):
            for j in range(2):
                axis = 'x' if j == 0 else 'y'
                vertices2 = cp.copy(vertices.data)
                vertices2[i, j] -= 1. / vertices.grad[i, j]
                images = renderer.render_silhouettes(vertices2[None, :, :],
                                                     faces[None, :, :])
                image = np.tile(images[0].data.get()[:, :, None], (1, 1, 3))
                image[pyi, pxi] = [1, 0, 0]
                ref = scipy.misc.imread(
                    './tests/data/rasterize_silhouettes_case2_v%d_%s.png' %
                    (i, axis))
                ref = ref.astype('float32') / 255
                chainer.testing.assert_allclose(ref, image)
Example no. 20
    def __init__(self, opt):
        self.opt = opt
        self.render_size = opt.crop_size
        self.d = './3ddfa/train.configs'
        w_shp = _load(osp.join(self.d, 'w_shp_sim.npy'))
        w_exp = _load(osp.join(self.d, 'w_exp_sim.npy'))  # simplified version
        u_shp = _load(osp.join(self.d, 'u_shp.npy'))
        u_exp = _load(osp.join(self.d, 'u_exp.npy'))
        self.keypoints = _load(osp.join(self.d, 'keypoints_sim.npy'))
        self.pose_noise = getattr(opt, 'pose_noise', False)
        self.large_pose = getattr(opt, 'large_pose', False)
        u = u_shp + u_exp
        tri = sio.loadmat('./3ddfa/visualize/tri.mat')['tri']   # 3 * 53215
        faces_np = np.expand_dims(tri.T, axis=0).astype(np.int32) - 1

        self.std_size = 120

        self.current_gpu = opt.gpu_ids
        with torch.cuda.device(self.current_gpu):
            self.faces = torch.from_numpy(faces_np).cuda()
            self.renderer = nr.Renderer(camera_mode='look', image_size=self.render_size, perspective=False,
                                        light_intensity_directional=0, light_intensity_ambient=1)
            self.u_cuda = torch.from_numpy(u.astype(np.float32)).cuda()
            self.w_shp_cuda = torch.from_numpy(w_shp.astype(np.float32)).cuda()
            self.w_exp_cuda = torch.from_numpy(w_exp.astype(np.float32)).cuda()
Example no. 21
    def test_backward_case1(self):
        vertices = [
            [-0.9, -0.9, 2.],
            [-0.8, 0.8, 1.],
            [0.8, 0.8, 0.5]]
        faces = [[0, 1, 2]]

        renderer = nr.Renderer(camera_mode='look_at')
        renderer.image_size = 64
        renderer.anti_aliasing = False
        renderer.perspective = False
        renderer.camera_mode = 'none'

        vertices = torch.from_numpy(np.array(vertices, np.float32)).cuda()
        faces = torch.from_numpy(np.array(faces, np.int32)).cuda()
        vertices, faces = utils.to_minibatch((vertices, faces))
        vertices.requires_grad = True

        images = renderer(vertices, faces, mode='depth')
        loss = torch.sum((images[0, 15, 20] - 1)**2)
        loss.backward()
        grad = vertices.grad.clone()
        grad2 = torch.zeros_like(grad)  # stay in torch; np.zeros_like fails on CUDA tensors

        for i in range(3):
            for j in range(3):
                eps = 1e-3
                vertices2 = vertices.clone()
                vertices2[i, j] += eps
                images = renderer.render_depth(vertices2, faces)
                loss2 = torch.sum((images[0, 15, 20] - 1)**2)
                grad2[i, j] = ((loss2 - loss) / eps).item()

        assert(torch.allclose(grad, grad2, atol=1e-3))
Example no. 22
    def __init__(self, vertices, faces, filename_ref, camera_x, camera_y,
                 camera_z):
        super(ModelMorphing, self).__init__()

        self.vertices = nn.Parameter(vertices)
        self.register_buffer('faces', faces)

        # create textures
        texture_size = 2
        textures = torch.ones(1,
                              self.faces.shape[1],
                              texture_size,
                              texture_size,
                              texture_size,
                              3,
                              dtype=torch.float32)
        self.register_buffer('textures', textures)

        # load reference image
        image_ref = torch.from_numpy(
            imread(filename_ref).astype(np.float32).mean(-1) / 255.)[None, ::]
        self.register_buffer('image_ref', image_ref)

        # save camera parameters
        self.camera_x = camera_x
        self.camera_y = camera_y
        self.camera_z = camera_z

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at', far=200)
        renderer.eye = (self.camera_x, self.camera_y, self.camera_z)
        self.renderer = renderer
Example no. 23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename_input', type=str, default=os.path.join(data_dir, 'teapot.obj'))
    parser.add_argument('-o', '--filename_output', type=str, default=os.path.join(data_dir, 'example1.gif'))
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    # other settings
    camera_distance = 2.732
    elevation = 30
    texture_size = 2

    # load .obj
    vertices, faces = nr.load_obj(args.filename_input)
    vertices = vertices[None, :, :]  # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
    faces = faces[None, :, :]  # [num_faces, 3] -> [batch_size=1, num_faces, 3]

    # create texture [batch_size=1, num_faces, texture_size, texture_size, texture_size, RGB]
    textures = torch.ones(1, faces.shape[1], texture_size, texture_size, texture_size, 3, dtype=torch.float32).cuda()

    # to gpu: nr.load_obj already returns CUDA tensors, so no explicit transfer is needed

    # create renderer
    renderer = nr.Renderer(camera_mode='look_at')

    # draw object
    loop = tqdm.tqdm(range(0, 360, 4))
    writer = imageio.get_writer(args.filename_output, mode='I')
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        renderer.eye = nr.get_points_from_angles(camera_distance, elevation, azimuth)
        images = renderer(vertices, faces, textures)  # [batch_size, RGB, image_size, image_size]
        image = images.detach().cpu().numpy()[0].transpose((1, 2, 0))  # [image_size, image_size, RGB]
        writer.append_data((255*image).astype(np.uint8))
    writer.close()
Example no. 24
    def test_texture(self):
        renderer = neural_renderer.Renderer()

        vertices, faces, textures = neural_renderer.load_obj(
            './tests/data/1cde62b063e14777c9152a706245d48/model.obj',
            load_texture=True)

        vertices = chainer.cuda.to_gpu(vertices)
        faces = chainer.cuda.to_gpu(faces)
        textures = chainer.cuda.to_gpu(textures)
        renderer.eye = neural_renderer.get_points_from_angles(2, 15, 30)
        images = renderer.render(vertices[None, :, :], faces[None, :, :],
                                 textures[None, :, :, :, :, :]).data.get()
        scipy.misc.imsave('./tests/data/car.png',
                          scipy.misc.toimage(images[0]))

        vertices, faces, textures = neural_renderer.load_obj(
            './tests/data/4e49873292196f02574b5684eaec43e9/model.obj',
            load_texture=True,
            texture_size=16)
        vertices = chainer.cuda.to_gpu(vertices)
        faces = chainer.cuda.to_gpu(faces)
        textures = chainer.cuda.to_gpu(textures)
        renderer.eye = neural_renderer.get_points_from_angles(2, 15, -90)
        images = renderer.render(vertices[None, :, :], faces[None, :, :],
                                 textures[None, :, :, :, :, :]).data.get()
        scipy.misc.imsave('./tests/data/display.png',
                          scipy.misc.toimage(images[0]))
Example no. 25
    def test_backward_case2(self):
        """Backward if non-zero gradient is on a face."""

        vertices = [
            [0.8, 0.8, 1.],
            [-0.5, -0.8, 1.],
            [0.8, -0.8, 1.]]
        faces = [[0, 1, 2]]
        pyi = 40
        pxi = 50
        grad_ref = [
            [0.98646867, 1.04628897, 0.],
            [-1.03415668, -0.10403691, 0.],
            [3.00094461, -1.55173182, 0.],
        ]

        renderer = nr.Renderer(camera_mode='look_at')
        renderer.image_size = 64
        renderer.anti_aliasing = False
        renderer.perspective = False

        vertices = torch.from_numpy(np.array(vertices, np.float32)).cuda()
        faces = torch.from_numpy(np.array(faces, np.int32)).cuda()
        grad_ref = torch.from_numpy(np.array(grad_ref, np.float32)).cuda()
        vertices, faces, grad_ref = utils.to_minibatch((vertices, faces, grad_ref))
        vertices.requires_grad = True
        images = renderer(vertices, faces, mode='silhouettes')
        loss = torch.sum(torch.abs(images[:, pyi, pxi]))
        loss.backward()

        assert(torch.allclose(vertices.grad, grad_ref, rtol=1e-2))
Example no. 26
    def __init__(self,
                 args):
        super(CircleNet, self).__init__()
        self.num_nodes = args.num_nodes
        self.dec_dim = args.dec_dim
        self.dec_size = args.dec_size
        self.image_size = args.image_size
        self.stages = args.stages

        if args.arch == 'resnet':
            kwargs = {'stages': self.stages}
            res_dims = [256, 512, 1024, 2048]
            self.backbone = resnet_encoder(pretrained=True, **kwargs)
            dec_skip_dims = [res_dims[i] for i in self.stages][::-1]
            self.disp = resnet_decoder(dec_skip_dims, 2, self.dec_dim, self.dec_size, drop=args.drop)
        elif args.arch == 'unet':
            self.backbone = unet_encoder(args.enc_dim, drop=args.drop)
            self.disp = unet_decoder(self.backbone.dims, drop=args.drop)

        self.texture_size = 2
        self.camera_distance = 1
        self.elevation = 0
        self.azimuth = 0
        self.renderer = nr.Renderer(camera_mode='look_at', image_size=self.image_size, light_intensity_ambient=1,
                                    light_intensity_directional=1, perspective=False)
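
CircleNet stores camera_distance, elevation, and azimuth but never points the look_at camera in __init__; a forward pass would typically aim it with the standard nr helper, roughly:

        # Hypothetical: aim the look_at camera from the stored angles.
        self.renderer.eye = nr.get_points_from_angles(
            self.camera_distance, self.elevation, self.azimuth)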
Example no. 27
    def __init__(self, filename_obj, filename_ref):
        super(Model, self).__init__()

        # load .obj
        vertices, faces = nr.load_obj(filename_obj)
        self.vertices = nn.Parameter(vertices[None, :, :])
        self.register_buffer('faces', faces[None, :, :])

        # create textures
        texture_size = 2
        textures = torch.ones(1,
                              self.faces.shape[1],
                              texture_size,
                              texture_size,
                              texture_size,
                              3,
                              dtype=torch.float32)
        self.register_buffer('textures', textures)

        # load reference image
        image_ref = torch.from_numpy(
            imread(filename_ref).astype(np.float32).mean(-1) / 255.)[None, ::]
        self.register_buffer('image_ref', image_ref)

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at')
        self.renderer = renderer
Example no. 28
    def test_texture(self):
        renderer = nr.Renderer(camera_mode='look_at')

        vertices, faces, textures = nr.load_obj(os.path.join(
            data_dir, '1cde62b063e14777c9152a706245d48/model.obj'),
                                                load_texture=True)

        renderer.eye = nr.get_points_from_angles(2, 15, 30)
        images, depth, silhouette = renderer.render(
            vertices[None, :, :], faces[None, :, :],
            textures[None, :, :, :, :, :])
        images = images.permute(0, 2, 3, 1).detach().cpu().numpy()
        silhouette = silhouette.detach().cpu().numpy()
        depth = depth.detach().cpu().numpy()
        imsave(os.path.join(data_dir, 'car.png'), images[0])
        cv2.imshow("r", images[0, :, :, ::-1])
        cv2.imshow("d", ColorizeDepth(depth[0], 1.5, 2.5))
        cv2.imshow("s", silhouette[0])
        cv2.waitKey()

        vertices, faces, textures = nr.load_obj(os.path.join(
            data_dir, '4e49873292196f02574b5684eaec43e9/model.obj'),
                                                load_texture=True,
                                                texture_size=16)
        renderer.eye = nr.get_points_from_angles(2, 15, -90)
        images, _, _ = renderer.render(vertices[None, :, :], faces[None, :, :],
                                       textures[None, :, :, :, :, :])
        images = images.permute(0, 2, 3, 1).detach().cpu().numpy()
        imsave(os.path.join(data_dir, 'display.png'), images[0])
Example no. 29
    def __init__(self, filename_obj, filename_ref=None):
        super(Model, self).__init__()
        # load .obj
        vertices, faces = nr.load_obj(filename_obj)
        self.register_buffer('vertices', vertices[None, :, :])
        self.register_buffer('faces', faces[None, :, :])

        # create textures
        texture_size = 2
        textures = torch.ones(1,
                              self.faces.shape[1],
                              texture_size,
                              texture_size,
                              texture_size,
                              3,
                              dtype=torch.float32)
        self.register_buffer('textures', textures)

        # load reference image
        image_ref = torch.from_numpy(
            (imread(filename_ref).max(-1) != 0).astype(np.float32))
        self.register_buffer('image_ref', image_ref)

        # camera parameters
        self.camera_position = nn.Parameter(
            torch.from_numpy(np.array([6, 10, -14], dtype=np.float32)))

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at')
        renderer.eye = self.camera_position
        self.renderer = renderer
Example no. 30
    def __init__(self, filename_obj):
        super(Model, self).__init__()
        # Load mesh vertices and faces
        vertices, faces = load_off(filename_obj)

        self.register_buffer('vertices', vertices[None, :, :])
        self.register_buffer('faces', faces[None, :, :])

        # create textures
        texture_size = 2
        textures = torch.ones(1,
                              self.faces.shape[1],
                              texture_size,
                              texture_size,
                              texture_size,
                              3,
                              dtype=torch.float32)
        self.register_buffer('textures', textures)

        # camera parameters
        self.camera_position = nn.Parameter(
            torch.from_numpy(np.array([3, 5, -7], dtype=np.float32)))

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at')
        renderer.eye = self.camera_position
        self.renderer = renderer

        self.entropy = 0