Exemplo n.º 1
0
def main():
    """Optimize mesh textures against a reference image, saving a GIF of the
    optimization progress and a 360-degree turntable GIF of the result."""
    args = parse_arguments()

    # --- Model setup ---
    model = Model(args.mesh, args.image, args)
    model.cuda()

    # --- Optimization loop ---
    progress = tqdm.tqdm(range(args.epochs))
    progress.set_description('Optimizing')

    trainable = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(trainable)

    os.makedirs(args.output_path, exist_ok=True)
    gif_path = os.path.join(args.output_path, 'example2_optimization.gif')
    writer = imageio.get_writer(gif_path, mode='I')
    for _ in progress:
        optimizer.zero_grad()
        loss = model()
        loss.backward()
        optimizer.step()

        # Render the current state and append it as a GIF frame.
        images, _, _ = model.renderer(
            model.vertices, model.faces, model.textures)
        frame = images.detach()[0].permute(1, 2, 0).cpu().numpy()
        writer.append_data((255 * frame).astype(np.uint8))

    writer.close()

    # --- Turntable render of the optimized mesh ---
    progress = tqdm.tqdm(range(0, 360, 4))
    progress.set_description('Drawing')

    os.makedirs(args.output_path, exist_ok=True)
    gif_path = os.path.join(args.output_path, 'example2_mesh.gif')
    writer = imageio.get_writer(gif_path, mode='I')
    for azimuth in progress:
        # Move the camera around the mesh in 4-degree steps.
        model.renderer.eye = get_points_from_angles(
            args.camera_distance, args.elevation, azimuth)

        images, _, _ = model.renderer(
            model.vertices, model.faces, model.textures)
        frame = images.detach()[0].permute(1, 2, 0).cpu().numpy()
        writer.append_data((255 * frame).astype(np.uint8))

    writer.close()
Exemplo n.º 2
0
 def setup_camera(self):
     """Configure a look-at neural mesh renderer lit purely by ambient light."""
     renderer = kaolin.graphics.NeuralMeshRenderer(camera_mode='look_at')
     # Ambient lighting only: disable the directional component.
     renderer.light_intensity_directional = 0.0
     renderer.light_intensity_ambient = 1.0
     # Fixed viewpoint at distance 0.4, zero elevation and azimuth.
     distance, elevation, azimuth = 0.4, 0.0, 0.0
     renderer.eye = get_points_from_angles(distance, elevation, azimuth)
     self.renderer = renderer
Exemplo n.º 3
0
    def forward(self):
        """Render the mesh as a silhouette and return the sum of squared
        pixel differences against the reference silhouette."""
        # Place the camera at the configured spherical coordinates.
        self.renderer.eye = get_points_from_angles(
            self.args.camera_distance, self.args.elevation, self.args.azimuth)
        silhouette = self.renderer(self.vertices, self.faces, mode='silhouettes')
        # Broadcast the reference over the batch dimension before comparing.
        return torch.sum((silhouette - self.image_ref[None, :, :]) ** 2)
Exemplo n.º 4
0
def main():
    """Load a mesh, texture it uniformly white, and save a 360-degree
    turntable render as a GIF."""
    args = parse_arguments()

    # --- Load mesh ---
    mesh = TriangleMesh.from_obj(args.mesh)
    mesh.cuda()
    # Normalize into the unit cube and add a batch dimension of 1.
    vertices = normalize_vertices(mesh.vertices).unsqueeze(0)
    faces = mesh.faces.unsqueeze(0)

    # --- Constant white texture in NMR's per-face texture-cube format ---
    texture_shape = (1, faces.shape[1], args.texture_size, args.texture_size,
                     args.texture_size, 3)
    textures = torch.ones(*texture_shape, dtype=torch.float32, device='cuda')

    # --- Render a full turn in 4-degree steps ---
    renderer = Renderer(camera_mode='look_at')

    progress = tqdm.tqdm(range(0, 360, 4))
    progress.set_description('Drawing')

    os.makedirs(args.output_path, exist_ok=True)
    gif_path = os.path.join(args.output_path, 'example1.gif')
    writer = imageio.get_writer(gif_path, mode='I')
    for azimuth in progress:
        renderer.eye = get_points_from_angles(args.camera_distance,
                                              args.elevation, azimuth)

        images, _, _ = renderer(vertices, faces, textures)

        # [image_size, image_size, RGB]
        frame = images.detach()[0].permute(1, 2, 0).cpu().numpy()
        writer.append_data((255 * frame).astype(np.uint8))

    writer.close()
Exemplo n.º 5
0
    def forward(self):
        """Render the textured mesh from a random azimuth and return the sum
        of squared pixel differences against the reference image."""
        # Random viewpoint each call; distance and elevation stay fixed.
        azimuth = np.random.uniform(0, 360)
        self.renderer.eye = get_points_from_angles(
            self.args.camera_distance, self.args.elevation, azimuth)
        # tanh keeps the optimized texture values bounded in (-1, 1).
        rendered, _, _ = self.renderer(
            self.vertices, self.faces, torch.tanh(self.textures))
        return torch.sum((rendered - self.image_ref) ** 2)
Exemplo n.º 6
0
def render_image(gaussian_blur,
                 gaussian_filter,
                 args,
                 vertices,
                 faces,
                 textures,
                 smpl_output,
                 image_size=256):
    """Render the mesh from a fixed rear view (azimuth 180 degrees) and
    return the image as an HxWx3 tensor, optionally Gaussian-blurred.

    Args:
        gaussian_blur: If truthy, blur the rendered image with
            ``gaussian_filter``.
        gaussian_filter: Callable blur module applied in NCHW layout.
        args: Namespace providing ``camera_distance`` and ``elevation``.
        vertices, faces, textures: Batched mesh tensors for the renderer.
        smpl_output: Unused; kept for backward-compatible call signatures.
        image_size: Output resolution forwarded to the renderer.

    Returns:
        Tensor of shape (image_size, image_size, 3).
    """
    # BUG FIX: ``image_size`` was accepted but never forwarded, so the
    # renderer silently used its default resolution regardless of the
    # argument. Forward it, matching the other call sites in this project.
    renderer = Renderer(camera_mode='look_at', image_size=image_size)
    azimuth = 180  # rear view
    renderer.eye = get_points_from_angles(args.camera_distance, args.elevation,
                                          azimuth)
    images, _, _ = renderer(vertices, faces, textures)
    true_image = images[0].permute(1, 2, 0)
    if gaussian_blur:
        # Blur expects NCHW; permute in, blur, permute back, drop batch dim.
        true_image = gaussian_filter(
            true_image.unsqueeze(0).permute(0, 3, 2, 1)).permute(0, 3, 2, 1)[0]
    return true_image
Exemplo n.º 7
0
def optimize_sequence(true_poses, gt_translation, args, save_path: str):
    """Fit SMPL body poses to a rendered target sequence, frame by frame.

    For each sampled frame a "true" image is rendered from the ground-truth
    pose; a perturbed pose is then optimized with Adam so that its rendering
    (optionally Gaussian-blurred and/or coarse-to-fine) matches the target.

    Args:
        true_poses: Indexable sequence of ground-truth SMPL body poses
            (assumed shape (1, 69) each — TODO confirm against caller).
        gt_translation: Unused in this function.
        args: Hyperparameter namespace (iterations, blur, coarse-to-fine,
            priors, camera, texture/image sizes, ...).
        save_path: Directory where per-iteration debug renderings are written.

    Returns:
        Tuple ``(losses_frames, pose_losses_frames, result_images,
        init_images, true_images)`` — per-frame loss curves and uint8 images.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Hard-coded asset paths relative to the script's working directory.
    smpl_file_name = "../SMPLs/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl"
    uv_map_file_name = "../textures/smpl_uv_map.npy"
    uv = np.load(uv_map_file_name)  # NOTE(review): loaded but unused below
    texture_file_name = "../textures/female1.jpg"
    with open(texture_file_name, 'rb') as file:
        texture = Image.open(BytesIO(file.read()))  # NOTE(review): unused below
    results = []  # NOTE(review): never appended to or returned
    model = smplx.create(smpl_file_name, model_type='smpl')
    model = model.to(device)
    gaussian_filter = get_gaussian_filter(args.kernel_size, args.sigma)
    gaussian_filter = gaussian_filter.to(device)
    # Fixed body-shape coefficients shared by all frames.
    betas = torch.tensor([[
        -0.3596, -1.0232, -1.7584, -2.0465, 0.3387, -0.8562, 0.8869, 0.5013,
        0.5338, -0.0210
    ]]).to(device)

    true_images = []
    init_images = []
    result_images = []
    losses_frames = []
    pose_losses_frames = []
    iterations = args.iterations
    # for f_id, true_pose in tqdm(enumerate(true_poses[150:170:10])):
    # Sample every 20th frame from the [150, 300) range of the sequence.
    for f_id, true_pose in tqdm(enumerate(true_poses[150:300:20])):
        if f_id > 0:
            # Later frames warm-start from the previous frame's result, so
            # fewer iterations are used.
            iterations = 200
        losses = []
        pose_losses = []
        # Ground-truth mesh for this frame.
        output_true = model(betas=betas,
                            return_verts=True,
                            body_pose=true_pose)
        faces = torch.tensor(model.faces * 1.0).to(device)
        mesh_true = TriangleMesh.from_tensors(output_true.vertices[0], faces)
        vertices_true = mesh_true.vertices.unsqueeze(0)
        faces = mesh_true.faces.unsqueeze(0)
        # Constant white texture in NMR's per-face texture-cube format.
        textures = torch.ones(1,
                              faces.shape[1],
                              args.texture_size,
                              args.texture_size,
                              args.texture_size,
                              3,
                              dtype=torch.float32,
                              device='cuda')
        renderer_full = Renderer(camera_mode='look_at',
                                 image_size=args.image_size)
        renderer_full.eye = get_points_from_angles(args.camera_distance,
                                                   args.elevation,
                                                   args.azimuth)
        # Full-resolution target image; the raw version is kept for output.
        images, _, _ = renderer_full(vertices_true, faces, textures)
        true_image = images[0].permute(1, 2, 0)
        true_images.append(
            (255 * true_image.detach().cpu().numpy()).astype(np.uint8))
        if args.gaussian_blur:
            # Blur in NCHW layout, then back to HWC.
            true_image = gaussian_filter(
                true_image.unsqueeze(0).permute(0, 3, 2,
                                                1)).permute(0, 3, 2, 1)[0]
        true_image = true_image.detach()
        if f_id == 0 or args.init_pose == "zero":
            # Cold start from the zero (rest) pose.
            perturbed_pose = torch.zeros(69).view(1, -1).to(device)
        else:
            # Warm start: keep the previous frame's optimized pose
            # (self-assignment is a no-op kept for symmetry).
            perturbed_pose = perturbed_pose
        perturbed_pose = Variable(perturbed_pose, requires_grad=True)
        optim = torch.optim.Adam([perturbed_pose], lr=1e-2)
        image_size = args.image_size
        if args.coarse_to_fine:
            image_size = int(image_size / 2**args.coarse_to_fine_steps)
            # NOTE(review): the next line immediately overwrites the reduced
            # size, disabling the coarse start — possibly unintentional.
            image_size = args.image_size
        kernel_size = args.kernel_size
        renderer = renderer_full
        for i in range(iterations):
            # print("Iter: ", i, "kernel size: ", kernel_size)
            # Periodically re-render the target with a halved blur kernel.
            if args.blur_to_no_blur and i % int(
                    iterations / args.blur_to_no_blur_steps) == 0:
                gaussian_filter = get_gaussian_filter(kernel_size,
                                                      sigma=args.sigma)
                gaussian_filter = gaussian_filter.to(device)
                images, _, _ = renderer(vertices_true, faces, textures)
                true_image = images[0].permute(1, 2, 0)
                true_image = gaussian_filter(
                    true_image.unsqueeze(0).permute(0, 3, 2,
                                                    1)).permute(0, 3, 2, 1)[0]
                true_image = true_image.detach()
                kernel_size = int(kernel_size / 2)
            # Periodically double the rendering resolution (coarse-to-fine)
            # and re-render the target at the new size.
            if args.coarse_to_fine and i % int(
                    iterations / args.coarse_to_fine_steps) == 0:
                renderer = Renderer(camera_mode='look_at',
                                    image_size=image_size)
                renderer.eye = get_points_from_angles(args.camera_distance,
                                                      args.elevation,
                                                      args.azimuth)
                images, _, _ = renderer(vertices_true, faces, textures)
                true_image = images[0].permute(1, 2, 0)
                if args.gaussian_blur:
                    true_image = gaussian_filter(
                        true_image.unsqueeze(0).permute(0, 3, 2, 1)).permute(
                            0, 3, 2, 1)[0]
                true_image = true_image.detach()
                image_size *= 2
            optim.zero_grad()
            # Forward SMPL with the current pose estimate.
            output = model(betas=betas,
                           return_verts=True,
                           body_pose=perturbed_pose)
            vertices_goal = output.vertices[0]
            mesh = TriangleMesh.from_tensors(vertices_goal, faces)  # NOTE(review): unused
            vertices = vertices_goal.unsqueeze(0)
            images, _, _ = renderer(vertices, faces, textures)
            image = images[0].permute(1, 2, 0)
            if i == 0:
                # Record the initial (pre-optimization) rendering.
                perturbed_images, _, _ = renderer_full(vertices, faces,
                                                       textures)
                perturbed_image = perturbed_images[0].permute(1, 2, 0)
                perturbed_image = (
                    255 * perturbed_image.detach().cpu().numpy()).astype(
                        np.uint8)
                init_images.append(perturbed_image)
            if args.gaussian_blur:
                image = gaussian_filter(
                    image.unsqueeze(0).permute(0, 3, 2,
                                               1)).permute(0, 3, 2, 1)[0]
            if i == iterations - 1:
                # Final full-resolution rendering for this frame.
                images, _, _ = renderer_full(vertices, faces, textures)
                result_image = images[0].permute(1, 2, 0)
            # Photometric loss: L1 mean or RMS depending on configuration.
            if args.photo_loss == "L1":
                loss = (image - true_image).abs().mean()
            else:
                loss = ((image - true_image)**2).mean().sqrt()
            # Pose error against ground truth (logged only, not optimized).
            pose_loss = (perturbed_pose - true_pose).abs().mean()
            # angle prior for elbow and knees
            if args.angle_prior:
                # Angle prior for knees and elbows
                angle_prior_loss = (args.angle_prior_weight**
                                    2) * angle_prior(perturbed_pose).sum(
                                        dim=-1)[0]
                print("Angle Prior: ", angle_prior_loss.item())
                # Pose prior loss
                # NOTE(review): MaxMixturePrior is reconstructed (and its data
                # reloaded from disk) every iteration — consider hoisting it
                # out of the loop.
                pose_prior = MaxMixturePrior(prior_folder='SPIN/data',
                                             num_gaussians=8,
                                             dtype=torch.float32).to(device)
                pose_prior_loss = (args.pose_prior_weight**2) * pose_prior(
                    perturbed_pose, betas)[0]
                print("Pose Prior: ", pose_prior_loss.item())
                loss += angle_prior_loss + pose_prior_loss
                # Pose prior loss
                # pose_prior_loss = (pose_prior_weight ** 2) * pose_prior(body_pose, betas)
            print("Iter: {} Loss: {}".format(i, loss.item()))
            loss.backward()
            optim.step()
            losses.append(loss.item())
            pose_losses.append(pose_loss.item())
            imageio.imwrite("{}/iteration_{:03d}.png".format(save_path, i),
                            (255 * image.detach().cpu().numpy()).astype(
                                np.uint8))
        result_images.append(
            (255 * result_image.detach().cpu().numpy()).astype(np.uint8))
        losses_frames.append(losses)
        pose_losses_frames.append(pose_losses)
    return losses_frames, pose_losses_frames, result_images, init_images, true_images
Exemplo n.º 8
0
def optimize(args, save_path):
    """Optimize SMPL pose (and optionally shape) to match a rendered target.

    Renders a target image from fixed betas in the rest pose, perturbs the
    pose (and optionally the betas), then minimizes the L1 image difference
    with Adam — optionally with Gaussian blur and coarse-to-fine rendering.

    Args:
        args: Hyperparameter namespace (iterations, gaussian_blur,
            coarse_to_fine, perturb_betas, specific_angles_only, camera and
            size settings, ...).
        save_path: Directory where debug images are written.

    Returns:
        Tuple ``(losses, results, arm_parameters_l, arm_parameters_r,
        beta_diffs)``.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Hard-coded asset paths relative to the script's working directory.
    smpl_file_name = "../SMPLs/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl"
    uv_map_file_name = "../textures/smpl_uv_map.npy"
    uv = np.load(uv_map_file_name)  # NOTE(review): loaded but unused below
    texture_file_name = "../textures/female1.jpg"
    with open(texture_file_name, 'rb') as file:
        texture = Image.open(BytesIO(file.read()))  # NOTE(review): unused below
    model = smplx.create(smpl_file_name, model_type='smpl')
    model = model.to(device)

    gaussian_filter = get_gaussian_filter(args.kernel_size, args.sigma)
    gaussian_filter = gaussian_filter.to(device)

    # Ground-truth body-shape coefficients used for the target rendering.
    betas = torch.tensor([[
        -0.3596, -1.0232, -1.7584, -2.0465, 0.3387, -0.8562, 0.8869, 0.5013,
        0.5338, -0.0210
    ]]).to(device)
    if args.perturb_betas:
        # Start shape optimization from deliberately wrong coefficients.
        perturbed_betas = Variable(torch.tensor(
            [[3, -1.0232, 1.8, 2.0465, -0.3387, 0.9, 0.8869, -0.5013, -1,
              2]]).to(device),
                                   requires_grad=True)
    else:
        perturbed_betas = betas
    expression = torch.tensor([[
        2.7228, -1.8139, 0.6270, -0.5565, 0.3251, 0.5643, -1.2158, 1.4149,
        0.4050, 0.6516
    ]]).to(device)
    # Initial pose guess: every joint angle set to 4 degrees.
    perturbed_pose = torch.ones(69).view(1, -1).to(device) * np.deg2rad(4)
    #perturbed_pose[0, 38] = -np.deg2rad(60)
    #perturbed_pose[0, 41] = np.deg2rad(60)

    perturbed_pose = Variable(perturbed_pose, requires_grad=True)
    # Fixed zero-pose segments used when only specific joints are optimized;
    # segment lengths 2 + 1 + 35 + 1 + 2 + 1 + 27 = 69 body-pose entries.
    canonical_pose0 = torch.zeros(2).view(1, -1).to(device)
    canonical_pose1 = torch.zeros(35).view(1, -1).to(device)
    canonical_pose2 = torch.zeros(2).view(1, -1).to(device)
    canonical_pose3 = torch.zeros(27).view(1, -1).to(device)
    # Learnable single-joint angles (left/right arm, left leg).
    arm_angle_l = Variable(torch.tensor([-np.deg2rad(65)
                                         ]).float().view(1, -1).to(device),
                           requires_grad=True)
    arm_angle_r = Variable(torch.tensor([np.deg2rad(65)
                                         ]).float().view(1, -1).to(device),
                           requires_grad=True)
    leg_angle_l = Variable(torch.tensor([np.deg2rad(20)
                                         ]).float().view(1, -1).to(device),
                           requires_grad=True)

    # Target mesh: ground-truth betas in the rest pose (body_pose=None).
    output_true = model(betas=betas,
                        expression=expression,
                        return_verts=True,
                        body_pose=None)

    # Normalize vertices
    # output = model(betas=betas, expression=expression,
    #               return_verts=True, body_pose=perturbed_pose)

    # vertices_goal = output.vertices[0]
    # vertices_abs_max = torch.abs(vertices_goal).max().detach()
    # vertices_min = vertices_goal.min(0)[0][None, :].detach()
    # vertices_max = vertices_goal.max(0)[0][None, :].detach()

    faces = torch.tensor(model.faces * 1.0).to(device)

    mesh_true = TriangleMesh.from_tensors(output_true.vertices[0], faces)
    vertices_true = mesh_true.vertices.unsqueeze(0)
    # vertices = pre_normalize_vertices(mesh.vertices, vertices_min, vertices_max,
    #                                  vertices_abs_max).unsqueeze(0)

    faces = mesh_true.faces.unsqueeze(0)

    # Constant white texture in NMR's per-face texture-cube format.
    textures = torch.ones(1,
                          faces.shape[1],
                          args.texture_size,
                          args.texture_size,
                          args.texture_size,
                          3,
                          dtype=torch.float32,
                          device='cuda')
    renderer_full = Renderer(camera_mode='look_at', image_size=args.image_size)
    renderer_full.eye = get_points_from_angles(args.camera_distance,
                                               args.elevation, args.azimuth)
    # Full-resolution target image (optionally blurred) — the fit target.
    images, _, _ = renderer_full(vertices_true, faces, textures)
    true_image = images[0].permute(1, 2, 0)
    if args.gaussian_blur:
        # Blur in NCHW layout, then back to HWC.
        true_image = gaussian_filter(
            true_image.unsqueeze(0).permute(0, 3, 2, 1)).permute(0, 3, 2, 1)[0]
    true_image = true_image.detach()
    imageio.imwrite(save_path + "/true_image.png",
                    (255 * true_image.detach().cpu().numpy()).astype(np.uint8))

    # Choose which parameters Adam optimizes for this configuration.
    if args.specific_angles_only and args.perturb_betas:
        optim = torch.optim.Adam(
            [arm_angle_l, arm_angle_r, leg_angle_l, perturbed_betas], lr=1e-2)
    elif args.specific_angles_only:
        optim = torch.optim.Adam([arm_angle_l, arm_angle_r, leg_angle_l],
                                 lr=1e-2)
    elif args.perturb_betas:
        optim = torch.optim.Adam([perturbed_pose, perturbed_betas], lr=1e-2)
    else:
        optim = torch.optim.Adam([perturbed_pose], lr=1e-2)
    results = []
    arm_parameters_l = []
    arm_parameters_r = []
    beta_diffs = []
    losses = []
    image_size = args.image_size
    if args.coarse_to_fine:
        # Start at a reduced resolution; doubled at each coarse-to-fine step.
        image_size = int(image_size / 2**args.coarse_to_fine_steps)
    renderer = renderer_full
    for i in range(args.iterations):
        # Periodically double the rendering resolution and re-render the
        # target at the new size (coarse-to-fine schedule).
        if args.coarse_to_fine and i % int(
                args.iterations / args.coarse_to_fine_steps) == 0:
            renderer = Renderer(camera_mode='look_at', image_size=image_size)
            renderer.eye = get_points_from_angles(args.camera_distance,
                                                  args.elevation, args.azimuth)
            images, _, _ = renderer(vertices_true, faces, textures)
            true_image = images[0].permute(1, 2, 0)
            if args.gaussian_blur:
                true_image = gaussian_filter(
                    true_image.unsqueeze(0).permute(0, 3, 2,
                                                    1)).permute(0, 3, 2, 1)[0]
            true_image = true_image.detach()
            image_size *= 2
        optim.zero_grad()
        if args.specific_angles_only:
            # Assemble the full 69-dim pose from the fixed zero segments and
            # the three learnable joint angles.
            perturbed_pose = torch.cat([
                canonical_pose0, leg_angle_l, canonical_pose1, arm_angle_l,
                canonical_pose2, arm_angle_r, canonical_pose3
            ],
                                       dim=-1)
        output = model(betas=perturbed_betas,
                       expression=expression,
                       return_verts=True,
                       body_pose=perturbed_pose)

        vertices_goal = output.vertices[0]

        mesh = TriangleMesh.from_tensors(vertices_goal, faces)  # NOTE(review): unused

        vertices = vertices_goal.unsqueeze(0)
        # vertices = pre_normalize_vertices(mesh.vertices, vertices_min, vertices_max,
        #                              vertices_abs_max).unsqueeze(0)

        images, _, _ = renderer(vertices, faces, textures)
        image = images[0].permute(1, 2, 0)
        if i == 0:
            # Save the initial (pre-optimization) rendering for reference.
            perturbed_images, _, _ = renderer_full(vertices, faces, textures)
            perturbed_image = perturbed_images[0].permute(1, 2, 0)
            perturbed_image = perturbed_image.detach()
            imageio.imwrite(save_path + "/perturbed_image.png",
                            (255 *
                             perturbed_image.detach().cpu().numpy()).astype(
                                 np.uint8))
        if args.gaussian_blur:
            image = gaussian_filter(image.unsqueeze(0).permute(
                0, 3, 2, 1)).permute(0, 3, 2, 1)[0]
        # L1 photometric loss between rendered and target images.
        loss = (image - true_image).abs().mean()
        loss.backward()
        optim.step()

        results.append((255 * image.detach().cpu().numpy()).astype(np.uint8))
        if args.specific_angles_only:
            arm_parameters_l.append(arm_angle_l.item())
            arm_parameters_r.append(arm_angle_r.item())
        if args.perturb_betas:
            beta_diffs.append((betas - perturbed_betas).abs().mean().item())
        losses.append(loss.item())
        print("Loss: ", loss.item())
    return losses, results, arm_parameters_l, arm_parameters_r, beta_diffs