Code Example #1
import numpy as np
import torch

def render_multiview(raw_vtx, pred_tex, idx=0):
    angles = [0, 45, 90, 135, 180, 225, 270, 315]
    with torch.no_grad():
        # Render from multiple viewpoints
        # Build the base camera orientation: a rotation about the Z axis
        # combined with a tilt about the Y axis (quaternions in (w, x, y, z) order)
        rad = -90 / 180 * np.pi
        q0 = torch.Tensor([np.cos(-rad/2), 0, 0, np.sin(-rad/2)]).cuda()
        rad = 110 / 180 * np.pi
        q1 = torch.Tensor([np.cos(-rad/2), 0, np.sin(-rad/2), 0]).cuda()
        q0 = qmul(q0, q1)

        rot = []
        for angle in angles:
            rad = angle / 180 * np.pi
            q = torch.Tensor([np.cos(-rad/2), 0, 0, np.sin(-rad/2)]).cuda()
            q = qmul(q0, q)
            rot.append(q)
        rot = torch.stack(rot, dim=0)

        raw_vtx = raw_vtx[idx:idx+1].expand(rot.shape[0], -1, -1)
        pred_tex = pred_tex[idx:idx+1].expand(rot.shape[0], -1, -1, -1)
        
        vtx = qrot(rot, raw_vtx)
        vtx[:, :, 1:] *= -1 # Flip Y and Z for the renderer convention

        pred_view, _ = mesh_template.forward_renderer(renderer, vtx, pred_tex)

        pred_view = pred_view.cpu()
        # Arrange the 8 rendered views into a 2x4 image grid
        nrows = 2
        ncols = 4
        pred_view = pred_view.view(nrows, ncols, pred_view.shape[1], pred_view.shape[2], pred_view.shape[3])
        pred_view = pred_view.permute(0, 2, 1, 3, 4).contiguous()
        pred_view = pred_view.view(args.image_resolution*nrows, args.image_resolution*ncols, 3)
        render = (pred_view.numpy() + 1)/2 # Map from [-1, 1] to [0, 1]
        return render
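
The snippet above relies on quaternion helpers qmul and qrot that are not shown. A minimal sketch of compatible implementations, assuming the (w, x, y, z) component order implied by how q0 and q1 are constructed:

import torch

def qmul(q, r):
    # Hamilton product of two quaternions in (w, x, y, z) order.
    # Broadcasts over leading dimensions; the last dimension must be 4.
    w1, x1, y1, z1 = q.unbind(-1)
    w2, x2, y2, z2 = r.unbind(-1)
    return torch.stack((
        w1*w2 - x1*x2 - y1*y2 - z1*z2,
        w1*x2 + x1*w2 + y1*z2 - z1*y2,
        w1*y2 - x1*z2 + y1*w2 + z1*x2,
        w1*z2 + x1*y2 - y1*x2 + z1*w2,
    ), dim=-1)

def qrot(q, v):
    # Rotate 3D points v by unit quaternions q.
    # q: (N, 4), v: (N, V, 3); q is broadcast over the vertex dimension.
    qvec = q[:, None, 1:]                                  # (N, 1, 3)
    uv = torch.linalg.cross(qvec.expand_as(v), v, dim=-1)
    uuv = torch.linalg.cross(qvec.expand_as(v), uv, dim=-1)
    return v + 2 * (q[:, None, :1] * uv + uuv)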
Code Example #2
def render_and_score(input_mesh_map, input_texture, output_array):
    # Recover 3D vertex positions from the predicted mesh map
    vtx = mesh_template.get_vertex_positions(input_mesh_map)
    # Apply the ground-truth pose: scale, rotate, then translate
    vtx = qrot(data['rotation'], data['scale'].unsqueeze(-1)*vtx) + data['translation'].unsqueeze(1)
    vtx = vtx * torch.Tensor([1, -1, -1]).to(vtx.device) # Flip Y and Z for the renderer convention

    image_pred, _ = mesh_template.forward_renderer(renderer, vtx, input_texture, len(gpu_ids))
    image_pred = image_pred.permute(0, 3, 1, 2)/2 + 0.5 # NHWC -> NCHW, map [-1, 1] to [0, 1]

    # Collect Inception embeddings (used downstream, e.g. for FID)
    emb = forward_inception_batch(inception_model, image_pred)
    output_array.append(emb)
    return image_pred # Return images for visualization
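
The embeddings collected in output_array are typically aggregated and compared against embeddings of real images via the Fréchet distance (FID). A hedged sketch of that final step; frechet_distance is a hypothetical helper, not part of the code above:

import numpy as np
from scipy import linalg

def frechet_distance(emb_a, emb_b):
    # FID between two embedding sets:
    # ||mu_a - mu_b||^2 + Tr(C_a + C_b - 2*sqrt(C_a @ C_b))
    mu_a, mu_b = emb_a.mean(axis=0), emb_b.mean(axis=0)
    cov_a = np.cov(emb_a, rowvar=False)
    cov_b = np.cov(emb_b, rowvar=False)
    covmean = linalg.sqrtm(cov_a @ cov_b).real # Drop tiny imaginary parts from numerical error
    diff = mu_a - mu_b
    return float(diff @ diff + np.trace(cov_a + cov_b - 2*covmean))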
Code Example #3
import torch

def transform_vertices(vtx, gt_scale, gt_translation, gt_rot, gt_idx):
    # Optionally apply learned per-sample corrections to the ground-truth pose
    if args.optimize_deltas:
        translation_delta, scale_delta = dataset_params(gt_idx, 'deltas')
    else:
        scale_delta = 0
        translation_delta = 0
    # Scale, rotate, then translate the vertices
    vtx = qrot(gt_rot, (gt_scale + scale_delta).unsqueeze(-1)*vtx) + (gt_translation + translation_delta).unsqueeze(1)
    vtx = vtx * torch.Tensor([1, -1, -1]).to(vtx.device) # Flip Y and Z for the renderer convention
    if args.optimize_z0:
        # Depth-dependent scaling of X/Y (a pseudo-perspective correction)
        z0 = dataset_params(gt_idx, 'z0').unsqueeze(-1)
        z = vtx[:, :, 2:]
        factor = (z0 + z/2)/(z0 - z/2)
        vtx = torch.cat((vtx[:, :, :2]*factor, z), dim=2)
    else:
        assert 'ds_z0' not in dataset_params.__dict__, 'Model was trained with --optimize_z0'
    return vtx
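
To illustrate the optimize_z0 branch: the factor (z0 + z/2)/(z0 - z/2) equals 1 at z = 0, shrinks X/Y on one side of the depth axis and enlarges them on the other, mimicking perspective. A standalone example with made-up values (z0 = 2.0 is arbitrary):

import torch

vtx = torch.tensor([[[0.5, 0.5, -0.4],
                     [0.5, 0.5,  0.0],
                     [0.5, 0.5,  0.4]]])   # One dummy mesh with 3 vertices
z0 = torch.tensor([[2.0]]).unsqueeze(-1)   # Arbitrary value, shape (1, 1, 1)
z = vtx[:, :, 2:]
factor = (z0 + z/2)/(z0 - z/2)
print(factor.squeeze())                    # tensor([0.8182, 1.0000, 1.2222])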
Code Example #4
        generator_running_avg.eval() # Use the running-average (EMA) generator for inference
        noise = noise.cuda()
        pred_tex, pred_mesh_map, attn_map = trainer('inference', None, None, C=c, caption=caption, noise=noise)
        vtx = mesh_template.get_vertex_positions(pred_mesh_map)
        vtx_obj = vtx.clone()
        vtx_obj[..., :] = vtx_obj[..., [0, 2, 1]] # Swap Y and Z (the result is Y up)
        output_dir = os.path.join('results', args.weights)
        pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
        for i, v in enumerate(vtx_obj):
            mesh_template.export_obj(os.path.join(output_dir, f'mesh_{i}'), v, pred_tex[i]/2 + 0.5)
            
        # Look up the ground-truth pose of the selected training samples
        rotation = train_ds.data['rotation'][indices].cuda()
        scale = train_ds.data['scale'][indices].cuda()
        translation = train_ds.data['translation'][indices].cuda()
        
        vtx = qrot(rotation, scale.unsqueeze(-1)*vtx) + translation.unsqueeze(1) # Scale, rotate, translate
        vtx = vtx * torch.Tensor([1, -1, -1]).to(vtx.device) # Flip Y and Z for the renderer convention

        image_pred, alpha_pred = mesh_template.forward_renderer(renderer, vtx, pred_tex,
                                                                num_gpus=len(gpu_ids),
                                                                return_hardmask=True)
        image_pred[alpha_pred.expand_as(image_pred) == 0] = 1 # White background where nothing was rendered
        image_pred = image_pred.permute(0, 3, 1, 2)/2 + 0.5 # NHWC -> NCHW, map [-1, 1] to [0, 1]
        image_pred = F.avg_pool2d(image_pred, 2) # Anti-aliasing: 2x average-pool downsampling

        import imageio
        import torchvision
        image_grid = torchvision.utils.make_grid(image_pred, nrow=8, padding=0)
        image_grid = (image_grid.permute(1, 2, 0)*255).clamp(0, 255).cpu().byte().numpy()
        imageio.imwrite(f'results/{args.weights}.png', image_grid)
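
The export_obj call above saves each generated mesh together with its texture. For reference, a hypothetical minimal OBJ writer covering only the geometry; the actual method presumably also writes UV coordinates and the texture image:

def export_obj_minimal(path_prefix, vertices, faces):
    # Hypothetical sketch, not the actual mesh_template.export_obj implementation.
    # vertices: (V, 3) float tensor; faces: (F, 3) tensor of 0-based vertex indices.
    with open(path_prefix + '.obj', 'w') as f:
        for x, y, z in vertices.tolist():
            f.write(f'v {x:.6f} {y:.6f} {z:.6f}\n')
        for a, b, c in faces.tolist():
            f.write(f'f {a+1} {b+1} {c+1}\n') # OBJ face indices are 1-based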