Example #1
def visualize_pred(img,
                   category,
                   pred,
                   image_name,
                   mesh_path,
                   down_sample_rate=8,
                   device='cuda:0'):
    render_image_size = max(IMAGE_SIZES[category])
    crop_size = IMAGE_SIZES[category]

    cameras = OpenGLPerspectiveCameras(device=device, fov=12.0)
    raster_settings = RasterizationSettings(image_size=render_image_size,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0)
    lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
    phong_renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(device=device, lights=lights, cameras=cameras))

    theta_pred = pred['theta']
    elevation_pred = pred['elevation']
    azimuth_pred = pred['azimuth']
    distance_pred = pred['distance']
    cad_idx = pred['cad_idx']
    dx = pred['dx'] * down_sample_rate
    dy = pred['dy'] * down_sample_rate

    x3d, xface = load_off(mesh_path + '/%02d.off' % cad_idx)

    verts = torch.from_numpy(x3d).to(device)
    verts = pre_process_mesh_pascal(verts)
    faces = torch.from_numpy(xface).to(device)

    verts_rgb = torch.ones_like(verts)[None]
    textures = Textures(verts_rgb=verts_rgb.to(device))
    meshes = Meshes(verts=[verts], faces=[faces], textures=textures)

    img_ = get_img(theta_pred, elevation_pred, azimuth_pred, distance_pred,
                   meshes, phong_renderer, crop_size, render_image_size,
                   device)
    img_ = shift_img(img_, dx, dy)
    get_image = alpha_merge_imgs(img, img_)

    Image.fromarray(get_image).save(image_name)
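
A hypothetical invocation; the category, mesh directory, image, and pose values are placeholders, and the helpers (IMAGE_SIZES, load_off, get_img, shift_img, alpha_merge_imgs) are assumed to be in scope:

pred = {'theta': 0.0, 'elevation': 0.3, 'azimuth': 1.2, 'distance': 5.0,
        'cad_idx': 1, 'dx': 0, 'dy': 0}  # pose angles assumed to be in radians
visualize_pred(img, 'car', pred, 'overlay.png', './meshes')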
Example #2
def render_mesh(verts, faces):
    # verts and faces are lists of per-mesh tensors, all on the same device.
    device = verts[0].device
    N = len(verts)
    num_verts_per_mesh = [v.shape[0] for v in verts]
    # Padded (N, max_V, 3) white vertex colors; padded slots are set to -1 so
    # they are distinguishable from real (white) vertices.
    verts_rgb = torch.ones((N, max(num_verts_per_mesh), 3),
                           requires_grad=False,
                           device=device)
    for i in range(N):
        verts_rgb[i, num_verts_per_mesh[i]:, :] = -1
    textures = Textures(verts_rgb=verts_rgb)

    meshes = Meshes(verts=verts, faces=faces, textures=textures)
    # Random viewpoint per mesh: elevation in [-15, 15), azimuth in [-180, 180) degrees.
    elev = torch.rand(N) * 30 - 15
    azim = torch.rand(N) * 360 - 180
    R, T = look_at_view_transform(dist=2, elev=elev, azim=azim)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    # SoftRas-style soft rasterization: sigma sets how quickly face influence
    # decays at boundaries, with the standard log(1 / eps - 1) * sigma blur radius.
    sigma = 1e-4
    raster_settings = RasterizationSettings(
        image_size=128,
        blur_radius=np.log(1. / 1e-4 - 1.) * sigma,
        faces_per_pixel=40,
        perspective_correct=False)
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=SoftSilhouetteShader())
    return renderer(meshes)
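
A minimal usage sketch, assuming a CUDA device and an icosphere test mesh from pytorch3d.utils:

import torch
from pytorch3d.utils import ico_sphere

device = torch.device("cuda:0")
sphere = ico_sphere(level=2, device=device)
silhouettes = render_mesh(sphere.verts_list(), sphere.faces_list())
print(silhouettes.shape)  # (1, 128, 128, 4); the last channel is the soft silhouette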
Example #3
def load_untextured_mesh(mesh_path, device):
    verts, faces_idx, _ = load_obj(mesh_path)
    faces = faces_idx.verts_idx
    # Dummy all-white vertex colors so the Meshes object carries a texture.
    verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
    textures = Textures(verts_rgb=verts_rgb.to(device))
    mesh_no_texture = Meshes(
        verts=[verts.to(device)],
        faces=[faces.to(device)],
        textures=textures
        )
    return mesh_no_texture
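
Example usage (the .obj path is a placeholder):

import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
mesh = load_untextured_mesh("model.obj", device)
print(mesh.verts_packed().shape, mesh.faces_packed().shape)  # (V, 3), (F, 3)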
Example #4
def deepdream(FLAGS):
    '''
    Run the model's deep-dream pass, then render every resulting .ply mesh
    into a single matplotlib grid.

    :param FLAGS: deep-dream options forwarded to model.deep_dream
    :return: the matplotlib figure holding the rendered grid
    '''

    global model, renderer_instance

    model.deep_dream(FLAGS)

    deep_dream_dir = model.result_dir

    files = os.listdir(deep_dream_dir)
    verts = []
    faces = []
    verts_rgb = []
    titles = []
    for file in files:
        if file.endswith('.ply'):
            titles.append(file)  # os.listdir already yields bare file names
            vert, face = load_ply(os.path.join(deep_dream_dir, file))
            verts.append(vert.to(device))  # relies on a module-level `device`
            faces.append(face.to(device))
            verts_rgb.append(torch.ones_like(vert).to(device))

    # `Textures` expects a padded (N, V, 3) tensor rather than a list, so pad
    # the per-mesh colors to the largest vertex count, marking padding with -1
    # as in Example #2.
    max_verts = max(v.shape[0] for v in verts_rgb)
    verts_rgb_padded = -torch.ones(len(verts_rgb), max_verts, 3, device=device)
    for i, rgb in enumerate(verts_rgb):
        verts_rgb_padded[i, :rgb.shape[0]] = rgb
    textures = Textures(verts_rgb=verts_rgb_padded)
    interpol_mesh = Meshes(verts, faces, textures)

    print('rendering images')
    images = renderer_instance(interpol_mesh).cpu().numpy()

    print('processing images')
    num_images = int(images.shape[0])
    cols = 2
    rows = -(-num_images // cols)  # ceiling division

    fig, axs = plt.subplots(nrows=rows,
                            ncols=cols,
                            sharex='all',
                            sharey='all',
                            figsize=(20, 20),
                            gridspec_kw={
                                'wspace': 0,
                                'hspace': 0
                            })

    for im, ax in enumerate(axs.flatten()):
        if im < num_images:
            ax.imshow(images[im, :, :, :3])
        ax.axis('off')  # also blanks any unused trailing subplot

    return fig
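
A hypothetical call, assuming the module-level model and renderer_instance globals are initialized and FLAGS carries the deep-dream options:

fig = deepdream(FLAGS)
fig.savefig('deepdream_grid.png', bbox_inches='tight')  # output path is a placeholder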
Example #5
    def forward(self, vertices, points, faces, render_texture=False):
        tex = torch.ones_like(vertices) * self.mesh_color  # (B, V, 3) constant vertex color
        textures = Textures(verts_rgb=tex)

        mesh = Meshes(verts=vertices, faces=faces, textures=textures)
        # Keep only the alpha channel of the RGBA silhouette render: (B, 1, H, W).
        sil_images = self.silhouette_renderer(mesh)[..., -1].unsqueeze(1)
        screen_size = torch.ones(vertices.shape[0], 2).to(vertices.device) * self.image_size
        # Swap screen-space (x, y) into (row, col) order for image indexing.
        proj_points = self.cameras.transform_points_screen(points, screen_size)[:, :, [1, 0]]

        if render_texture:
            color_image = self.color_renderer(mesh).permute(0, 3, 1, 2)[:, :3, :, :]
            return sil_images, proj_points, color_image
        else:
            return sil_images, proj_points
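
Two indexing details worth noting in the forward pass above: [..., -1] keeps only the alpha channel of the RGBA output of SoftSilhouetteShader, and the trailing [:, :, [1, 0]] swaps the screen-space (x, y) coordinates returned by transform_points_screen into (row, col) order so they can index image tensors directly.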
Example #6
# Project 2D image features onto the target mesh vertices and average over
# the feature channels, giving one scalar per vertex: shape (1, V, 1).
test = proj(features, trg_mesh.verts_packed()).mean(2).unsqueeze(2)
# Normalize the per-vertex scalars into [0, 1].
test = test - torch.min(test, 1)[0].unsqueeze(2)
test = test / torch.max(test)
# Per-vertex RGB: red = scalar, green = 1 - scalar, blue = 0.
test = torch.cat((test, 1 - test,
                  torch.zeros(1, trg_mesh.verts_packed().shape[0], 1, device=device)), 2)
model_textures = Textures(verts_rgb=test)
test_block = Meshes(
    verts=[trg_mesh.verts_packed().to(device)],
    faces=[trg_mesh.faces_packed().to(device)],
    textures=model_textures
)
# Show the reference image, the mean feature map, and the re-rendered mesh.
plt.imshow(image.squeeze().permute(1, 2, 0).detach().cpu().numpy())
plt.show()
feature_to_plot = torch.mean(features, 1)
plt.imshow(feature_to_plot.squeeze().detach().cpu().numpy())
plt.show()
new_image, new_camera = render(test_block, shape["model_id"][0], shapenet_dataset, device, camera)
plt.imshow(new_image.squeeze().permute(1, 2, 0).detach().cpu().numpy())
plt.show()
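
The red-green texture above encodes projected feature strength: vertices where the averaged features project high render red, low ones render green, with blue fixed at zero, so the re-rendered test_block shows where the image features land on the mesh.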
Example #7
        # # normalize to unit box
        # vert_range = (verts.max(dim=0)[0] - verts.min(dim=0)[0]).max()
        # vert_center = (verts.max(dim=0)[0] + verts.min(dim=0)[0]) / 2
        # verts -= vert_center
        # verts /= vert_range

        # normalize to unit sphere
        vert_center = torch.mean(verts, dim=0)
        verts -= vert_center
        vert_scale = torch.norm(verts, dim=1).max()
        verts /= vert_scale

        save_obj(os.path.join(output_dir, "mesh.obj"),
                 verts=verts,
                 faces=verts_idx)
        textures = Textures(verts_rgb=torch.ones(1, verts.shape[0], 3)).to(
            device=device)
        meshes = Meshes(verts=[verts], faces=[verts_idx],
                        textures=textures).to(device=device)

        # Sample FoV perspective cameras around the normalized mesh.
        batch_size = 1
        camera_params = {"znear": opt.znear}
        camera_sampler = CameraSampler(
            opt.num_cameras,
            batch_size,
            # min distance should be larger than znear + object extent
            distance_range=torch.tensor(((opt.min_dist, opt.max_dist),)),
            sort_distance=True,
            camera_type=FoVPerspectiveCameras,
            camera_params=camera_params)
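
A quick sanity check of the unit-sphere normalization above (a sketch, not part of the original pipeline; verts here is the CPU tensor before any .to(device) move):

assert torch.allclose(verts.mean(dim=0), torch.zeros(3), atol=1e-5)
assert torch.isclose(torch.norm(verts, dim=1).max(), torch.tensor(1.0), atol=1e-5)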