Example #1
def get_visibility(vertices, triangles, image_size=256):
    '''
        vertices: [batch_size, nv, 3]. range: [-1, 1]
        triangles: [batch_size, nf, 3]
        returns: [batch_size, nv] float mask, 1.0 for vertices belonging to a visible triangle
    '''
    bz = vertices.shape[0]
    h = w = image_size
    device = vertices.device
    # vertices[...,-1] = -vertices[...,-1]
    vertices = vertices * image_size / 2. + image_size / 2.

    depth_buffer = torch.zeros([bz, h, w]).float().to(device) + 1e6
    triangle_buffer = torch.zeros([bz, h, w]).int().to(device) - 1
    baryw_buffer = torch.zeros([bz, h, w, 3]).float().to(device)
    vert_vis = torch.zeros([bz, vertices.shape[1]]).float().to(device)

    st = time()
    face_vertices = srf.face_vertices(vertices, triangles)
    standard_rasterize_cuda.standard_rasterize(face_vertices, depth_buffer,
                                               triangle_buffer, baryw_buffer,
                                               h, w)

    triangle_buffer = triangle_buffer.reshape(bz, -1)
    for i in range(bz):
        # indices of triangles that cover at least one pixel; [1:] drops the
        # leading -1 entry that marks background pixels
        tri_visind = torch.unique(triangle_buffer[i])[1:].long()
        vert_visind = triangles[i, tri_visind, :].flatten()
        vert_vis[i, torch.unique(vert_visind.long())] = 1.0
    print(time() - st)
    return vert_vis
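All of these examples lean on srf.face_vertices, which gathers the three corner positions of each face from the vertex array. Below is a minimal pure-PyTorch sketch of that gather; the helper name face_vertices_ref is mine, and the [bs, nf, 3, 3] output layout is an assumption based on how the examples use the result.

import torch

def face_vertices_ref(vertices, faces):
    # vertices: [bs, nv, 3] float positions; faces: [bs, nf, 3] integer vertex indices
    # returns:  [bs, nf, 3, 3], the three corner positions of every face
    bs, nv, _ = vertices.shape
    nf = faces.shape[1]
    # offset each batch's indices so a single flat lookup covers the whole batch
    offset = torch.arange(bs, device=vertices.device).view(bs, 1, 1) * nv
    flat = vertices.reshape(bs * nv, 3)
    return flat[(faces.long() + offset).reshape(-1)].reshape(bs, nf, 3, 3)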
Example #2
File: mesh.py Project: stjordanis/kaolin-1
 def face_textures(self):
     if self.texture_type in ['surface']:
         return self.textures
     elif self.texture_type in ['vertex']:
         return srf.face_vertices(self.textures, self.faces)
     else:
         raise ValueError('texture type not applicable')
Example #3
def eval_iou(predicted_v, predicted_f, voxels):
    faces_ = srf.face_vertices(predicted_v, predicted_f).data
    # rescale face coordinates into the normalized [0, 1] range sampled by the 32^3 voxel grid
    faces_norm = faces_ * 1. * (32. - 1) / 32. + 0.5
    voxels_predict = srf.voxelization(faces_norm, 32, False).cpu().numpy()
    voxels_predict = voxels_predict.transpose(0, 2, 1, 3)[:, :, :, ::-1]
    iou = (voxels * voxels_predict).sum((1, 2, 3)) / (0 < (voxels + voxels_predict)).sum((1, 2, 3))
    return iou
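The last line of eval_iou computes per-example intersection-over-union in a single expression. Split out as a small NumPy helper, purely for readability (the helper name is mine; the arithmetic is unchanged):

import numpy as np

def voxel_iou(voxels_gt, voxels_pred):
    # both inputs: [batch, 32, 32, 32] arrays of 0/1 occupancies
    intersection = (voxels_gt * voxels_pred).sum(axis=(1, 2, 3))
    union = ((voxels_gt + voxels_pred) > 0).sum(axis=(1, 2, 3))
    return intersection / union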
Example #4
 def evaluate_iou(self, images, voxels):
     vertices, faces, textures = self.reconstruct(images)
     faces_ = srf.face_vertices(vertices, faces).data
     faces_norm = faces_ * 1. * (32. - 1) / 32. + 0.5
     voxels_predict = srf.voxelization(faces_norm, 32, False).cpu().numpy()
     voxels_predict = voxels_predict.transpose(0, 2, 1, 3)[:, :, :, ::-1]
     iou = (voxels * voxels_predict).sum((1, 2, 3)) / (0 < (voxels + voxels_predict)).sum((1, 2, 3))
     return iou, vertices, faces, textures
Example #5
def get_visibility_z(vertices, triangles, image_size=256):
    '''
        vertices: [batch_size, nv, 3]. range: [-1, 1]
        triangles: [batch_size, nf, 3]
        returns: [batch_size, nv] float mask, 1.0 for vertices that pass the rasterized-depth test
    '''
    bz = vertices.shape[0]
    h = w = image_size
    device = vertices.device
    # vertices[...,-1] = -vertices[...,-1]
    vertices = vertices * image_size / 2. + image_size / 2.

    depth_buffer = torch.zeros([bz, h, w]).float().to(device) + 1e6
    triangle_buffer = torch.zeros([bz, h, w]).int().to(device) - 1
    baryw_buffer = torch.zeros([bz, h, w, 3]).float().to(device)
    vert_vis = torch.zeros([bz, vertices.shape[1]]).float().to(device)

    st = time()
    face_vertices = srf.face_vertices(vertices, triangles)
    standard_rasterize_cuda.standard_rasterize(face_vertices, depth_buffer,
                                               triangle_buffer, baryw_buffer,
                                               h, w)

    zrange = vertices[..., -1].max() - vertices[..., -1].min()
    for i in range(bz):
        for j in range(vertices.shape[1]):
            [x, y, z] = vertices[i, j]
            # clamp so that floor()/ceil() cannot index past the buffer edge
            x = torch.clamp(x, 0, w - 1)
            y = torch.clamp(y, 0, h - 1)
            ul = depth_buffer[i, int(torch.floor(y)), int(torch.floor(x))]
            ur = depth_buffer[i, int(torch.floor(y)), int(torch.ceil(x))]
            dl = depth_buffer[i, int(torch.ceil(y)), int(torch.floor(x))]
            dr = depth_buffer[i, int(torch.ceil(y)), int(torch.ceil(x))]

            yd = y - torch.floor(y)
            xd = x - torch.floor(x)
            # bilinear interpolation of the rasterized depth at (x, y)
            depth = ul * (1 - xd) * (1 - yd) + ur * xd * (1 - yd) + dl * (
                1 - xd) * yd + dr * xd * yd
            # the vertex is visible if it is no deeper than the surface,
            # with a tolerance of 2% of the depth range
            if z < depth + zrange * 0.02:
                vert_vis[i, j] = 1.0
    print(time() - st)
    return vert_vis
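The double Python loop above is the slow part: it performs one bilinear read from depth_buffer per vertex. The same lookup can be vectorized with torch.nn.functional.grid_sample. Below is a minimal sketch assuming, as in the function above, that the vertices are already in pixel coordinates and that depth_buffer comes from standard_rasterize; the helper name and the tol argument are mine.

import torch
import torch.nn.functional as F

def visibility_from_depth(vertices, depth_buffer, image_size=256, tol=0.02):
    # vertices: [bz, nv, 3] in pixel coordinates; depth_buffer: [bz, h, w]
    x, y, z = vertices[..., 0], vertices[..., 1], vertices[..., 2]
    # map pixel coordinates to the [-1, 1] grid expected by grid_sample;
    # align_corners=True sends -1 to pixel 0 and +1 to pixel image_size - 1
    grid = torch.stack([x / (image_size - 1) * 2 - 1,
                        y / (image_size - 1) * 2 - 1], dim=-1)      # [bz, nv, 2]
    depth = F.grid_sample(depth_buffer.unsqueeze(1),                # [bz, 1, h, w]
                          grid.unsqueeze(2),                        # [bz, nv, 1, 2]
                          mode='bilinear', align_corners=True)
    depth = depth[:, 0, :, 0]                                       # [bz, nv]
    zrange = z.max() - z.min()
    # same test as above: a vertex is visible if it is no deeper than the
    # rasterized surface, up to a small tolerance
    return (z < depth + zrange * tol).float()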
Example #6
            vertices_real, faces_real = mesh.vertices, mesh.faces

            # repeat the single ground-truth mesh once per rendered view
            vertices_real = [vertices_real for _ in range(num_rendered_images)]
            faces_real = [faces_real for _ in range(num_rendered_images)]

            vertices_real = torch.cat(vertices_real, dim=0)
            faces_real = torch.cat(faces_real, dim=0)

            vertices_generate, faces_generate = model.reconstruct(
                rendered_images_i)

            # srf.save_obj('/mnt/zhengwen/model_synthesis/shapeNetCore/test_generate.obj', vertices_generate[0], faces_generate[0])
            # srf.save_obj('/mnt/zhengwen/model_synthesis/shapeNetCore/test_real.obj', vertices_real[0], faces_real[0])

            faces_real_ = srf.face_vertices(vertices_real, faces_real).data
            faces_real_norm = faces_real_ * 1. * (32. - 1) / 32. + 0.5
            voxels_real = srf.voxelization(faces_real_norm, 32,
                                           False).cpu().numpy()

            faces_generate_ = srf.face_vertices(vertices_generate,
                                                faces_generate).data
            faces_generate_norm = faces_generate_ * 1. * (32. - 1) / 32. + 0.5
            voxels_generate = srf.voxelization(faces_generate_norm, 32,
                                               False).cpu().numpy()
            voxels_generate = voxels_generate.transpose(0, 2, 1,
                                                        3)[:, :, :, ::-1]

            iou = (voxels_real * voxels_generate).sum(
                (1, 2, 3)) / (0 < (voxels_real + voxels_generate)).sum(
                    (1, 2, 3))
Example #7
File: mesh.py Project: JinlongYANG/vis
 def face_vertices(self):
     if self._face_vertices_update:
         self._face_vertices = srf.face_vertices(self.vertices, self.faces)
         self._face_vertices_update = False
     return self._face_vertices
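Example #7 caches the gathered tensor behind an update flag. For context, here is a minimal sketch of the other half of that pattern: a setter that invalidates the cache whenever the vertices are replaced. Only the attribute names come from the example; the class itself is hypothetical, and srf is assumed to be the same soft-rasterizer module the examples import.

class CachedMesh:
    def __init__(self, vertices, faces):
        self._vertices = vertices
        self.faces = faces
        self._face_vertices = None
        self._face_vertices_update = True   # force a recompute on first access

    @property
    def vertices(self):
        return self._vertices

    @vertices.setter
    def vertices(self, value):
        self._vertices = value
        self._face_vertices_update = True   # invalidate the cached gather

    @property
    def face_vertices(self):
        if self._face_vertices_update:
            # srf: the soft-rasterizer functional module used in the examples above
            self._face_vertices = srf.face_vertices(self._vertices, self.faces)
            self._face_vertices_update = False
        return self._face_vertices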
Example #8
 def face_vertices(self):
     return srf.face_vertices(self.vertices, self.faces)
Example #9
            #     # print(os.path.join(os.path.join(os.path.join(os.path.join(root_dir, sub_root_dir)), obj_dir), file_name))
            #     rendered_images_i.append(image)
            # rendered_images_i = np.array(rendered_images_i)
            # rendered_images_i = rendered_images_i.transpose((0, 3, 1, 2))
            #
            # npz_images.append(rendered_images_i)

            obj_file_i = os.path.join(root_dir, sub_root_dir, obj_dir,
                                      'model.obj')
            mesh = sr.Mesh.from_obj(obj_file_i)

            vertices_real, faces_real = mesh.vertices, mesh.faces

            faces_real_ = srf.face_vertices(vertices_real, faces_real).data
            faces_real_norm = faces_real_ * 1. * (32. - 1) / 32. + 0.5
            voxels_real = srf.voxelization(faces_real_norm, 32,
                                           False).cpu().numpy()

            npz_models.append(np.squeeze(voxels_real).transpose((1, 2, 0)))

            obj_count += 1

            if obj_count == 10:
                break

        npz_models = np.array(npz_models)
        npz_images = np.array(npz_images)

        sub_root_count += 1