Example #1
def eval_one_dir(exp_dir, n_pts=50000):
    """
    Function for one directory
    """
    device = torch.device('cuda:0')
    cfg = config.load_config(os.path.join(exp_dir, 'config.yaml'))
    dataset = config.create_dataset(cfg.data, mode='val')
    meshes_gt = dataset.get_meshes().to(device)
    val_gt_pts_file = os.path.join(cfg.data.data_dir, 'val%d.ply' % n_pts)
    if os.path.isfile(val_gt_pts_file):
        points, normals = np.split(read_ply(val_gt_pts_file), 2, axis=1)
        pcl_gt = Pointclouds(
            torch.from_numpy(points[None, ...]).float(),
            torch.from_numpy(normals[None, ...]).float()).to(device)
    else:
        pcl_gt = dataset.get_pointclouds(n_pts).to(device)
        trimesh.Trimesh(pcl_gt.points_packed().cpu().numpy(),
                        vertex_normals=pcl_gt.normals_packed().cpu().numpy(),
                        process=False).export(val_gt_pts_file,
                                              vertex_normal=True)

    # evaluate the intermediate meshes saved in the vis directory
    vis_dir = os.path.join(exp_dir, 'vis')
    vis_files = sorted(get_filenames(vis_dir, '_mesh.ply'))
    iters = [int(os.path.basename(v).split('_')[0]) for v in vis_files]
    best_dict = defaultdict(lambda: float('inf'))
    vis_eval_csv = os.path.join(vis_dir, "evaluation_n%d.csv" % n_pts)
    if not os.path.isfile(vis_eval_csv):
        with open(vis_eval_csv, "w") as f:
            fieldnames = ['mtime', 'it', 'chamfer_p', 'chamfer_n', 'pf_dist']
            writer = csv.DictWriter(f,
                                    fieldnames=fieldnames,
                                    restval="-",
                                    extrasaction="ignore")
            writer.writeheader()
            mtime0 = None
            for it, vis_file in zip(iters, vis_files):
                eval_dict = OrderedDict()
                mtime = os.path.getmtime(vis_file)
                if mtime0 is None:
                    mtime0 = mtime
                eval_dict['it'] = it
                eval_dict['mtime'] = mtime - mtime0
                val_pts_file = os.path.join(
                    vis_dir,
                    os.path.basename(vis_file).replace('_mesh',
                                                       '_val%d' % n_pts))
                if os.path.isfile(val_pts_file):
                    points, normals = np.split(read_ply(val_pts_file),
                                               2,
                                               axis=1)
                    points = torch.from_numpy(points).float().to(
                        device=device).view(1, -1, 3)
                    normals = torch.from_numpy(normals).float().to(
                        device=device).view(1, -1, 3)
                else:
                    mesh = trimesh.load(vis_file, process=False)
                    # alternative: Poisson-disk sampling via point_cloud_utils
                    # (pcu.sample_mesh_poisson_disk), disabled here in favor
                    # of pytorch3d's uniform mesh sampling below
                    meshes = Meshes(
                        torch.from_numpy(mesh.vertices[None, ...]).float(),
                        torch.from_numpy(mesh.faces[None, ...]).long()).to(device)
                    points, normals = sample_points_from_meshes(
                        meshes, n_pts, return_normals=True)
                    trimesh.Trimesh(points.cpu().numpy()[0],
                                    vertex_normals=normals.cpu().numpy()[0],
                                    process=False).export(val_pts_file,
                                                          vertex_normal=True)
                pcl = Pointclouds(points, normals)
                chamfer_p, chamfer_n = chamfer_distance(
                    points,
                    pcl_gt.points_padded(),
                    x_normals=normals,
                    y_normals=pcl_gt.normals_padded(),
                )
                eval_dict['chamfer_p'] = chamfer_p.item()
                eval_dict['chamfer_n'] = chamfer_n.item()
                pf_dist = point_mesh_face_distance(meshes_gt, pcl)
                eval_dict['pf_dist'] = pf_dist.item()
                writer.writerow(eval_dict)
                for k, v in eval_dict.items():
                    if v < best_dict[k]:
                        best_dict[k] = v
                        print('best {} so far ({}): {:.4g}'.format(
                            k, vis_file, v))

    # evaluate the final mesh in the generation directory
    gen_dir = os.path.join(exp_dir, 'generation')
    if not os.path.isdir(gen_dir):
        return

    final_file = os.path.join(gen_dir, 'mesh.ply')
    val_pts_file = final_file[:-4] + '_val%d' % n_pts + '.ply'
    if not os.path.isfile(final_file):
        return

    gen_file_csv = os.path.join(gen_dir, "evaluation_n%d.csv" % n_pts)
    if not os.path.isfile(gen_file_csv):
        with open(gen_file_csv, "w") as f:
            fieldnames = ['chamfer_p', 'chamfer_n', 'pf_dist']
            writer = csv.DictWriter(f,
                                    fieldnames=fieldnames,
                                    restval="-",
                                    extrasaction="ignore")
            writer.writeheader()
            eval_dict = OrderedDict()
            mesh = trimesh.load(final_file, process=False)
            # alternative: Poisson-disk sampling via point_cloud_utils
            # (pcu.sample_mesh_poisson_disk), disabled here in favor
            # of pytorch3d's uniform mesh sampling below
            meshes = Meshes(
                torch.from_numpy(mesh.vertices[None, ...]).float(),
                torch.from_numpy(mesh.faces[None, ...]).long()).to(device)
            points, normals = sample_points_from_meshes(meshes,
                                                        n_pts,
                                                        return_normals=True)
            trimesh.Trimesh(points.cpu().numpy()[0],
                            vertex_normals=normals.cpu().numpy()[0],
                            process=False).export(val_pts_file,
                                                  vertex_normal=True)
            pcl = Pointclouds(points, normals)
            chamfer_p, chamfer_n = chamfer_distance(
                points,
                pcl_gt.points_padded(),
                x_normals=normals,
                y_normals=pcl_gt.normals_padded(),
            )
            eval_dict['chamfer_p'] = chamfer_p.item()
            eval_dict['chamfer_n'] = chamfer_n.item()
            pf_dist = point_mesh_face_distance(meshes_gt, pcl)
            eval_dict['pf_dist'] = pf_dist.item()
            writer.writerow(eval_dict)
            for k, v in eval_dict.items():
                if v < best_dict[k]:
                    best_dict[k] = v
                    print('best {} so far ({}): {:.4g}'.format(
                        k, final_file, v))
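
For reference, the example above depends on a handful of imports a reader would need to reproduce it. The PyTorch3D and standard-library imports below follow directly from the calls in the function; config, read_ply, and get_filenames are project-specific helpers assumed to come from the surrounding repository.

import csv
import os
from collections import OrderedDict, defaultdict

import numpy as np
import torch
import trimesh
from pytorch3d.loss import chamfer_distance, point_mesh_face_distance
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.structures import Meshes, Pointclouds

# assumed project-specific helpers (not part of pytorch3d):
# config.load_config, config.create_dataset, read_ply, get_filenames

eval_one_dir('runs/exp_001', n_pts=50000)  # hypothetical experiment directory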
Example #2
def _add_pointcloud_trace(
    fig: go.Figure,
    pointclouds: Pointclouds,
    trace_name: str,
    subplot_idx: int,
    ncols: int,
    max_points_per_pointcloud: int,
    marker_size: int,
):  # pragma: no cover
    """
    Adds a trace rendering a Pointclouds object to the passed-in figure, with
    a given name and in a specific subplot.

    Args:
        fig: plotly figure to add the trace within.
        pointclouds: Pointclouds object to render. It can be batched.
        trace_name: name to label the trace with.
        subplot_idx: identifies the subplot, with 0 being the top left.
        ncols: the number of subplots per row.
        max_points_per_pointcloud: the number of points to render, which are randomly sampled.
        marker_size: the size of the rendered points
    """
    pointclouds = pointclouds.detach().cpu().subsample(
        max_points_per_pointcloud)
    verts = pointclouds.points_packed()
    features = pointclouds.features_packed()

    color = None
    if features is not None:
        if features.shape[1] == 4:  # rgba
            template = "rgba(%d, %d, %d, %f)"
            rgb = (features[:, :3].clamp(0.0, 1.0) * 255).int()
            color = [
                template % (*rgb_, a_)
                for rgb_, a_ in zip(rgb, features[:, 3])
            ]

        elif features.shape[1] == 3:
            template = "rgb(%d, %d, %d)"
            rgb = (features.clamp(0.0, 1.0) * 255).int()
            color = [template % (r, g, b) for r, g, b in rgb]

    row = subplot_idx // ncols + 1
    col = subplot_idx % ncols + 1
    fig.add_trace(
        go.Scatter3d(
            x=verts[:, 0],
            y=verts[:, 1],
            z=verts[:, 2],
            marker={
                "color": color,
                "size": marker_size
            },
            mode="markers",
            name=trace_name,
        ),
        row=row,
        col=col,
    )

    # Access the current subplot's scene configuration
    plot_scene = "scene" + str(subplot_idx + 1)
    current_layout = fig["layout"][plot_scene]

    # update the bounds of the axes for the current trace
    verts_center = verts.mean(0)
    max_expand = (verts.max(0)[0] - verts.min(0)[0]).max()
    _update_axes_bounds(verts_center, max_expand, current_layout)
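
_add_pointcloud_trace is a private helper from pytorch3d.vis.plotly_vis; in typical use it is invoked indirectly through the public plot_scene entry point rather than called by hand. A minimal sketch (the subplot title, trace key, and random data are arbitrary):

import torch
from pytorch3d.structures import Pointclouds
from pytorch3d.vis.plotly_vis import plot_scene

# one cloud of 1000 points with per-point RGB features in [0, 1]
points = torch.rand(1, 1000, 3)
rgb = torch.rand(1, 1000, 3)
cloud = Pointclouds(points=points, features=rgb)

# plot_scene lays out the subplot grid and adds one trace per structure,
# calling _add_pointcloud_trace internally for point clouds
fig = plot_scene({"Pointcloud demo": {"my_cloud": cloud}})
fig.show()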
Example #3
def point_mesh_edge_distance(meshes: Meshes, pcls: Pointclouds):
    """
    Computes the distance between a pointcloud and a mesh within a batch.
    Given a pair `(mesh, pcl)` in the batch, we define the distance to be the
    sum of two distances, namely `point_edge(mesh, pcl) + edge_point(mesh, pcl)`

    `point_edge(mesh, pcl)`: Computes the squared distance of each point p in pcl
        to the closest edge segment in mesh and averages across all points in pcl
    `edge_point(mesh, pcl)`: Computes the squared distance of each edge segment in mesh
        to the closest point in pcl and averages across all edges in mesh.

    The above distance functions are applied for all `(mesh, pcl)` pairs in the batch
    and then averaged across the batch.

    Args:
        meshes: A Meshes data structure containing N meshes
        pcls: A Pointclouds data structure containing N pointclouds

    Returns:
        loss: The `point_edge(mesh, pcl) + edge_point(mesh, pcl)` distance
            between all `(mesh, pcl)` in a batch averaged across the batch.
    """
    if len(meshes) != len(pcls):
        raise ValueError("meshes and pointclouds must be equal sized batches")
    N = len(meshes)

    # packed representation for pointclouds
    points = pcls.points_packed()  # (P, 3)
    points_first_idx = pcls.cloud_to_packed_first_idx()
    max_points = pcls.num_points_per_cloud().max().item()

    # packed representation for edges
    verts_packed = meshes.verts_packed()
    edges_packed = meshes.edges_packed()
    segms = verts_packed[edges_packed]  # (S, 2, 3)
    segms_first_idx = meshes.mesh_to_edges_packed_first_idx()
    max_segms = meshes.num_edges_per_mesh().max().item()

    # point to edge distance: shape (P,)
    point_to_edge = point_edge_distance(points, points_first_idx, segms,
                                        segms_first_idx, max_points)

    # weight each example by the inverse of number of points in the example
    point_to_cloud_idx = pcls.packed_to_cloud_idx()  # (sum(P_i), )
    num_points_per_cloud = pcls.num_points_per_cloud()  # (N,)
    weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx)
    weights_p = 1.0 / weights_p.float()
    point_to_edge = point_to_edge * weights_p
    point_dist = point_to_edge.sum() / N

    # edge to point distance: shape (S,)
    edge_to_point = edge_point_distance(points, points_first_idx, segms,
                                        segms_first_idx, max_segms)

    # weight each example by the inverse of number of edges in the example
    segm_to_mesh_idx = meshes.edges_packed_to_mesh_idx()  # (sum(S_n),)
    num_segms_per_mesh = meshes.num_edges_per_mesh()  # (N,)
    weights_s = num_segms_per_mesh.gather(0, segm_to_mesh_idx)
    weights_s = 1.0 / weights_s.float()
    edge_to_point = edge_to_point * weights_s
    edge_dist = edge_to_point.sum() / N

    return point_dist + edge_dist
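
A small self-contained usage sketch (one triangle versus a random cloud; the values are arbitrary):

import torch
from pytorch3d.loss import point_mesh_edge_distance
from pytorch3d.structures import Meshes, Pointclouds

# a single triangle in the xy-plane and 100 random points near it
verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
faces = torch.tensor([[0, 1, 2]])
meshes = Meshes(verts=[verts], faces=[faces])
pcls = Pointclouds(points=[torch.rand(100, 3)])

loss = point_mesh_edge_distance(meshes, pcls)  # 0-dim tensor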
Example #4
def point_mesh_face_distance(meshes: Meshes, pcls: Pointclouds):
    """
    Computes the distance between a pointcloud and a mesh within a batch.
    Given a pair `(mesh, pcl)` in the batch, we define the distance to be the
    sum of two distances, namely `point_face(mesh, pcl) + face_point(mesh, pcl)`

    `point_face(mesh, pcl)`: Computes the squared distance of each point p in pcl
        to the closest triangular face in mesh and averages across all points in pcl
    `face_point(mesh, pcl)`: Computes the squared distance of each triangular face in
        mesh to the closest point in pcl and averages across all faces in mesh.

    The above distance functions are applied for all `(mesh, pcl)` pairs in the batch
    and then averaged across the batch.

    Args:
        meshes: A Meshes data structure containing N meshes
        pcls: A Pointclouds data structure containing N pointclouds

    Returns:
        loss: The `point_face(mesh, pcl) + face_point(mesh, pcl)` distance
            between all `(mesh, pcl)` in a batch averaged across the batch.
    """

    if len(meshes) != len(pcls):
        raise ValueError("meshes and pointclouds must be equal sized batches")
    N = len(meshes)

    # packed representation for pointclouds
    points = pcls.points_packed()  # (P, 3)
    points_first_idx = pcls.cloud_to_packed_first_idx()
    max_points = pcls.num_points_per_cloud().max().item()

    # packed representation for faces
    verts_packed = meshes.verts_packed()
    faces_packed = meshes.faces_packed()
    tris = verts_packed[faces_packed]  # (T, 3, 3)
    tris_first_idx = meshes.mesh_to_faces_packed_first_idx()
    max_tris = meshes.num_faces_per_mesh().max().item()

    # point to face distance: shape (P,)
    point_to_face = point_face_distance(points, points_first_idx, tris,
                                        tris_first_idx, max_points)

    # weight each example by the inverse of number of points in the example
    point_to_cloud_idx = pcls.packed_to_cloud_idx()  # (sum(P_i),)
    num_points_per_cloud = pcls.num_points_per_cloud()  # (N,)
    weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx)
    weights_p = 1.0 / weights_p.float()
    point_to_face = point_to_face * weights_p
    point_dist = point_to_face.sum() / N

    # face to point distance: shape (T,)
    face_to_point = face_point_distance(points, points_first_idx, tris,
                                        tris_first_idx, max_tris)

    # weight each example by the inverse of number of faces in the example
    tri_to_mesh_idx = meshes.faces_packed_to_mesh_idx()  # (sum(T_n),)
    num_tris_per_mesh = meshes.num_faces_per_mesh()  # (N, )
    weights_t = num_tris_per_mesh.gather(0, tri_to_mesh_idx)
    weights_t = 1.0 / weights_t.float()
    face_to_point = face_to_point * weights_t
    face_dist = face_to_point.sum() / N

    return point_dist + face_dist
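
The underlying distance kernels are differentiable in PyTorch3D, so the returned scalar can serve directly as a training loss. A brief sketch, assuming ico_sphere from pytorch3d.utils as a convenient test mesh:

import torch
from pytorch3d.loss import point_mesh_face_distance
from pytorch3d.structures import Pointclouds
from pytorch3d.utils import ico_sphere

meshes = ico_sphere(level=2)                        # unit icosphere, one mesh
points = torch.rand(1, 500, 3, requires_grad=True)  # points to optimize
pcls = Pointclouds(points=points)

loss = point_mesh_face_distance(meshes, pcls)
loss.backward()  # gradients flow back to the points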
Example #5
def _add_pointcloud_trace(
    fig: go.Figure,
    pointclouds: Pointclouds,
    trace_name: str,
    subplot_idx: int,
    ncols: int,
    max_points_per_pointcloud: int,
    marker_size: int,
):
    """
    Adds a trace rendering a Pointclouds object to the passed-in figure, with
    a given name and in a specific subplot.

    Args:
        fig: plotly figure to add the trace within.
        pointclouds: Pointclouds object to render. It can be batched.
        trace_name: name to label the trace with.
        subplot_idx: identifies the subplot, with 0 being the top left.
        ncols: the number of subplots per row.
        max_points_per_pointcloud: the number of points to render, which are randomly sampled.
        marker_size: the size of the rendered points
    """
    pointclouds = pointclouds.detach().cpu()
    verts = pointclouds.points_packed()
    features = pointclouds.features_packed()

    indices = None
    if pointclouds.num_points_per_cloud().max() > max_points_per_pointcloud:
        start_index = 0
        index_list = []
        for num_points in pointclouds.num_points_per_cloud():
            if num_points > max_points_per_pointcloud:
                indices_cloud = np.random.choice(num_points,
                                                 max_points_per_pointcloud,
                                                 replace=False)
                index_list.append(start_index + indices_cloud)
            else:
                index_list.append(start_index + np.arange(num_points))
            start_index += num_points
        indices = np.concatenate(index_list)
        verts = verts[indices]

    color = None
    if features is not None:
        if indices is not None:
            # Only select features if we selected vertices above
            features = features[indices]
        if features.shape[1] == 4:  # rgba
            template = "rgba(%d, %d, %d, %f)"
            rgb = (features[:, :3].clamp(0.0, 1.0) * 255).int()
            color = [
                template % (*rgb_, a_)
                for rgb_, a_ in zip(rgb, features[:, 3])
            ]

        elif features.shape[1] == 3:
            template = "rgb(%d, %d, %d)"
            rgb = (features.clamp(0.0, 1.0) * 255).int()
            color = [template % (r, g, b) for r, g, b in rgb]

    row = subplot_idx // ncols + 1
    col = subplot_idx % ncols + 1
    fig.add_trace(
        go.Scatter3d(  # pyre-ignore[16]
            x=verts[:, 0],
            y=verts[:, 1],
            z=verts[:, 2],
            marker={
                "color": color,
                "size": marker_size
            },
            mode="markers",
            name=trace_name,
        ),
        row=row,
        col=col,
    )

    # Access the current subplot's scene configuration
    plot_scene = "scene" + str(subplot_idx + 1)
    current_layout = fig["layout"][plot_scene]

    # update the bounds of the axes for the current trace
    verts_center = verts.mean(0)
    max_expand = (verts.max(0)[0] - verts.min(0)[0]).max()
    _update_axes_bounds(verts_center, max_expand, current_layout)
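
This variant performs the per-cloud capping manually on the packed representation instead of calling Pointclouds.subsample as Example #2 does. The same indexing pattern in isolation (a hypothetical two-cloud batch; max_pts is arbitrary):

import numpy as np
import torch
from pytorch3d.structures import Pointclouds

pcl = Pointclouds(points=[torch.rand(2000, 3), torch.rand(300, 3)])
max_pts = 500

start, index_list = 0, []
for n in pcl.num_points_per_cloud():
    n = int(n)
    if n > max_pts:
        # cap large clouds by sampling without replacement
        keep = np.random.choice(n, max_pts, replace=False)
    else:
        # small clouds are kept whole
        keep = np.arange(n)
    index_list.append(start + keep)
    start += n
indices = np.concatenate(index_list)
verts = pcl.points_packed()[indices]  # at most max_pts points per cloud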