# Exemplo n.º 1
def vb_index(surf_vertices,
             surf_faces,
             n_cpus,
             data,
             norm,
             cort_index,
             output_name=None,
             nib_surf=None):
    """Computes the Vogt-Bailey index of vertices for the whole mesh

       Parameters
       ----------
       surf_vertices: (M, 3) numpy array
           Vertices of the mesh
       surf_faces: (M, 3) numpy array
           Faces of the mesh. Used to find the neighborhood of a given vertice
       n_cpus: integer
               How many CPUS to run the calculation
       data: (M, N) numpy array
           Data used to calculate the VB index. M must match the number of vertices in the mesh
       norm: string
             Method of reordering. Possibilities are 'geig', 'unnorm', 'rw' and 'sym'
       cort_index: (M) numpy array
            Mask for detection of middle brain structures
       output_name: string
            Root of file to save the results to. If specified, nib_surf must also be provided
       nib_surf: Nibabel object
            Nibabel object containing metadata to be replicated

       Returns
       -------
       result: (N) numpy array
                   Resulting VB index of the indices in range
    """

    # Calculate how many vertices each process is going to be responsible for
    n_items = len(surf_vertices)
    if n_items == 0:
        # Empty mesh: nothing to compute, and the chunk-size division
        # below would otherwise raise ZeroDivisionError
        return np.array([])
    n_cpus = min(n_items, n_cpus)
    dn = n_items // n_cpus

    # Init multiprocessing components
    counter = Value('i', 0)
    pool = Pool(initializer=init, initargs=(counter, n_items))
    try:
        # Spawn the workers that are going to do the real work, each
        # handling a contiguous chunk of [i0, iN) vertices
        threads = []
        for i0 in range(0, n_items, dn):
            iN = min(i0 + dn, n_items)
            threads.append(
                pool.apply_async(vb_index_internal_loop,
                                 (i0, iN, surf_faces, data, norm)))

        # Gather the results from the workers we just spawned; res.get()
        # re-raises any exception that occurred inside a worker
        results = []
        for res in threads:
            results.extend(res.get())
        results = np.array(results)
    finally:
        # Always reap the worker processes, even if a worker raised,
        # so no child processes are leaked
        pool.terminate()
        pool.join()

    # Mask out non-cortical (e.g. midline) vertices
    results[np.logical_not(cort_index)] = np.nan

    # Save file
    if output_name is not None:
        io.save_gifti(nib_surf, results, output_name + ".vbi.shape.gii")

    return results
# Exemplo n.º 2
def vb_cluster(surf_vertices,
               surf_faces,
               n_cpus,
               data,
               cluster_index,
               norm,
               output_name=None,
               nib_surf=None):
    """Computes the clustered Vogt-Bailey index of vertices for the whole mesh

       Parameters
       ----------
       surf_vertices: (M, 3) numpy array
           Vertices of the mesh
       surf_faces: (M, 3) numpy array
           Faces of the mesh. Used to find the neighborhood of a given vertice
       n_cpus: integer
               How many CPUS to run the calculation
       data: (M, N) numpy array
           Data used to calculate the VB index. M must match the number of vertices in the mesh
       cluster_index: (M) numpy array
           Array containing the cluster which each vertex belongs to.
           Cluster label 0 marks midline/midbrain vertices, which are excluded
       norm: string
             Method of reordering. Possibilities are 'geig', 'unnorm', 'rw' and 'sym'
       output_name: string
            Root of file to save the results to. If specified, nib_surf must also be provided
       nib_surf: Nibabel object
            Nibabel object containing metadata to be replicated

       Returns
       -------
       results_eigenvalues: (M) numpy array
                            Resulting VB index of the clusters
       results_eigenvectors: (M, N) numpy array
                            Resulting Fiedler vectors of the clusters
    """

    # Find the cluster labels, and the midbrain structures (label 0)
    cluster_labels = np.unique(cluster_index)
    midline_index = cluster_index == 0

    # Calculate how many clusters each process is going to be responsible for
    n_items = len(cluster_labels)
    if n_items == 0:
        # No clusters at all: avoid ZeroDivisionError in the chunking below
        return np.zeros(len(surf_vertices)), np.array([])
    n_cpus = min(n_items, n_cpus)
    dn = n_items // n_cpus

    # Init multiprocessing components
    counter = Value('i', 0)
    pool = Pool(initializer=init, initargs=(counter, n_items))
    try:
        # Spawn the workers that are going to do the real work, each
        # handling a contiguous chunk of [i0, iN) cluster labels
        threads = []
        for i0 in range(0, n_items, dn):
            iN = min(i0 + dn, n_items)
            threads.append(
                pool.apply_async(vb_cluster_internal_loop,
                                 (i0, iN, surf_faces, data, cluster_index, norm)))

        # Gather the (eigenvalue, eigenvector) pairs from the workers;
        # res.get() re-raises any exception raised inside a worker
        results = []
        results_eigenvectors_l = []
        for res in threads:
            for r, rv in res.get():
                results.append(r)
                results_eigenvectors_l.append(rv)
        results = np.array(results)
    finally:
        # Always reap the worker processes, even if a worker raised,
        # so no child processes are leaked
        pool.terminate()
        pool.join()

    # Now we need to push the per-cluster data back onto the original vertices
    results_eigenvalues = np.zeros(len(surf_vertices))
    results_eigenvectors = []
    for i in range(n_items):
        cluster = cluster_labels[i]
        if cluster != 0:
            results_eigenvectors_local = np.zeros(len(surf_vertices))
            idx = np.where(cluster_index == cluster)[0]
            results_eigenvalues[idx] = results[i]
            results_eigenvectors_local[idx] = results_eigenvectors_l[i]
            results_eigenvectors.append(results_eigenvectors_local)

    results_eigenvectors = np.array(results_eigenvectors).transpose()

    # Remove the midbrain
    results_eigenvalues[midline_index] = np.nan
    results_eigenvectors[midline_index, :] = np.nan

    # Save file
    if output_name is not None:
        io.save_gifti(nib_surf, results_eigenvalues,
                      output_name + ".vb-cluster.value.shape.gii")
        io.save_gifti(nib_surf, results_eigenvectors,
                      output_name + ".vb-cluster.vector.shape.gii")

    return results_eigenvalues, results_eigenvectors
# Exemplo n.º 3
def vb_hybrid(surf_vertices,
              brain_mask,
              affine,
              n_cpus,
              data,
              norm,
              cort_index,
              output_name=None,
              nib_surf=None):
    """Computes the Vogt-Bailey index of vertices for the whole mesh

       Parameters
       ----------
       surf_vertices: (M, 3) numpy array
           Vertices of the mesh
       brain_mask: (nRows, nCols, nSlices) numpy array
           Whole brain mask. Used to mask volumetric data
       affine: (4, 4) numpy array
           Voxel-to-world affine of the volume; its inverse maps surface
           vertex coordinates into voxel coordinates
       n_cpus: integer
           How many CPUS are available to run the calculation
       data: (nRows, nCols, nSlices, N) numpy array
           Volumetric data used to calculate the VB index. N is the number of maps
       norm: string
           Method of reordering. Possibilities are 'geig', 'unnorm', 'rw' and 'sym'
       cort_index: (M) numpy array
           Mask for detection of middle brain structures
       output_name: string
           Root of file to save the results to. If specified, nib_surf must also be provided
       nib_surf: nibabel object
           Nibabel object containing metadata to be replicated

       Returns
       -------
       result: (N) numpy array
               Resulting VB index of the indices in range
    """

    # Convert vertex coordinates (world space) to voxel coordinates
    vox_coords = np.round(
        nibabel.affines.apply_affine(np.linalg.inv(affine), surf_vertices))

    # Calculate how many vertices each process is going to be responsible for
    n_items = len(surf_vertices)
    if n_items == 0:
        # Empty mesh: nothing to compute, and the chunk-size division
        # below would otherwise raise ZeroDivisionError
        return np.array([])
    n_cpus = min(n_items, n_cpus)
    dn = n_items // n_cpus

    # Init multiprocessing components
    counter = Value('i', 0)
    pool = Pool(initializer=init, initargs=(counter, n_items))

    def error_callback(result):
        # Fail fast: if any worker raises, stop the whole pool instead of
        # letting the remaining chunks keep running
        pool.close()
        pool.terminate()

    try:
        # Spawn the workers that are going to do the real work, each
        # handling a contiguous chunk of [i0, iN) vertices
        threads = []
        for i0 in range(0, n_items, dn):
            iN = min(i0 + dn, n_items)
            threads.append(
                pool.apply_async(vb_hybrid_internal_loop,
                                 (i0, iN, vox_coords, brain_mask, data, norm),
                                 error_callback=error_callback))

        # Gather the results from the workers we just spawned; res.get()
        # re-raises any exception that occurred inside a worker
        results = []
        for res in threads:
            results.extend(res.get())
        results = np.array(results)
    finally:
        # Always reap the worker processes, even if a worker raised,
        # so no child processes are leaked
        pool.terminate()
        pool.join()

    # Mask out non-cortical (e.g. midline) vertices
    results[np.logical_not(cort_index)] = np.nan

    # Save file
    if output_name is not None:
        io.save_gifti(nib_surf, results, output_name + ".vbi-hybrid.shape.gii")

    return results