def embedding(upper_corr, full_shape, n_components):
    '''
    Diffusion embedding on a connectivity matrix using the mapalign package:
    https://github.com/satra/mapalign
    '''
    # reconstruct full matrix
    print('...full matrix')
    full_corr = np.zeros(tuple(full_shape))
    full_corr[np.triu_indices_from(full_corr, k=1)] = np.nan_to_num(upper_corr)
    del upper_corr
    gc.collect()
    full_corr += full_corr.T
    gc.collect()

    # run actual embedding
    print('...embed')
    K = (full_corr + 1) / 2.
    del full_corr
    gc.collect()
    K[np.where(np.eye(K.shape[0]) == 1)] = 1.0

    embedding_results, \
        embedding_dict = embed.compute_diffusion_map(K,
                                                     n_components=n_components,
                                                     overwrite=True,
                                                     return_result=True)

    return embedding_results, embedding_dict
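A minimal usage sketch for the function above (toy data; the variable names below are illustrative, and `numpy as np`, `gc` and `from mapalign import embed` are assumed to be imported as in the surrounding snippets):

corr = np.corrcoef(np.random.rand(100, 500))         # toy (100, 100) connectivity matrix
upper_corr = corr[np.triu_indices_from(corr, k=1)]   # packed upper triangle, as expected above
emb, emb_dict = embedding(upper_corr, corr.shape, n_components=5)
# emb has one row per node and one column per requested component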
def get_gradients(connectmat, ncomp):
    threshMat = connectmat.copy()
    np.fill_diagonal(threshMat, 0)

    # Compute the 90th-percentile threshold for each row
    perc = np.array([np.percentile(x, 90) for x in threshMat])

    # Threshold each row of the matrix by setting values below 90th perc to 0
    for i in range(threshMat.shape[0]):
        threshMat[i, threshMat[i, :] < perc[i]] = 0

    # Count negative values per row
    neg_values = np.array(
        [sum(threshMat[i, :] < 0) for i in range(threshMat.shape[0])])
    print('Negative values occur in %d rows' % sum(neg_values > 0))

    # remove negative ones
    threshMat[threshMat < 0] = 0

    cosSimilarity = sklearn.metrics.pairwise.cosine_similarity(threshMat)
    # de = DiffusionMapEmbedding().fit_transform(cosSimilarity)
    dme = compute_diffusion_map(cosSimilarity,
                                n_components=ncomp,
                                return_result=True)

    # lambdas
    lambdas = dme[1]['lambdas']

    # gradients
    grads = dme[0]

    return grads, lambdas
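An illustrative call of `get_gradients` (assuming `numpy as np`, `import sklearn.metrics` and `from mapalign.embed import compute_diffusion_map` are in scope; the connectivity matrix is a random toy stand-in):

connectmat = np.corrcoef(np.random.rand(200, 800))   # toy (200, 200) connectivity matrix
grads, lambdas = get_gradients(connectmat, ncomp=5)
# grads: one row per node, one column per component; lambdas: the corresponding eigenvalues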
Example #3
def dme(network, threshold=90, n_components=10, return_result=False, **kwargs):
    """
    Threshold, cosine similarity, and diffusion map embed `network`

    Parameters
    ----------
    network : (N, N) array_like
        Symmetric network on which to perform diffusion map embedding
    threshold : [0, 100] float, optional
        Threshold used to "sparsify" `network` prior to embedding. Default: 90
    n_components : int, optional
        Number of components to retain from embedding of `network`. Default: 10
    return_result : bool, optional
        Whether to return result dictionary including eigenvalues, original
        eigenvectors, etc. from embedding. Default: False
    kwargs : key-value pairs, optional
        Passed directly to :func:`mapalign.embed.compute_diffusion_map`

    Returns
    -------
    embedding : (N, C) numpy.ndarray
        Embedding of `N` samples in `C`-dimensional space
    res : dict
        Only if `return_result=True`
    """

    from mapalign import embed
    from sklearn import metrics
    from sklearn.utils.extmath import _deterministic_vector_sign_flip

    # threshold
    network = network.copy()
    threshold = np.percentile(network, threshold, axis=1, keepdims=True)
    network[network < threshold] = 0

    # cosine similarity
    network = metrics.pairwise.cosine_similarity(network)

    # embed (and ensure consistent output with regard to sign flipping)
    emb, res = embed.compute_diffusion_map(network,
                                           n_components=n_components,
                                           return_result=True,
                                           **kwargs)
    emb = _deterministic_vector_sign_flip(emb.T).T

    if return_result:
        return emb, res

    return emb
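One way the function above might be called (the "network" here is just a random toy matrix; `numpy as np` is assumed to be imported):

rng = np.random.default_rng(0)
net = np.corrcoef(rng.random((150, 400)))        # (150, 150) symmetric toy network
emb = dme(net, threshold=90, n_components=10)    # (150, 10) embedding
emb, res = dme(net, return_result=True)          # also return lambdas, vectors, etc.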
def embed_dense_connectome(aff):
    """
    This function takes the affinity matrix and returns the low-dimensional
    embedding.
    """

    from mapalign import embed

    # Embedding using mapalign
    emb, res = embed.compute_diffusion_map(aff, alpha=0.5, n_components=300)

    # Divide each eigenvector by the trivial first eigenvector, then drop that first component
    X = res['vectors']
    X = (X.T / X[:, 0]).T[:, 1:]

    return X
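A sketch of how such an affinity matrix might be built beforehand, mirroring the negative-zeroing and cosine-similarity pattern used elsewhere in these examples (toy sizes and names, not the original pipeline):

import numpy as np
from sklearn.metrics import pairwise_distances

fc = np.corrcoef(np.random.rand(400, 1200))         # toy dense functional connectome
fc[fc < 0] = 0                                       # drop negative correlations
aff = 1 - pairwise_distances(fc, metric='cosine')    # nonnegative cosine-similarity affinity
X = embed_dense_connectome(aff)                      # embedding with the trivial component removed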
Example #5
def main(s):

    import os.path

    if os.path.isfile(
            '/network/lustre/iss01/home/daniel.margulies/data/lsd/embedding/embedding_dense_emb.%s.npy'
            % s):

        emb = np.load(
            '/network/lustre/iss01/home/daniel.margulies/data/lsd/embedding/embedding_dense_emb.%s.npy'
            % s)

    else:

        K = load_fs(s)
        K[np.isnan(K)] = 0.0

        # row-wise Pearson correlation computed with basic matrix operations:
        # mean-center the rows, then normalize the cross-product matrix by the
        # outer product of the row norms
        A_mA = K - K.mean(1)[:, None]
        ssA = (A_mA**2).sum(1)
        Asq = np.sqrt(np.dot(ssA[:, None], ssA[None]))
        Adot = A_mA.dot(A_mA.T)

        K = Adot / Asq
        del A_mA, ssA, Asq, Adot
        K = run_perc(K, 90)

        # cosine similarity between the columns of the thresholded matrix
        norm = (K * K).sum(0, keepdims=True)**.5
        K = K.T @ K
        aff = K / norm / norm.T
        del norm, K
        #aff = sparse.csr_matrix(aff)

        emb, res = embed.compute_diffusion_map(aff,
                                               alpha=0.5,
                                               n_components=5,
                                               skip_checks=True,
                                               overwrite=True,
                                               eigen_solver=eigsh,
                                               return_result=True)
        del aff

        np.save(
            '/network/lustre/iss01/home/daniel.margulies/data/lsd/embedding/embedding_dense_emb.%s.npy'
            % s, emb)
        np.save(
            '/network/lustre/iss01/home/daniel.margulies/data/lsd/embedding/embedding_dense_res.%s.npy'
            % s, res)
Example #6
def embedding(upper_corr, full_shape, mask, n_components):
    '''
    Diffusion embedding on a connectivity matrix using the mapalign package:
    https://github.com/satra/mapalign
    '''
    # reconstruct full matrix
    print('...full matrix')
    full_corr = np.zeros(tuple(full_shape))
    full_corr[np.triu_indices_from(full_corr, k=1)] = np.nan_to_num(upper_corr)
    full_corr += full_corr.T
    all_vertex = range(full_corr.shape[0])

    # apply mask
    print('...mask')
    masked_corr = np.delete(full_corr, mask, 0)
    del full_corr
    masked_corr = np.delete(masked_corr, mask, 1)
    cortex = np.delete(all_vertex, mask)

    # run actual embedding
    print('...embed')
    K = (masked_corr + 1) / 2.
    del masked_corr
    K[np.where(np.eye(K.shape[0]) == 1)] = 1.0
    #v = np.sqrt(np.sum(K, axis=1))
    #A = K/(v[:, None] * v[None, :])
    #del K, v
    #A = np.squeeze(A * [A > 0])
    #embedding_results = runEmbed(A, n_components_embedding)
    embedding_results, embedding_dict = embed.compute_diffusion_map(
        K, n_components=n_components, overwrite=True)

    # reconstruct masked vertices as zeros
    embedding_recort = np.zeros((len(all_vertex), embedding_results.shape[1]))
    for e in range(embedding_results.shape[1]):
        embedding_recort[:, e] = recort(len(all_vertex),
                                        embedding_results[:, e], cortex, 0)

    return embedding_recort, embedding_dict
Example #7
def t1embedding(upper_corr, full_shape, mask, n_components):
    import numpy as np
    from mapalign import embed

    # reconstruct full matrix
    print('...full matrix')
    full_corr = np.zeros(tuple(full_shape))
    full_corr[np.triu_indices_from(full_corr, k=1)] = np.nan_to_num(upper_corr)
    full_corr += full_corr.T
    all_vertex = range(full_corr.shape[0])

    # apply mask
    print('...mask')
    masked_corr = np.delete(full_corr, mask, 0)
    del full_corr
    masked_corr = np.delete(masked_corr, mask, 1)
    cortex = np.delete(all_vertex, mask)

    # run actual embedding
    print('...embed')
    K = 1 - (masked_corr / masked_corr.max())
    #K = (masked_corr + 1) / 2.
    del masked_corr
    K[np.where(np.eye(K.shape[0]) == 1)] = 1.0
    #v = np.sqrt(np.sum(K, axis=1))
    #A = K/(v[:, None] * v[None, :])
    #del K, v
    #A = np.squeeze(A * [A > 0])
    #embedding_results = runEmbed(A, n_components_embedding)
    embedding_results, embedding_dict = embed.compute_diffusion_map(
        K, n_components=n_components, overwrite=True)

    # reconstruct masked vertices as zeros
    embedding_recort = np.zeros((len(all_vertex), embedding_results.shape[1]))
    for e in range(embedding_results.shape[1]):
        embedding_recort[:, e] = recort(len(all_vertex),
                                        embedding_results[:, e], cortex, 0)

    return embedding_recort, embedding_dict
Example #8
def main(
    workdir,
    outdir,
    atlas,
    kernel,
    sparsity,
    affinity,
    approach,
    gradients,
    subcort,
    neurosynth,
    neurosynth_file,
    sleuth_file,
    nimare_dataset,
    roi_mask,
    term,
    topic,
):
    workdir = op.join(workdir, "tmp")
    if op.isdir(workdir):
        shutil.rmtree(workdir)
    os.makedirs(workdir)

    atlas_name = "atlas-{0}".format(atlas)
    kernel_name = "kernel-{0}".format(kernel)
    sparsity_name = "sparsity-{0}".format(sparsity)
    affinity_name = "affinity-{0}".format(affinity)
    approach_name = "approach-{0}".format(approach)
    gradients_name = "gradients-{0}".format(gradients)
    dset = None

    # handle neurosynth dataset, if called
    if neurosynth:
        if neurosynth_file is None:

            ns_data_dir = op.join(workdir, "neurosynth")
            dataset_file = op.join(ns_data_dir, "neurosynth_dataset.pkl.gz")
            # download neurosynth dataset if necessary
            if not op.isfile(dataset_file):
                neurosynth_download(ns_data_dir)

        else:
            dataset_file = neurosynth_file

        dset = Dataset.load(dataset_file)
        dataset_name = "dataset-neurosynth"

    # handle sleuth text file, if called
    if sleuth_file is not None:
        dset = convert_sleuth_to_dataset(sleuth_file, target="mni152_2mm")
        dataset_name = "dataset-{0}".format(op.basename(sleuth_file).split(".")[0])

    if nimare_dataset is not None:
        dset = Dataset.load(nimare_dataset)
        dataset_name = "dataset-{0}".format(op.basename(nimare_dataset).split(".")[0])

    if dset:
        # slice studies, if needed
        if roi_mask is not None:
            roi_ids = dset.get_studies_by_mask(roi_mask)
            print(
                "{}/{} studies report at least one coordinate in the "
                "ROI".format(len(roi_ids), len(dset.ids))
            )
            dset_sel = dset.slice(roi_ids)
            dset = dset_sel
            dataset_name = "dataset-neurosynth_mask-{0}".format(
                op.basename(roi_mask).split(".")[0]
            )

        if term is not None:
            labels = ["Neurosynth_TFIDF__{label}".format(label=label) for label in [term]]
            term_ids = dset.get_studies_by_label(labels=labels, label_threshold=0.1)
            print(
                "{}/{} studies report association "
                "with the term {}".format(len(term_ids), len(dset.ids), term)
            )
            dset_sel = dset.slice(term_ids)
            dset = dset_sel
            # img_inds = np.nonzero(dset.masker.mask_img.get_fdata())  # unused
            # vox_locs = np.unravel_index(img_inds, dset.masker.mask_img.shape)  # unused
            dataset_name = "dataset-neurosynth_term-{0}".format(term)

        if topic is not None:
            # use distinct loop-variable names so the `topic` argument is not shadowed
            topics = [
                "Neurosynth_{version}__{topic}".format(version=topic[0], topic=topic_name)
                for topic_name in topic[1:]
            ]
            topics_ids = []
            for topic_label in topics:
                topic_ids = dset.annotations.id[np.where(dset.annotations[topic_label])[0]].tolist()
                topics_ids.extend(topic_ids)
                print(
                    "{}/{} studies report association "
                    "with the term {}".format(len(topic_ids), len(dset.ids), topic_label)
                )
            topics_ids_unique = np.unique(topics_ids)
            print("{} unique ids".format(len(topics_ids_unique)))
            dset_sel = dset.slice(topics_ids_unique)
            dset = dset_sel
            # img_inds = np.nonzero(dset.masker.mask_img.get_fdata())  # unused
            # vox_locs = np.unravel_index(img_inds, dset.masker.mask_img.shape)  # unused
            dataset_name = "dataset-neurosynth_topic-{0}".format("_".join(topic[1:]))

        if (
            neurosynth
            or (sleuth_file is not None)
            or (nimare_dataset is not None)
        ):
            # set kernel for MA smoothing
            if kernel == "peaks2maps":
                print("Running peak2maps")
                k = Peaks2MapsKernel(resample_to_mask=True)
            elif kernel == "alekernel":
                print("Running alekernel")
                k = ALEKernel(fwhm=15)

            if atlas is not None:
                if atlas == "harvard-oxford":
                    print("Parcellating using the Harvard Oxford Atlas")
                    # NOTE: a Harvard-Oxford variant must be chosen when fetching;
                    # "cort-maxprob-thr25-2mm" is an assumed example, not from the original
                    atlas = datasets.fetch_atlas_harvard_oxford("cort-maxprob-thr25-2mm")
                    # atlas_labels = atlas.labels[1:]  # unused
                    atlas_shape = atlas.maps.shape
                    atlas_affine = atlas.maps.affine
                    atlas_data = atlas.maps.get_fdata()
                elif atlas == "aal":
                    print("Parcellating using the AAL Atlas")
                    atlas = datasets.fetch_atlas_aal()
                    # atlas_labels = atlas.labels  # unused
                    atlas_shape = nib.load(atlas.maps).shape
                    atlas_affine = nib.load(atlas.maps).affine
                    atlas_data = nib.load(atlas.maps).get_fdata()
                elif atlas == "craddock-2012":
                    print("Parcellating using the Craddock-2012 Atlas")
                    atlas = datasets.fetch_atlas_craddock_2012()
                elif atlas == "destrieux-2009":
                    print("Parcellating using the Destrieux-2009 Atlas")
                    atlas = datasets.fetch_atlas_destrieux_2009(lateralized=True)
                    # atlas_labels = atlas.labels[3:]  # unused
                    atlas_shape = nib.load(atlas.maps).shape
                    atlas_affine = nib.load(atlas.maps).affine
                    atlas_data = nib.load(atlas.maps).get_fdata()
                elif atlas == "msdl":
                    print("Parcellating using the MSDL Atlas")
                    atlas = datasets.fetch_atlas_msdl()
                elif atlas == "surface":
                    print("Generating surface vertices")

                if atlas != "fsaverage5" and atlas != "hcp":
                    imgs = k.transform(dset, return_type="image")

                    masker = NiftiLabelsMasker(
                        labels_img=atlas.maps, standardize=True, memory="nilearn_cache"
                    )
                    time_series = masker.fit_transform(imgs)

                else:
                    # change to array for other approach
                    imgs = k.transform(dset, return_type="image")
                    print(np.shape(imgs))

                    if atlas == "fsaverage5":
                        fsaverage = fetch_surf_fsaverage(mesh="fsaverage5")
                        pial_left = fsaverage.pial_left
                        pial_right = fsaverage.pial_right
                        medial_wall_inds_left = surface.load_surf_data(
                            "./templates/lh.Medial_wall.label"
                        )
                        print(np.shape(medial_wall_inds_left))
                        medial_wall_inds_right = surface.load_surf_data(
                            "./templates/rh.Medial_wall.label"
                        )
                        print(np.shape(medial_wall_inds_right))
                        sulc_left = fsaverage.sulc_left
                        sulc_right = fsaverage.sulc_right

                    elif atlas == "hcp":
                        pial_left = "./templates/S1200.L.pial_MSMAll.32k_fs_LR.surf.gii"
                        pial_right = "./templates/S1200.R.pial_MSMAll.32k_fs_LR.surf.gii"
                        medial_wall_inds_left = np.where(
                            nib.load("./templates/hcp.tmp.lh.dscalar.nii").get_fdata()[0] == 0
                        )[0]
                        medial_wall_inds_right = np.where(
                            nib.load("./templates/hcp.tmp.rh.dscalar.nii").get_fdata()[0] == 0
                        )[0]
                        left_verts = 32492 - len(medial_wall_inds_left)
                        sulc_left = nib.load(
                            "./templates/S1200.sulc_MSMAll.32k_fs_LR.dscalar.nii"
                        ).get_fdata()[0][0:left_verts]
                        sulc_left = np.insert(
                            sulc_left,
                            np.subtract(
                                medial_wall_inds_left, np.arange(len(medial_wall_inds_left))
                            ),
                            0,
                        )
                        sulc_right = nib.load(
                            "./templates/S1200.sulc_MSMAll.32k_fs_LR.dscalar.nii"
                        ).get_fdata()[0][left_verts:]
                        sulc_right = np.insert(
                            sulc_right,
                            np.subtract(
                                medial_wall_inds_right, np.arange(len(medial_wall_inds_right))
                            ),
                            0,
                        )

                    surf_lh = surface.vol_to_surf(
                        imgs,
                        pial_left,
                        radius=6.0,
                        interpolation="nearest",
                        kind="ball",
                        n_samples=None,
                        mask_img=dset.masker.mask_img,
                    )
                    surf_rh = surface.vol_to_surf(
                        imgs,
                        pial_right,
                        radius=6.0,
                        interpolation="nearest",
                        kind="ball",
                        n_samples=None,
                        mask_img=dset.masker.mask_img,
                    )
                    surfs = np.transpose(np.vstack((surf_lh, surf_rh)))
                    del surf_lh, surf_rh

                    # handle cortex first
                    coords_left = surface.load_surf_data(pial_left)[0]
                    coords_left = np.delete(coords_left, medial_wall_inds_left, axis=0)
                    coords_right = surface.load_surf_data(pial_right)[0]
                    coords_right = np.delete(coords_right, medial_wall_inds_right, axis=0)

                    print("Left Hemipshere Vertices")
                    surface_macms_lh, inds_discard_lh = build_macms(dset, surfs, coords_left)
                    print(np.shape(surface_macms_lh))
                    print(inds_discard_lh)

                    print("Right Hemipshere Vertices")
                    surface_macms_rh, inds_discard_rh = build_macms(dset, surfs, coords_right)
                    print(np.shape(surface_macms_rh))
                    print(len(inds_discard_rh))

                    lh_vertices_total = np.shape(surface_macms_lh)[0]
                    rh_vertices_total = np.shape(surface_macms_rh)[0]
                    time_series = np.transpose(np.vstack((surface_macms_lh, surface_macms_rh)))
                    print(np.shape(time_series))
                    del surface_macms_lh, surface_macms_rh

                    if subcort:
                        subcort_img = nib.load("templates/rois-subcortical_mni152_mask.nii.gz")
                        subcort_vox = np.asarray(np.where(subcort_img.get_fdata()))
                        subcort_mm = vox2mm(subcort_vox.T, subcort_img.affine)

                        print("Subcortical Voxels")
                        subcort_macm, inds_discard_subcort = build_macms(dset, surfs, subcort_mm)

                        num_subcort_vox = np.shape(subcort_macm)[0]
                        print(inds_discard_subcort)

                        time_series = np.hstack((time_series, np.asarray(subcort_macm).T))
                        print(np.shape(time_series))

                time_series = time_series.astype("float32")

                print("calculating correlation matrix")
                correlation = ConnectivityMeasure(kind="correlation")
                time_series = correlation.fit_transform([time_series])[0]
                print(np.shape(time_series))

                if affinity == "cosine":
                    time_series = calculate_affinity(time_series, 10 * sparsity)

            else:
                time_series = np.transpose(k.transform(dset, return_type="array"))

    print("Performing gradient analysis")

    gradients, statistics = embed.compute_diffusion_map(
        time_series, alpha=0.5, return_result=True, overwrite=True
    )
    pickle.dump(statistics, open(op.join(workdir, "statistics.p"), "wb"))

    # if subcortical included in gradient decomposition, remove gradient scores
    if subcort:
        subcort_grads = gradients[np.shape(gradients)[0] - num_subcort_vox :, :]
        subcort_grads = insert(subcort_grads, inds_discard_subcort)
        gradients = gradients[0 : np.shape(gradients)[0] - num_subcort_vox, :]

    # get left hemisphere gradient scores, and insert 0's where medial wall is
    gradients_lh = gradients[0:lh_vertices_total, :]
    if len(inds_discard_lh) > 0:
        gradients_lh = insert(gradients_lh, inds_discard_lh)
    gradients_lh = insert(gradients_lh, medial_wall_inds_left)

    # get right hemisphere gradient scores and insert 0's where medial wall is
    gradients_rh = gradients[-rh_vertices_total:, :]
    if len(inds_discard_rh) > 0:
        gradients_rh = insert(gradients_rh, inds_discard_rh)
    gradients_rh = insert(gradients_rh, medial_wall_inds_right)

    grad_dict = {
        "grads_lh": gradients_lh,
        "grads_rh": gradients_rh,
        "pial_left": pial_left,
        "sulc_left": sulc_left,
        "pial_right": pial_right,
        "sulc_right": sulc_right,
    }
    if subcort:
        grad_dict["subcort_grads"] = subcort_grads
    pickle.dump(grad_dict, open(op.join(workdir, "gradients.p"), "wb"))

    # map the gradient to the parcels
    for i in range(np.shape(gradients)[1]):
        if atlas is not None:
            if atlas == "fsaverage5" or atlas == "hcp":

                plot_surfaces(grad_dict, i, workdir)

                if subcort:
                    tmpimg = masking.unmask(subcort_grads[:, i], subcort_img)
                    nib.save(tmpimg, op.join(workdir, "gradient-{0}.nii.gz".format(i)))
            else:
                tmpimg = np.zeros(atlas_shape)
                for j, n in enumerate(np.unique(atlas_data)[1:]):
                    inds = atlas_data == n
                    tmpimg[inds] = gradients[j, i]
                    nib.save(
                        nib.Nifti1Image(tmpimg, atlas_affine),
                        op.join(workdir, "gradient-{0}.nii.gz".format(i)),
                    )
        else:
            tmpimg = np.zeros(np.prod(dset.masker.mask_img.shape))
            inds = np.ravel_multi_index(
                np.nonzero(dset.masker.mask_img.get_fdata()), dset.masker.mask_img.shape
            )
            tmpimg[inds] = gradients[:, i]
            nib.save(
                nib.Nifti1Image(
                    np.reshape(tmpimg, dset.masker.mask_img.shape), dset.masker.mask_img.affine
                ),
                op.join(workdir, "gradient-{0}.nii.gz".format(i)),
            )

            os.system(
                "python3 /Users/miriedel/Desktop/GitHub/surflay/make_figures.py "
                "-f {grad_image} --colormap jet".format(
                    grad_image=op.join(workdir, "gradient-{0}.nii.gz".format(i))
                )
            )

    output_dir = op.join(
        outdir,
        (
            f"{dataset_name}_{atlas_name}_{kernel_name}_{sparsity_name}_{gradients_name}_"
            f"{affinity_name}_{approach_name}"
        )
    )

    shutil.copytree(workdir, output_dir)

    shutil.rmtree(workdir)
Example #9
def load_network(kind,
                 parcel,
                 data="lau",
                 weights='log',
                 hemi="both",
                 version=1,
                 subset="all",
                 path=None):
    '''
    Function to load a dictionary containing information about the specified
    brain network.

    Parameters
    ----------
    kind : string
        Either 'SC' or 'FC'.
    hemi : string
        Either "both", "L" or "R".
    weights " string
        The weights of the edges. The options  "normal", "log" or "binary".
        The default is "log".
    data : string
        Either "HCP" or "lau".
    parcel : string
        Either "68", "114", ... [if 'lau'] / "s400", "s800" [if "HCP"]
    version : int
        Version of the network.
    subset : string
        Either 'discov', 'valid' or 'all'
    path : string
        Path to the "data" folder in which the data is stored. If None,
        the current folder is assumed.

    Returns
    -------
    Network : dictionary
        Dictionary storing relevant attributes about the network
    '''

    # Initialize dictionary + store basic information about the network
    Network = {}
    Network["info"] = {}
    Network["info"]["kind"] = kind
    Network["info"]["parcel"] = parcel
    Network["info"]["data"] = data
    Network["info"]["hemi"] = hemi
    Network["info"]["weights"] = weights
    Network["info"]["version"] = version
    Network["info"]["subset"] = subset

    # Modify parameter names to what they are in file names
    version = '' if version == 1 else '_v' + str(version)
    subset = '' if subset == 'all' else subset
    hemi = '' if hemi == 'both' else hemi

    # Store important paths for loading the relevant data
    main_path = path + "/brainNetworks/" + data + "/"
    network_path = (main_path + "matrices/consensus/" + subset + kind +
                    parcel + hemi + version + "/")
    matrix_path = network_path + "/" + weights

    # Store general information about the network's parcellation
    parcel_info = get_general_parcellation_info(parcel)
    Network['order'] = parcel_info[0]
    Network['noplot'] = parcel_info[1]
    Network['lhannot'] = parcel_info[2]
    Network['rhannot'] = parcel_info[3]
    Network['atlas'] = parcel_info[4]

    # Load the cammoun_id of the parcellation, if Cammoun (i.e. 033, 060, etc.)
    if parcel[0] != 's':
        Network['cammoun_id'] = parcel_to_n(parcel)

    # masks
    masks = get_node_masks(Network, path=main_path)
    Network['node_mask'] = masks[0]
    Network['hemi_mask'] = masks[1]
    Network['subcortex_mask'] = masks[2]

    # hemisphere
    Network['hemi'] = get_hemi(Network, path=main_path)

    # coordinates
    Network['coords'] = get_coordinates(Network, path=main_path)

    # Adjacency matrix
    Network['adj'], last_modified = get_adjacency(Network,
                                                  matrix_path,
                                                  minimal_processing=True,
                                                  return_last_modified=True)

    # Test whether the network is connected. Raise a warning if not...
    if not np.all(bct.reachdist(Network['adj'])[0]):
        warnings.warn(("This brain network appears to be disconnected. This "
                       "might cause problem for the computation of the other "
                       "measures"))

    # node strength
    Network["str"] = np.sum(Network['adj'], axis=0)

    # Inverse of adjacency matrix
    inv = Network['adj'].copy()
    inv[Network['adj'] > 0] = 1 / inv[Network['adj'] > 0]
    Network["inv_adj"] = inv

    # distance
    Network["dist"] = cdist(Network["coords"], Network["coords"])

    # clustering coefficient
    Network["cc"] = bct.clustering_coef_wu(Network['adj'])

    # shortest path
    Network['sp'] = get_shortest_path(Network,
                                      matrix_path=matrix_path,
                                      last_modified=last_modified)

    # diffusion embedding
    de = compute_diffusion_map(Network['adj'],
                               n_components=10,
                               return_result=True,
                               skip_checks=True)
    Network["de"] = de[0]
    Network["de_extra"] = de[1]

    # Principal components
    Network['PCs'], Network['PCs_ev'] = getPCs(Network['adj'])

    # eigenvector centrality
    Network["ec"] = bct.eigenvector_centrality_und(Network['adj'])

    # mean first passage time
    Network["mfpt"] = bct.mean_first_passage_time(Network['adj'])

    # betweenness centrality
    Network['bc'] = get_betweenness(Network,
                                    matrix_path=matrix_path,
                                    last_modified=last_modified)

    # routing efficiency
    Network["r_eff"] = efficiency(Network)

    # diffusion efficiency
    Network["d_eff"] = efficiency_diffusion(Network)

    # subgraph centrality
    Network["subc"] = bct.subgraph_centrality(Network["adj"])

    # closeness centrality
    Network['clo'] = 1 / np.mean(Network['sp'], axis=0)

    # communities + participation coefficient
    path = matrix_path + "/communities/"
    if os.path.exists(path):
        files = []
        for i in os.listdir(path):
            if os.path.isfile(os.path.join(path, i)) and 'ci_' in i:
                files.append(i)
        if len(files) > 0:
            Network["ci"] = []
            for file in files:
                Network["ci"].append(np.load(os.path.join(path, file)))

            Network["ppc"] = []
            for i in range(len(files)):
                ppc = bct.participation_coef(Network['adj'], Network["ci"][i])
                Network["ppc"].append(ppc)

    # Edge lengths
    if (data == "HCP") and (kind == "SC"):
        path = main_path + "matrices/" + subset + kind + parcel + hemi + "_lengths.npy"
        if os.path.exists(path):
            Network["lengths"] = np.load(path)

    # streamline connection lengths
    path = network_path + "/len.npy"
    if os.path.exists(path):
        Network['len'] = np.load(path)

    # ROI names
    if parcel[0] != "s":
        Network['ROInames'] = get_ROInames(Network)

    # geodesic distances between nodes
    if parcel[0] == "s":
        n = parcel[1:]
        fname_l = n + "Parcels7Networks_lh_dist.csv"
        fname_r = n + "Parcels7Networks_rh_dist.csv"
    else:
        fname_l = "scale" + Network['cammoun_id'] + "_lh_dist.csv"
        fname_r = "scale" + Network['cammoun_id'] + "_rh_dist.csv"
    Network['geo_dist_L'] = pd.read_csv(main_path + "/geodesic/medial/" +
                                        fname_l,
                                        header=None).values
    Network['geo_dist_R'] = pd.read_csv(main_path + "/geodesic/medial/" +
                                        fname_r,
                                        header=None).values

    return Network
Example #10
#### threshold at 90th percentile
print('thresholding each row at its 90th percentile...')
perc = np.array([np.percentile(x, 90) for x in indiv_matrix])
N    = indiv_matrix.shape[0]

for i in range(N):
    indiv_matrix[i, indiv_matrix[i,:] < perc[i]] = 0

indiv_matrix[indiv_matrix < 0] = 0

####  compute the affinity matrix
print('calculating affinity matrix with scipy...')
indiv_matrix = spatial.distance.pdist(indiv_matrix, metric='cosine')
indiv_matrix = spatial.distance.squareform(indiv_matrix)
indiv_matrix = 1.0 - indiv_matrix

print('affinity shape ', np.shape(indiv_matrix))

####  get gradients
print('computing gradients...')
emb, res = embed.compute_diffusion_map(
    indiv_matrix, alpha=0.5, n_components=10, return_result=True)

#### save
out_name_emb = os.path.join(out_prfx + '_dense_emb.npy')
out_name_res = os.path.join(out_prfx + '_dense_res.npy')
print(out_name_emb)
np.save(out_name_emb, emb)
np.save(out_name_res, res['lambdas'])
Example #12
    def __init__(self,
                 kind,
                 parcel,
                 data='lau',
                 hemi='both',
                 binary=False,
                 version=1,
                 subset='all',
                 path=None):

        mainPath = path + "/brainNetworks/" + data + "/"
        home = os.path.expanduser("~")

        self.info = {}
        self.info["kind"] = kind
        self.info["parcel"] = parcel
        self.info["data"] = data
        self.info["hemi"] = hemi
        self.info["binary"] = binary
        self.info["version"] = version
        self.info["subset"] = subset

        if version == 1:
            version = ''
        else:
            version = "_v2"

        if binary is True:
            binary = "b"
        else:
            binary = ''

        if subset == "all":
            subset = ''

        if hemi == "both":
            hemi = ''

        matrxPath = mainPath + "matrices/" + subset + kind + parcel + hemi + binary + version

        # hemisphere
        self.hemi = np.load(matrxPath + "/hemi.npy")

        # Adjacency matrix
        path = matrxPath + ".npy"
        A = np.load(path)

        # Look at time when file was last modified
        last_modified = os.path.getmtime(path)

        # set negative values to 0, fill diagonal, make symmetric
        A[A < 0] = 0
        np.fill_diagonal(A, 0)
        A = (A + A.T) / 2
        self.adj = A

        # Number of nodes in the network
        self.n = len(self.adj)

        # coordinates
        path = mainPath + "coords/coords" + parcel + hemi + ".npy"
        self.coords = np.load(path)

        # Inverse of adjacency matrix
        inv = A.copy()
        inv[A > 0] = 1 / inv[A > 0]
        self.inv_adj = inv

        # distance
        self.dist = cdist(self.coords, self.coords)

        # shortest path
        #
        # Loaded from saved file...
        # IF file not found OR Adjacency was modified after creation,
        # then recompute measure
        path = matrxPath + "/sp.npy"

        if os.path.exists(path) is False:
            print("shortest path not found")
            print("computing shortest path...")
            self.sp = bct.distance_wei(self.inv_adj)[0]
            np.save(matrxPath + "/sp.npy", self.sp)

        elif os.path.getmtime(path) < last_modified:
            print("new adjacency matrix was found")
            print("computing shortest paths...")
            self.sp = bct.distance_wei(self.inv_adj)[0]
            np.save(matrxPath + "/sp.npy", self.sp)

        else:
            self.sp = np.load(path)

        # diffusion embedding
        de = compute_diffusion_map(A, n_components=10, return_result=True)
        self.de = de[0]
        self.de_extra = de[1]

        # Principal components
        self.PCs, self.PCs_ev = load_data.getPCs(self.adj)

        # betweenness centrality
        #
        # Loaded from saved file...
        # IF file not found OR Adjacency was modified after creation,
        # then recompute measure
        path = matrxPath + "/bc.npy"
        if os.path.exists(path) is False:

            print("betweenness centrality not found")
            print("computing betweenness centrality...")
            self.bc = bct.betweenness_wei(self.inv_adj)
            np.save(matrxPath + "/bc.npy", self.bc)

        elif os.path.getmtime(path) < last_modified:
            print("new adjacency matrix was found")
            print("recomputing betweeness centrality...")
            self.bc = bct.betweenness_wei(self.inv_adj)
            np.save(matrxPath + "/bc.npy", self.bc)

        else:
            self.bc = np.load(path)

        # communities + participation coefficient
        path = matrxPath + "/communities/"
        if os.path.exists(path):
            files = []
            for i in os.listdir(path):
                if os.path.isfile(os.path.join(path, i)) and 'ci_' in i:
                    files.append(i)
            if len(files) > 0:
                self.ci = []
                for file in files:
                    self.ci.append(np.load(os.path.join(path, file)))

                self.ppc = []
                for i in range(len(files)):
                    ppc = bct.participation_coef(A, self.ci[i])
                    self.ppc.append(ppc)

        if (data == "HCP") and (kind == "SC"):
            path = mainPath + "matrices/" + subset + kind + parcel + hemi + "_lengths.npy"
            self.lengths = np.load(path)

        # streamline connection lengths
        path = matrxPath + "/len.npy"
        if os.path.exists(path) is True:
            self.len = np.load(path)

        # network information
        if parcel[0] == "s":
            nb = parcel[1:]
            self.order = "LR"
            self.noplot = [b'Background+FreeSurfer_Defined_Medial_Wall', b'']
            self.lhannot = (home + "/"
                            "nnt-data/"
                            "atl-schaefer2018/"
                            "fsaverage/"
                            "atl-Schaefer2018_space-fsaverage_"
                            "hemi-L_desc-" + nb + "Parcels7Networks_"
                            "deterministic.annot")
            self.rhannot = (home + "/"
                            "nnt-data/"
                            "atl-schaefer2018/"
                            "fsaverage/"
                            "atl-Schaefer2018_space-fsaverage_"
                            "hemi-R_desc-" + nb + "Parcels7Networks_"
                            "deterministic.annot")
        else:
            nb = _parcel_to_n(parcel)
            self.order = "RL"
            self.noplot = None
            self.lhannot = (home + "/"
                            "nnt-data/"
                            "atl-cammoun2012/"
                            "fsaverage/"
                            "atl-Cammoun2012_space-fsaverage_"
                            "res-" + nb + "_hemi-L_deterministic.annot")
            self.rhannot = (home + "/"
                            "nnt-data/"
                            "atl-cammoun2012/"
                            "fsaverage/"
                            "atl-Cammoun2012_space-fsaverage_"
                            "res-" + nb + "_hemi-R_deterministic.annot")
            self.cammoun_id = nb
Example #13
import numpy as np

# Load affinity matrix
aff = np.load('gradient_data/conn_matrices/cosine_affinity.npy')

import sys
sys.path.append("../mapalign")
from mapalign import embed

emb, res = embed.compute_diffusion_map(aff, alpha=0.5, return_result=True)
print("Minimum value is %f" % fcmatrix.min())

# how many nodes have negative values
# Count negative values per row
neg_values = np.array([sum(fcmatrix[i,:] < 0) for i in range(N)])
print("Negative values occur in %d rows" % sum(neg_values > 0))


# example subject 1 had no negative values survive, but other subjects might, so we set these to zero
fcmatrix[fcmatrix < 0] = 0

# Now we are dealing with sparse vectors. Cosine similarity is used as the affinity metric
from sklearn.metrics import pairwise_distances
aff = 1 - pairwise_distances(fcmatrix, metric='cosine')


### Commented out saving this: the cluster (cubic) can't handle multiple 17734x17734 matrices saved per subject (a few GB each)
# Save affinity matrix
# savepath= "/cbica/projects/pinesParcels/data/CombinedData/" + str(sid) + "/vertexwise_cos_affinmat.npy"

# np.save(savepath, aff)

# save checkpoint reached, now compute dmap
from mapalign import embed
emb, res = embed.compute_diffusion_map(aff, alpha=0.5, return_result=True)

savepathe = "/cbica/projects/pinesParcels/data/CombinedData/" + str(sid) + "/vertexwise_emb.npy"
savepathr = "/cbica/projects/pinesParcels/data/CombinedData/" + str(sid) + "/vertexwise_res.npy"

np.save(savepathe, emb)
np.save(savepathr, res)
Example #15
X = np.random.randint(0, 1000, size=(500, 200))
corr = np.corrcoef(X)

# correlation matrix with ones on diagonal and positive version
corr1 = corr.copy()
L1 = (corr1 + 1) / 2

# correlation matrix with zeros on diagonal, positive version (0.5 on diagonal), correction back to zero diagonal
corr0 = corr.copy()
corr0[np.where(np.eye(corr0.shape[0]) == 1)] = 0.0
L05 = (corr0 + 1) / 2
L0 = L05.copy()
L0[np.where(np.eye(L0.shape[0]) == 1)] = 0.0

# embedding of all three versions
E1, R1 = compute_diffusion_map(L1, n_components=10)
E0, R0 = compute_diffusion_map(L0, n_components=10)
E05, R05 = compute_diffusion_map(L05, n_components=10)

E0_E1 = False
E0_E05 = False
E05_E1 = False
tol = 0.000001
while any([E0_E1, E0_E05, E05_E1]) == False:

    E0_E1 = np.allclose(E0, E1, atol=tol)
    if E0_E1:
        print("E0 = E1 with tolerance %s" % str(tol))

    E0_E05 = np.allclose(E0, E05, atol=tol)
    if E0_E05 == True: