Code Example #1
def get_graph_from_bare_data(corr_mat_fname,
                             blacklist_fname,
                             density,
                             include_mst=False,
                             weighted=False):
    """
    Extracts a graph from raw data.

    Parameters
    ----------
    corr_mat_fname : str
        path to the file containing the correlation matrix
    blacklist_fname : str
        path to the boolean blacklist file
    density : float
        the network density to use
    include_mst : bool
        whether to include the maximum spanning tree
    weighted : bool
        whether to treat the network as weighted

    Returns
    -------
    net : igraph.Graph
        the network
    """
    corr_mat = dataio.load_adj_matrix_from_mat(corr_mat_fname)
    ok_nodes = dataio.get_ok_nodes(blacklist_fname)
    net = make_net_from_unfiltered_data(corr_mat,
                                        ok_nodes,
                                        density,
                                        include_mst=include_mst,
                                        weighted=weighted)
    return net
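A minimal usage sketch for the function above; the .mat filenames and the density value are hypothetical placeholders:

net = get_graph_from_bare_data("subject01_corr.mat",  # hypothetical filename
                               "blacklist.mat",       # hypothetical filename
                               density=0.05,
                               include_mst=True,
                               weighted=True)
print(net.summary())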
Code Example #2
def comstructuresToMapsWithLabelings(coms1, coms2, mapFileNames,
                                     fmriRoiInfoFileName, blacklistFileName):
    rois = dataio.loadMat(fmriRoiInfoFileName, squeeze_me=True)["rois"]
    okNodes = dataio.get_ok_nodes(len(rois), blacklistFileName)
    okRois = rois[okNodes]
    # skip the weird empty array that appears as the first unique element
    okRoiAalLabels = np.unique(okRois['aal_label'])[1:]
    okRoiAalIds = np.unique(okRois['aal_ID'])

    comIds1 = np.unique(coms1[okNodes])
    comIds2 = np.unique(coms2[okNodes])
    print(comIds1)
    overlapMatrix = np.zeros((len(comIds1), len(comIds2)))
    overlapNodesMatrix = []
    for i, comId1 in enumerate(comIds1):
        overlapNodesMatrix.append([])
        comId1Nodes = (coms1 == comId1)
        for j, comId2 in enumerate(comIds2):
            comId2Nodes = (coms2 == comId2)
            # get joint nodes:
            jointNodes = comId1Nodes * comId2Nodes
            overlapNodesMatrix[i].append(np.nonzero(jointNodes))
            overlapMatrix[i, j] = np.sum(jointNodes)

    print(np.sum(overlapMatrix))
    return None
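The nested overlap counting above can be reproduced with a small self-contained sketch; the two membership vectors are made up for illustration:

import numpy as np

coms1 = np.array([0, 0, 1, 1, 2, 2])
coms2 = np.array([0, 1, 1, 1, 2, 2])
ids1, ids2 = np.unique(coms1), np.unique(coms2)
overlap = np.zeros((len(ids1), len(ids2)), dtype=int)
for i, c1 in enumerate(ids1):
    for j, c2 in enumerate(ids2):
        # count nodes placed in community c1 by the first partition
        # and in community c2 by the second
        overlap[i, j] = np.sum((coms1 == c1) & (coms2 == c2))
print(overlap)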
Code Example #3
File: compcoms.py Project: rmkujala/brainnets
def _compute_coms_worker(args):
    """
    Computes Louvain communities.
    """
    fname, cfg, com_det_method, com_det_options = args
    graph = netgen.get_graph_from_bare_data(
        fname, cfg['blacklist_fname'], cfg['density'],
        include_mst=cfg['include_mst'], weighted=False)
    membershiplists = []
    for i in range(cfg['n_it_comdet']):
        clustering = com_det_method(graph, **com_det_options)
        if isinstance(clustering, igraph.clustering.VertexDendrogram):
            clustering = clustering.as_clustering()
        membershiplists.append(clustering.membership)
    coms = np.array(membershiplists)
    ok_nodes = dataio.get_ok_nodes(cfg['blacklist_fname'])
    # expand communities to non-filtered indices
    unfiltered_coms = []
    for i, com in enumerate(coms):
        uf_com = dataio.expand_1D_node_vals_to_non_blacklisted_array(
            com, ok_nodes
        )
        unfiltered_coms.append(uf_com)
    unfiltered_coms = np.array(unfiltered_coms)
    com_det_method_tag = igraph_com_det_method_to_tag(com_det_method)
    out_fname = fnc.get_ind_fname(fname, cfg, com_det_method_tag)
    out_dict = {com_det_method_tag: unfiltered_coms,
                settings.config_tag: cfg}
    dataio.save_pickle(out_fname, out_dict)
    print("finished " + fname)
    return unfiltered_coms
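The helper dataio.expand_1D_node_vals_to_non_blacklisted_array is not shown on this page; a plausible numpy equivalent of the expansion step, assuming it simply scatters the filtered values back to their original node indices:

import numpy as np

def expand_to_unfiltered(vals, ok_nodes, default_value=np.nan):
    # hypothetical stand-in: blacklisted nodes get the default value,
    # non-blacklisted nodes get their computed values back
    full = np.full(len(ok_nodes), default_value)
    full[ok_nodes] = vals
    return full

ok_nodes = np.array([True, False, True, True])
print(expand_to_unfiltered(np.array([0, 1, 1]), ok_nodes))
# -> [ 0. nan  1.  1.]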
Code Example #4
def get_link_distances_for_net(g, cfg):
    """
    Get link distances for a network.

    Parameters
    ----------
    g : igraph.Graph
    cfg : dict
        brainnets config dictionary

    Returns
    -------
    distances : a numpy array
        All distances in the order of the graph's edge sequence.
    """
    node_info = dataio.load_mat(cfg['node_info_fname'],
                                squeeze_me=True)["rois"]
    ok_nodes = dataio.get_ok_nodes(cfg['blacklist_fname'])
    coords = node_info['centroidMNI'][ok_nodes]
    distances = np.zeros(len(g.get_edgelist()))
    for j, e in enumerate(g.es):
        source_coords = coords[e.source]
        target_coords = coords[e.target]
        # Euclidean distance between MNI centroids (coordinates in mm)
        distances[j] = np.linalg.norm(source_coords - target_coords)
    return distances
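For reference, the per-edge loop can be vectorized with numpy; the coordinates and edge list below are made up:

import numpy as np

coords = np.array([[0.0, 0.0, 0.0],
                   [3.0, 4.0, 0.0],
                   [0.0, 0.0, 2.0]])
edges = np.array([(0, 1), (0, 2)])  # (source, target) index pairs
# Euclidean distance for every edge at once
distances = np.linalg.norm(coords[edges[:, 0]] - coords[edges[:, 1]], axis=1)
print(distances)  # -> [5. 2.]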
Code Example #5
File: compcoms.py Project: rmkujala/brainnets
def _compute_louvain_coms_worker(args):
    """
    Computes Louvain communities.
    """
    fname, cfg = args
    coms = []
    mods = []
    print("started " + fname)
    graph = netgen.get_graph_from_bare_data(
        fname, cfg['blacklist_fname'], cfg['density'],
        include_mst=cfg['include_mst'], weighted=False)
    louvain_coms_dict = \
        gencomps.get_louvain_partitions(graph, False, cfg['n_it_comdet'])
    coms.extend(louvain_coms_dict[settings.louvain_cluster_tag])
    coms = np.array(coms)
    ok_nodes = dataio.get_ok_nodes(cfg['blacklist_fname'])
    # expand communities to non-filtered indices
    unfiltered_coms = []
    for i, com in enumerate(coms):
        uf_com = dataio.expand_1D_node_vals_to_non_blacklisted_array(
            com, ok_nodes
        )
        unfiltered_coms.append(uf_com)
    unfiltered_coms = np.array(unfiltered_coms)
    mods.extend(louvain_coms_dict[settings.modularity_tag])
    mods = np.array(mods)
    out_fname = fnc.get_ind_fname(fname, cfg, settings.louvain_cluster_tag)
    out_dict = {settings.louvain_cluster_tag: unfiltered_coms,
                settings.modularity_tag: mods,
                settings.config_tag: cfg}
    dataio.save_pickle(out_fname, out_dict)

    print("finished " + fname)
    return unfiltered_coms, mods
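Workers with this _worker(args) signature are typically driven through multiprocessing; a hypothetical driver (the function name and process count are assumptions, not part of brainnets):

import multiprocessing

def run_louvain_for_group(fnames, cfg, n_processes=4):
    # one (fname, cfg) argument tuple per subject matrix
    arg_list = [(fname, cfg) for fname in fnames]
    pool = multiprocessing.Pool(processes=n_processes)
    results = pool.map(_compute_louvain_coms_worker, arg_list)
    pool.close()
    pool.join()
    return results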
Code Example #6
File: comp_helpers.py Project: rmkujala/brainnets
def do_start(fname, blacklist_fname):
    """ Prints which work has started and returns the adj_mat """
    if settings.be_verbose:
        print("started " + fname)
        sys.stdout.flush()
    adj_mat = dataio.load_adj_matrix_from_mat(fname)
    ok_nodes = dataio.get_ok_nodes(blacklist_fname)
    return adj_mat, ok_nodes
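The returned boolean mask is typically applied to drop blacklisted rows and columns from the adjacency matrix; a minimal sketch with made-up data:

import numpy as np

adj_mat = np.arange(16).reshape(4, 4)
ok_nodes = np.array([True, False, True, True])
# keep only the rows and columns of non-blacklisted nodes
filtered = adj_mat[np.ix_(ok_nodes, ok_nodes)]
print(filtered.shape)  # -> (3, 3)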
Code Example #7
File: compcoms.py Project: rmkujala/brainnets
def comp_scaled_inclusivity_for_two_fname_groups(cfg):
    config.require(
        cfg, ["density", "group_1_mat_fnames", "group_2_mat_fnames"])
    fname_groups = [cfg['group_1_mat_fnames'], cfg['group_2_mat_fnames']]
    for i, fname_group in enumerate(fname_groups):
        clus = []
        for mat_fname in fname_group:
            clusters_fname = fnc.get_ind_fname(
                mat_fname,
                cfg,
                settings.louvain_cluster_tag
            )
            subject_clusters = dataio.load_pickle(clusters_fname)
            clus.append(subject_clusters[settings.louvain_cluster_tag])
        partitions = aux.expand_first_axis(np.array(clus))
        partitions = partitions[:, dataio.get_ok_nodes(cfg['blacklist_fname'])]
        assert np.logical_not(np.isnan(partitions)).all()
        node_SIs = gencomps.comp_scaled_inclusivity(partitions)
        out_dict = {settings.scaled_inclusivity_tag:
                    node_SIs, settings.config_tag: cfg}
        out_fname = fnc.get_group_fname(
            cfg, settings.scaled_inclusivity_tag, i)
        dataio.save_pickle(out_fname, out_dict)
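aux.expand_first_axis is not shown on this page; given that clus stacks n_it_comdet partitions per subject, it presumably merges the first two axes, along the lines of:

import numpy as np

def expand_first_axis(arr):
    # hypothetical stand-in:
    # (n_subjects, n_iters, n_nodes) -> (n_subjects * n_iters, n_nodes)
    return arr.reshape(-1, arr.shape[-1])

clus = np.zeros((5, 10, 30))  # 5 subjects, 10 partitions, 30 nodes
print(expand_first_axis(clus).shape)  # -> (50, 30)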
Code Example #8
File: compcoms.py Project: rmkujala/brainnets
def comp_consensus_scaled_inclusivity(cfg, group_id, n_to_consider=None):
    """
    Parameters
    ----------
    cfg : dict
        brainnets config dictionary
    group_id : int
        0 or 1 -- the group for which the scaled inclusivity should be
        computed
    n_to_consider : int or "best", optional
        number of partitions to consider per subject;
        defaults to considering all partitions;
        if "best", only the maximum-modularity partition is used
    """
    config.require(
        cfg, ["density", "group_1_mat_fnames", "group_2_mat_fnames"])

    if group_id == 0:
        fname_group = cfg['group_1_mat_fnames']
    elif group_id == 1:
        fname_group = cfg['group_2_mat_fnames']
    else:
        raise ValueError('Param group_id should be either 0 or 1')
    consensus_com_fname = fnc.get_group_fname(
        cfg, settings.louvain_consensus_tag, group_id)
    consensus_com = \
        dataio.load_pickle(consensus_com_fname)[settings.louvain_cluster_tag]

    clus = []
    for mat_fname in fname_group:
        clusters_fname = fnc.get_ind_fname(
            mat_fname,
            cfg,
            settings.louvain_cluster_tag
        )
        data = dataio.load_pickle(clusters_fname)
        subject_clusters = data[settings.louvain_cluster_tag]

        if n_to_consider is not None:
            if isinstance(n_to_consider, int):
                subject_clusters = subject_clusters[:n_to_consider]
            elif n_to_consider == 'best':
                max_mod_i = np.argmax(data[settings.modularity_tag])
                subject_clusters = subject_clusters[max_mod_i]
                subject_clusters = subject_clusters.reshape(
                    1, len(subject_clusters))
            else:
                raise ValueError(
                    "n_to_consider should be an integer or 'best'")
        clus.append(subject_clusters)

    partitions = aux.expand_first_axis(np.array(clus))
    ok_nodes = dataio.get_ok_nodes(cfg['blacklist_fname'])
    partitions = partitions[:, ok_nodes]
    consensus_com = consensus_com[ok_nodes]
    assert np.logical_not(np.isnan(partitions)).all()
    assert len(consensus_com) == len(partitions[0])

    node_SIs = gencomps.comp_scaled_inclusivity_for_ref_partition(
        consensus_com, partitions, normalize=True)
    out_dict = {settings.scaled_inclusivity_tag:
                node_SIs, settings.config_tag: cfg}
    out_fname = fnc.get_group_fname(
        cfg, settings.louvain_consensus_si_tag, group_id)
    dataio.save_pickle(out_fname, out_dict)
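A hypothetical call for the first group, using only each subject's highest-modularity partition; the cfg values are placeholders (real configs carry more keys):

cfg = {'density': 0.05,
       'blacklist_fname': 'blacklist.mat',
       'group_1_mat_fnames': ['s01_corr.mat', 's02_corr.mat'],
       'group_2_mat_fnames': ['c01_corr.mat', 'c02_corr.mat']}
comp_consensus_scaled_inclusivity(cfg, group_id=0, n_to_consider='best')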
Code Example #9
File: compcoms.py Project: rmkujala/brainnets
def comp_consensus_partition(cfg, fnames_tag, out_fname,
                             n_clu_for_mcla='median',
                             n_to_consider=None,
                             comdet_tag=None):
    """
    Computes a consensus partition.

    Parameters
    ----------
    cfg : dict
        a brainnets config dictionary
    fnames_tag : str
        the filenames group for which the consensus partition is
        computed
    out_fname : str
        the filename to which the consensus partition is stored
    n_clu_for_mcla : int or "median"
        maximum number of clusters in the consensus partition;
        if "median", the median number of clusters across the input
        partitions is used as the maximum
    n_to_consider : int or "best", optional
        number of partitions to consider when forming the consensus;
        defaults to considering _all_ partitions;
        if "best", the maximum-modularity partition is used
        (when modularity values are available)
    comdet_tag : str, optional
        e.g. "infomap";
        defaults to settings.louvain_cluster_tag (legacy)

    Returns
    -------
    out_dict : dict
        dictionary containing the consensus partition
    """
    config.require(cfg, [fnames_tag, 'blacklist_fname', 'density'])

    ok_nodes = dataio.get_ok_nodes(cfg['blacklist_fname'])
    if comdet_tag is None:
        comdet_tag = settings.louvain_cluster_tag

    # load clusterings
    clusterings = None
    for fname in cfg[fnames_tag]:
        indfname = fnc.get_ind_fname(fname, cfg, comdet_tag)
        data = dataio.load_pickle(indfname)
        clus_raw = data[comdet_tag]

        assert len(clus_raw[0]) >= np.sum(ok_nodes)
        if n_to_consider is not None:
            if isinstance(n_to_consider, int):
                clus_raw = clus_raw[:n_to_consider]
            elif n_to_consider == 'best':
                max_mod_i = np.argmax(data[settings.modularity_tag])
                clus_raw = clus_raw[max_mod_i]
                clus_raw = clus_raw.reshape(1, len(clus_raw))
            else:
                raise ValueError(
                    "n_to_consider should be an integer or 'best'")

        clus = clus_raw[:, ok_nodes]
        if clusterings is None:
            # for first encounter
            clusterings = np.copy(clus)
        else:
            clusterings = np.vstack((clusterings, clus))

    # this should usually hold, unless you have a non-standard workflow
    # (sanity check added to guard against an old bug):
    assert len(clusterings) == len(clus) * len(cfg[fnames_tag])

    consensus_clu = gencomps.comp_consensus_partition(
        clusterings, n_clu_for_mcla)
    consensus_clu = dataio.expand_1D_node_vals_to_non_blacklisted_array(
        consensus_clu, ok_nodes, default_value=-1)
    out_dict = {comdet_tag: consensus_clu,
                settings.config_tag: cfg}

    dataio.save_pickle(out_fname, out_dict)
    return out_dict
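A hypothetical call, forming the consensus over all of group 1's stored partitions with the default median cluster cap; cfg and the output filename are placeholders:

out_dict = comp_consensus_partition(cfg, 'group_1_mat_fnames',
                                    'group1_consensus.pkl',
                                    n_clu_for_mcla='median')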