Example 1
def test_reduce(values, labels, kwds, out, expects):
    if expects:
        with pytest.raises(expects):
            parc.reduce_by_labels(values, labels, **kwds)
    else:
        res = parc.reduce_by_labels(values, labels, **kwds)
        assert np.allclose(res, out)
        assert res.dtype == out.dtype
        assert res.shape == out.shape
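The test body above assumes a pytest parametrization that supplies its arguments. For orientation, here is a quick toy check of what `parc.reduce_by_labels` computes; the values are illustrative, and `parc` is assumed to be `brainspace.utils.parcellation`:

import numpy as np
from brainspace.utils import parcellation as parc

values = np.array([1., 2., 3., 4.])
labels = np.array([0, 0, 1, 1])

# One output value per unique label: mean of {1, 2} and mean of {3, 4}.
res = parc.reduce_by_labels(values, labels, red_op='mean')
assert np.allclose(res, [1.5, 3.5])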
Example 2
def compute_mpc(profile, labels):
    """Computes MPC for given labels on a surface template.

    Parameters
    ----------
    profile : numpy.ndarray
        Histological profiles of size surface-by-vertex.
    labels : numpy.ndarray
        Labels of regions of interest. Use 0 to denote regions that will not be included.

    Returns
    -------
    numpy.ndarray
        Microstructural profile covariance.
    """

    roi_profile = reduce_by_labels(profile, labels)
    if np.any(labels == 0):
        # Remove 0's in the labels.
        roi_profile = roi_profile[:, 1:]

    p_corr = partial_correlation(roi_profile, np.mean(roi_profile, axis=1))

    mpc = 0.5 * np.log((1 + p_corr) / (1 - p_corr))
    mpc[p_corr > 0.99999] = 0  # Deals with floating point issues where p_corr == 1.
    mpc[np.isinf(mpc)] = 0
    mpc[np.isnan(mpc)] = 0

    return mpc
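A minimal usage sketch with synthetic data; the shapes and the label coding follow the docstring, and `reduce_by_labels` and `partial_correlation` are assumed to be imported in the enclosing module:

import numpy as np

rng = np.random.default_rng(0)
profile = rng.standard_normal((10, 1000))  # 10 intracortical surfaces x 1000 vertices
labels = rng.integers(0, 6, size=1000)     # 5 regions; 0 marks excluded vertices

mpc = compute_mpc(profile, labels)         # expected: (5, 5) region-by-region matrix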
Example 3
def _get_prob_icn(schaefer1000_lab, yeo7_lab):
    """Probability of each Yeo-7 network within each Schaefer-1000 parcel."""
    # One-hot encode the Yeo labels and average them within each parcel.
    prob_icn = reduce_by_labels(pd.get_dummies(yeo7_lab),
                                schaefer1000_lab,
                                red_op='mean',
                                dtype=np.float32,
                                axis=1)
    # Normalize so each parcel's network probabilities sum to 1.
    prob_icn /= prob_icn.sum(axis=1, keepdims=True)
    return prob_icn
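A hedged sketch of the expected shapes; the vertex count and label ranges are illustrative. Each row of the output gives, for one Schaefer parcel, the proportion of its vertices assigned to each Yeo network:

import numpy as np

rng = np.random.default_rng(0)
n_vertices = 20484                                    # e.g., fsaverage5
schaefer1000_lab = rng.integers(1, 1001, n_vertices)  # 1000 parcels
yeo7_lab = rng.integers(1, 8, n_vertices)             # 7 networks

prob_icn = _get_prob_icn(schaefer1000_lab, yeo7_lab)
assert np.allclose(prob_icn.sum(axis=1), 1)           # rows are probabilities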
Example 4
def _compute_net_idio(df_phen, dd, sd, lab_ref):
    """Network-wise idiosyncrasy. """

    dd_net = reduce_by_labels(dd, lab_ref, axis=0)
    dd_net = pd.DataFrame(dd_net, columns=ICN_NAMES)
    dd_net['Average'] = dd.mean(1)

    sd_net = reduce_by_labels(sd, lab_ref, axis=0)
    sd_net = pd.DataFrame(sd_net, columns=ICN_NAMES)
    sd_net['Average'] = sd.mean(1)

    df_ados = pd.concat([sd_net, dd_net], keys=['SD', 'DD'], axis=1)
    for k in ['Age', 'Group', 'Site', 'Sex', 'CSS']:
        df_ados[k] = df_phen[k].to_numpy()

    # Drop ASD subjects with no CSS score (non-ASD subjects are kept).
    mask_na = np.logical_or(~pd.isna(df_ados['CSS']), df_ados.Group != 'ASD')
    df_ados = df_ados[mask_na].copy()

    return df_ados
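For context, `ICN_NAMES` is assumed to be a module-level constant holding the seven Yeo network names that label the reduced columns; the exact strings below are illustrative, not from the original source:

# Assumed constant; the order must match the label values in lab_ref.
ICN_NAMES = ['Visual', 'Somatomotor', 'DorsalAttention', 'VentralAttention',
             'Limbic', 'Frontoparietal', 'Default']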
Example 5
def yeo_networks_associations(
    data: ArrayLike,
    template: str = "fsaverage5",
    seven_networks: bool = True,
    data_dir: Optional[Union[str, Path]] = None,
    reduction_operation: Union[str, Callable] = np.nanmean,
) -> np.ndarray:
    """Computes association

    Parameters
    ----------
    data : ArrayLike
        Data to be summarized in the Yeo networks in a sample-by-feature format.
    template : str, optional
        Surface template. Valid values are "fsaverage5", "fsaverage",
        "fslr32k", "civet41k", and "civet164k", by default "fsaverage5".
    seven_networks : bool, optional
        If true, uses the 7 network parcellation, otherwise uses the 17 network
        parcellation, by default True.
    data_dir : str, Path, optional
        Data directory to store the Yeo network files, by default $HOME_DIR/brainstat_data/parcellation_data.
    reduction_operation : str, callable, optional
        How to summarize data. If str, options are: {'min', 'max', 'sum',
        'mean', 'median', 'mode', 'average'}. If callable, it should receive a
        1D array of values and an array of weights (or None), and return a
        scalar value. Default is np.nanmean.

    Returns
    -------
    np.ndarray
        Summary statistic in the Yeo networks.
    """
    n_regions = 7 if seven_networks else 17
    yeo_networks = fetch_parcellation(
        template=template,
        atlas="yeo",
        n_regions=n_regions,
        join=True,
        data_dir=data_dir,
    )

    if np.array(data).ndim == 1:
        data_2d = np.array(data)[:, None]
    else:
        data_2d = np.array(data)

    n_features = data_2d.shape[1]

    yeo_mean = np.zeros((n_regions + 1, n_features))
    for i in range(n_features):
        yeo_mean[:, i] = reduce_by_labels(
            data_2d[:, i], yeo_networks, red_op=reduction_operation
        )
    return yeo_mean[1:, :]
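A minimal usage sketch with random data on fsaverage5's 20,484 vertices; note that the first call downloads the parcellation to `data_dir`:

import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal(20484)  # one sample per fsaverage5 vertex

yeo_mean = yeo_networks_associations(data)
print(yeo_mean.shape)              # (7, 1): one summary value per network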
Example 6
def load_hcp_timeseries(file, parcellations=None, cortex_only=True):
    """Fetches (parcellated) timeseries of a subject

    Parameters
    ----------
    file : str
        Path to a CIFTI timeseries file of an HCP subject.
    parcellations : str, list, optional
        Files describing the parcellation of the timeseries. If multiple files
        are provided (e.g. left and right hemisphere), then the parcellations
        are concatenated in the order that they're provided in. Defaults to
        None.
    cortex_only : bool, optional
        If true, returns only cortical data, defaults to True.

    Returns
    -------
    numpy.ndarray
        Array containing the (parcellated) timeseries.
    """

    if isinstance(parcellations, str):
        parcellations = [parcellations]

    cii = nib.load(file)
    timeseries = cii.get_fdata()

    if cortex_only:
        # Keep only the first 64,984 grayordinates (left + right cortex).
        timeseries = timeseries[:, :64984]

    if parcellations is not None:
        parcel_list = [nib.load(x).darrays[0].data for x in parcellations]
        parcellation = np.concatenate(parcel_list, axis=0)
        # Reduce each frame by parcel, dropping the first entry (typically
        # label 0, i.e. unassigned vertices).
        timeseries = np.array(
            [reduce_by_labels(x, parcellation)[1:] for x in timeseries]
        )

    return timeseries
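A hedged usage sketch; the paths below are placeholders, and the parcellation files are assumed to be GIFTI label files, one per hemisphere:

# Hypothetical file names; adjust to your local HCP data layout.
ts = load_hcp_timeseries(
    '100307/rfMRI_REST1_LR_Atlas_hp2000_clean.dtseries.nii',
    parcellations=['lh.yeo7.label.gii', 'rh.yeo7.label.gii'],
)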
Example 7
masked_regions = [b'Medial_wall', b'Unknown']
masked_labels = [regions.index(r) for r in masked_regions]
for r in masked_regions:
    regions.remove(r)

# Build Destrieux parcellation and mask
labeling = np.concatenate([atlas['map_left'], atlas['map_right']])
mask = ~np.isin(labeling, masked_labels)

# Distinct labels for left and right hemispheres
lab_lh = atlas['map_left']
labeling[lab_lh.size:] += lab_lh.max() + 1

# Extract mean timeseries for each label
seed_ts = reduce_by_labels(clean_ts[mask],
                           labeling[mask],
                           axis=1,
                           red_op='mean')

###############################################################################
# Calculate functional connectivity matrix
# ++++++++++++++++++++++++++++++++++++++++
# The following example uses
# `nilearn <https://nilearn.github.io/auto_examples/03_connectivity/plot_signal_extraction.html#compute-and-display-a-correlation-matrix>`_:

from nilearn.connectome import ConnectivityMeasure

correlation_measure = ConnectivityMeasure(kind='correlation')
correlation_matrix = correlation_measure.fit_transform([seed_ts.T])[0]
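
###############################################################################
# To visually inspect the result, we can plot the matrix. This is a minimal
# sketch using nilearn's plotting module; the diagonal is zeroed only for
# display purposes.

from nilearn import plotting

np.fill_diagonal(correlation_matrix, 0)
plotting.plot_matrix(correlation_matrix, colorbar=True)
plotting.show()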

###############################################################################
# Expression is a pandas DataFrame showing the genetic expression of genes
# within each region of the atlas. By default, the values fall in the range
# [0, 1], where higher values represent higher expression; this may change if
# you change the normalization function. Some regions may return NaN values
# for all genes. This occurs when there are no samples within the region
# across all donors. We've denoted such regions in black in the matrix. By
# default, BrainStat uses all the default abagen parameters. If you wish to
# customize these parameters, the keyword arguments can be passed directly
# to `surface_genetic_expression`. For a full list of these arguments and
# their function, please consult the abagen documentation.
#
# Next, let's have a look at the correlation between one gene (WFDC1) and our
# t-statistic map. Let's also plot the expression of this gene on the surface.

# Plot correlation with WFDC1 gene
t_stat_schaefer_100 = reduce_by_labels(slm.t.flatten(), schaefer_100_fs5)[1:]

df = pd.DataFrame({"x": t_stat_schaefer_100, "y": expression["WFDC1"]})
df.dropna(inplace=True)
plt.scatter(df.x, df.y, s=20, c="k")
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel("t-statistic", fontdict={"fontsize": 16})
plt.ylabel("WFDC1 expression", fontdict={"fontsize": 16})
plt.plot(np.unique(df.x),
         np.poly1d(np.polyfit(df.x, df.y, 1))(np.unique(df.x)), "k")
plt.text(-1.0, 0.75, f"r={df.x.corr(df.y):.2f}", fontdict={"size": 14})
plt.show()
