Code example #1
import numpy as np
from brainspace.datasets import load_conte69

def test_four_surfaces_square_max():
    # Two conte69 surfaces plus their spheres: four surfaces in total.
    surfaces_1 = load_conte69()
    surfaces_2 = load_conte69(as_sphere=True)
    t, names = temp_surfaces(surfaces_1 + surfaces_2)
    # Arrange the four temp-file names in a 2x2 grid.
    namesArr = np.reshape(np.array(names, ndmin=2), (2, 2))
    dummy_test(namesArr, np.fmax)
    for f in t:
        f.close()
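These tests rely on two module-level helpers, temp_surfaces and dummy_test, that the snippets do not include. Below is a purely hypothetical sketch of their shape, added for readability (an assumption, not the library's actual helpers): temp_surfaces writes each surface to a named temporary file and returns the file objects with their names, while dummy_test stands in for the plotting or statistics call under test.

import tempfile

import numpy as np
from brainspace.mesh.mesh_io import write_surface

def temp_surfaces(surfaces, suffix='.gii'):
    # Hypothetical: persist each surface to a named temporary file.
    temps = [tempfile.NamedTemporaryFile(suffix=suffix) for _ in surfaces]
    for f, s in zip(temps, surfaces):
        write_surface(s, f.name)
    return temps, [f.name for f in temps]

def dummy_test(*args):
    # Hypothetical stand-in: the real helper presumably feeds its arguments
    # to the code under test (e.g. a plotting call) and asserts it runs.
    assert len(args) > 0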
Code example #2
import numpy as np
from brainspace.datasets import load_conte69

def test_two_surfaces_row_plus():
    surfaces = load_conte69()
    t, names = temp_surfaces(surfaces)
    # A single row with the two temp-file names.
    namesArr = np.array(names, ndmin=2)
    dummy_test(namesArr)
    for f in t:
        f.close()
Code example #3
import numpy as np
from brainspace.datasets import load_conte69

def test_12():
    surf, _ = load_conte69()

    p = np.random.randint(1, 10)
    n = np.random.randint(2, 10)

    # 32492 is the number of vertices in a single conte69 hemisphere.
    A = np.random.rand(n, 32492)
    B = np.random.rand(n, p)
    B[:, 0] = 1  # Constant term of the design matrix.
    B = Term(B)  # Term wraps the design matrix (provided by the test suite's imports).

    dummy_test(A, B, surf)
Code example #4
# Here, we use the spin permutation approach proposed in
# `(Alexander-Bloch et al., 2018)
# <https://www.sciencedirect.com/science/article/pii/S1053811918304968>`_,
# which preserves the auto-correlation of the permuted feature(s) by rotating
# the feature data on the spherical domain.
# We will start by loading the conte69 surfaces for left and right hemispheres,
# their corresponding spheres, midline mask, and t1w/t2w intensity as well as
# cortical thickness data, and a template functional gradient.

import warnings

warnings.simplefilter('ignore')

import numpy as np
from brainspace.datasets import load_gradient, load_marker, load_conte69

# load the conte69 hemisphere surfaces and spheres
surf_lh, surf_rh = load_conte69()
sphere_lh, sphere_rh = load_conte69(as_sphere=True)

# Load the data
t1wt2w_lh, t1wt2w_rh = load_marker('t1wt2w')
t1wt2w = np.concatenate([t1wt2w_lh, t1wt2w_rh])

thickness_lh, thickness_rh = load_marker('thickness')
thickness = np.concatenate([thickness_lh, thickness_rh])

# Template functional gradient
embedding = load_gradient('fc', idx=0, join=True)

###############################################################################
# Let’s first generate some null data using spintest.
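###############################################################################
# Sketch (not part of the original snippet): the spin permutations can be
# generated with brainspace.null_models.SpinPermutations; the number of
# repetitions below is an assumed value.

from brainspace.null_models import SpinPermutations

n_rand = 1000  # assumed number of random rotations

sp = SpinPermutations(n_rep=n_rand, random_state=0)
sp.fit(sphere_lh, points_rh=sphere_rh)

# Rotating the markers on the spheres yields their null distributions.
t1wt2w_rotated = np.hstack(sp.randomize(t1wt2w_lh, t1wt2w_rh))
thickness_rotated = np.hstack(sp.randomize(thickness_lh, thickness_rh))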
Code example #5
###############################################################################
# As before, we’ll start by loading the sample data.

import warnings
warnings.simplefilter('ignore')

from brainspace.datasets import load_group_fc, load_parcellation, load_conte69

# First load mean connectivity matrix and Schaefer parcellation
conn_matrix = load_group_fc('schaefer', scale=400)
labeling = load_parcellation('schaefer', scale=400, join=True)

mask = labeling != 0

# and load the conte69 hemisphere surfaces
surf_lh, surf_rh = load_conte69()

###############################################################################
# The GradientMaps object allows for many different kernels and dimensionality
# reduction techniques. Let’s have a look at three different kernels.

import numpy as np

from brainspace.gradient import GradientMaps
from brainspace.plotting import plot_hemispheres
from brainspace.utils.parcellation import map_to_labels

kernels = ['pearson', 'spearman', 'normalized_angle']

gradients_kernel = [None] * len(kernels)
for i, k in enumerate(kernels):
    # Sketch completion (the snippet was cut off here): fit a diffusion-map
    # embedding with each kernel and map its first gradient to the vertices.
    gm = GradientMaps(kernel=k, approach='dm', random_state=0)
    gm.fit(conn_matrix)

    gradients_kernel[i] = map_to_labels(gm.gradients_[:, 0], labeling,
                                        mask=mask, fill=np.nan)
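###############################################################################
# Sketch (not part of the original snippet): a typical next step is to display
# the first gradient obtained with each kernel side by side; the plotting
# parameters here are assumptions.

plot_hemispheres(surf_lh, surf_rh, array_name=gradients_kernel,
                 size=(1200, 600), cmap='viridis_r', color_bar=True,
                 label_text=kernels)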
Code example #6
import numpy as np

from brainspace.datasets import load_parcellation
# Assumed import path for the histology helpers used below:
from brainstat.context.histology import (compute_histology_gradients,
                                         compute_mpc, read_histology_profile)

# Load the Schaefer 400 atlas
schaefer_400 = load_parcellation("schaefer", scale=400, join=True)

# Run the analysis
histology_profiles = read_histology_profile(template="fs_LR_64k")
mpc = compute_mpc(histology_profiles, labels=schaefer_400)
gradient_map = compute_histology_gradients(mpc)

########################################################################
# Let's plot the first gradient of histology to see what it looks like.
# We will use BrainSpace to create our plots. For full details on how
# BrainSpace's plotting functionality works, please consult the BrainSpace
# ReadTheDocs. (NOTE: Temporarily disabled due to build errors)

from brainspace.plotting.surface_plotting import plot_hemispheres
from brainspace.utils.parcellation import map_to_labels
from brainspace.datasets import load_conte69

left_surface, right_surface = load_conte69()
vertexwise_data = []
for i in range(2):  # map the first two gradients to the vertices
    vertexwise_data.append(
        map_to_labels(
            gradient_map.gradients_[:, i],
            schaefer_400,
            mask=schaefer_400 != 0,
            fill=np.nan,
        )
    )
# plot_hemispheres(left_surface, right_surface, vertexwise_data, embed_nb=True)
Code example #7
    '#8EA06F', '#8C9D70', '#8B9B71', '#8A9972', '#899673', '#889475',
    '#879176', '#868F77', '#858C78', '#848A79', '#82877A', '#81857C',
    '#80827D', '#7F807E', '#807D7D', '#827A7A', '#857777', '#877575',
    '#8A7272', '#8C6F6F', '#8F6C6C', '#916969', '#946666', '#966464',
    '#996161', '#9B5E5E', '#9D5B5B', '#A05858', '#A25656', '#A55353',
    '#A75050', '#AA4D4D', '#AC4A4A', '#AF4747', '#B14545', '#B44242',
    '#B63F3F', '#B93C3C', '#BB3939', '#BE3636', '#C03434', '#C33131',
    '#C52E2E', '#C82B2B', '#CA2828', '#CD2626'
])

# Load fsaverage5 inflated
fs5I_lh = read_surface(dir_fS + 'fsaverage5/surf/lh.inflated', itype='fs')
fs5I_rh = read_surface(dir_fS + 'fsaverage5/surf/rh.inflated', itype='fs')

# Load conte69
c69_lh, c69_rh = load_conte69()

# Load native mid surface
mid_lh, mid_rh = load_surface(
    dir_fS + subBIDS + '/surf/lh.midthickness.surf.gii',
    dir_fS + subBIDS + '/surf/rh.midthickness.surf.gii',
    with_normals=True,
    join=False)

# Load native surface
surf_lh = read_surface(dir_fS + subBIDS + '/surf/lh.pial', itype='fs')
surf_rh = read_surface(dir_fS + subBIDS + '/surf/rh.pial', itype='fs')

# Load native white matter surface
wm_lh = read_surface(dir_fS + subBIDS + '/surf/lh.white', itype='fs')
wm_rh = read_surface(dir_fS + subBIDS + '/surf/rh.white', itype='fs')
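The snippet ends after loading the surfaces. As a minimal sketch (not from the original) of how the loaded conte69 surfaces might be displayed, assuming a vertex-wise map and BrainSpace's plot_hemispheres; the map and colormap below are placeholders:

import numpy as np
from brainspace.plotting import plot_hemispheres

# Hypothetical vertex-wise map over both conte69 hemispheres.
dummy_map = np.random.rand(c69_lh.n_points + c69_rh.n_points)

plot_hemispheres(c69_lh, c69_rh, array_name=dummy_map,
                 size=(1200, 300), cmap='RdGy_r', color_bar=True)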
Code example #8
from brainspace.datasets import load_conte69

def test_bspolydata():
    # Smoke test with a single BSPolyData surface.
    surf, _ = load_conte69()
    dummy_test(surf)