コード例 #1
0
ファイル: test_simulate.py プロジェクト: codeaudit/supereeg
def test_electrode_contingencies_3_locations_can_subset():
    """Contingency 3: patient locations partially overlap model locations.

    Builds a small model from 4 of 5 gray-matter locations, simulates a
    patient whose electrodes include one location shared with the model,
    and checks that reconstruction correlations lie in [-1, 1].
    """
    # NOTE: np.random.seed returns None, so random_seed is None below;
    # the global numpy RNG is still seeded, which keeps sampling deterministic.
    random_seed = np.random.seed(123)
    noise = 0

    # load mini model
    gray = se.Brain(se.load('gray', vox_size=20))

    # extract 5 locations
    gray_locs = gray.locs.iloc[:5]

    # build the model from 4 of the 5 locations
    mo_locs = gray_locs.sample(4, random_state=random_seed).sort_values(
        ['x', 'y', 'z'])

    # create covariance matrix from random seed
    c = se.create_cov(cov='random', n_elecs=5)

    # pull out model from covariance matrix
    data = c[:, mo_locs.index][mo_locs.index, :]

    # create model from subsetted covariance matrix and locations
    model = se.Model(numerator=np.array(data),
                     denominator=np.ones(np.shape(data)),
                     locs=mo_locs,
                     n_subs=1)

    # create brain object from the remaining locations - first find remaining locations
    sub_locs = gray_locs[~gray_locs.index.isin(mo_locs.index)]

    # add one location that IS in the model, so patient and model overlap
    sub_locs = sub_locs.append(
        gray_locs.sample(1,
                         random_state=random_seed).sort_values(['x', 'y',
                                                                'z']))

    # create a brain object with all gray locations
    bo = se.simulate_bo(n_samples=5,
                        sample_rate=1000,
                        locs=gray_locs,
                        noise=noise,
                        random_seed=random_seed)

    # parse brain object to create synthetic patient data
    data = bo.data.iloc[:, sub_locs.index]

    # put data and locations together in new sample brain object
    # (.values replaces the long-deprecated DataFrame.as_matrix)
    bo_sample = se.Brain(data=data.values,
                         locs=sub_locs,
                         sample_rate=1000)

    # predict activity at all unknown locations
    recon = model.predict(bo_sample, nearest_neighbor=False)

    # actual = bo.data.iloc[:, unknown_ind]
    actual = bo.data.iloc[:, recon.locs.index]

    corr_vals = _corr_column(actual.values, recon.data.values)

    assert 1 >= corr_vals.mean() >= -1
    # BUG FIX: `recon_3` is not defined anywhere in this test, so the
    # original assertion below raised NameError; disabled pending a proper
    # fixture (the contingency-2 test disables its twin the same way).
    # assert np.allclose(zscore(recon_3), recon.data, equal_nan=True)
コード例 #2
0
def test_electrode_contingencies_2_subset():
    """Contingency 2: patient electrodes are a strict subset of model locations."""
    # np.random.seed returns None; the globally seeded numpy RNG is what
    # keeps the sampling below deterministic.
    random_seed = np.random.seed(123)

    noise = 0

    mini_brain = se.Brain(se.load('gray', vox_size=20))

    # first five gray-matter locations; the model covers all of them
    gray_locs = mini_brain.locs.iloc[:5]
    mo_locs = gray_locs

    # random covariance over the five electrodes, restricted to model locs
    cov = se.create_cov(cov='random', n_elecs=5)
    model_cov = cov[:, mo_locs.index][mo_locs.index, :]

    model = se.Model(numerator=np.array(model_cov),
                     denominator=np.ones(np.shape(model_cov)),
                     locs=mo_locs,
                     n_subs=1)

    # the synthetic patient records from only two of the model's locations
    sub_locs = mo_locs.sample(2, random_state=random_seed).sort_values(
        ['x', 'y', 'z'])

    # simulate activity at every gray location
    sim_bo = se.simulate_bo(n_samples=5,
                            sample_rate=1000,
                            locs=gray_locs,
                            noise=noise,
                            random_seed=random_seed)

    # restrict the simulation to the patient's electrodes
    patient_data = sim_bo.data.iloc[:, sub_locs.index]
    bo_sample = se.Brain(data=patient_data.values, locs=sub_locs,
                         sample_rate=1000)

    # reconstruct activity at the remaining model locations
    recon = model.predict(bo_sample, nearest_neighbor=False)

    actual = sim_bo.data.iloc[:, recon.locs.index]

    corr_vals = _corr_column(actual.values, recon.data.values)

    #assert np.allclose(zscore(recon_2), recon.data, equal_nan=True)
    assert 1 >= corr_vals.mean() >= -1
コード例 #3
0
def interp_corr(locs,
                corrs,
                width=10,
                vox_size=10,
                outfile=None,
                save_nii=None):
    """Interpolate per-electrode correlations onto a standard brain and plot.

    Parameters
    ----------
    locs : array-like, shape (n_elecs, 3)
        Electrode coordinates the correlations were computed at.
    corrs : array-like
        Correlation values, one per electrode.
    width : float
        RBF kernel width used for the interpolation.
    vox_size : int
        Voxel size of the standard brain to interpolate onto.
    outfile : str or None
        If given, save the figure there instead of showing it.
    save_nii : str or None
        If given, also write the interpolated nifti to this path.
    """
    nii = se.load('std', vox_size=vox_size)
    full_locs = nii.get_locs().values
    # RBF weights from every standard-brain voxel to every electrode
    W = np.exp(_log_rbf(full_locs, locs, width=width))
    interp_corrs = np.dot(corrs, W.T)
    bo_nii = se.Brain(data=interp_corrs, locs=full_locs)
    nii_bo = _brain_to_nifti(bo_nii, nii)
    ni_plt.plot_glass_brain(nii_bo,
                            colorbar=True,
                            threshold=None,
                            vmax=1,
                            vmin=0)
    # alternative layout: pass display_mode='lyrz' for a four-view plot

    if save_nii:
        nii_bo.save(save_nii)

    # `is not None` is the idiomatic None test (was `not outfile is None`)
    if outfile is not None:
        plt.savefig(outfile)
    else:
        plt.show()
コード例 #4
0
def density_within_r_plot(locs, r, vox_size=4, outfile=None):
    """Plot, at each electrode, the fraction of electrodes within radius r.

    Parameters
    ----------
    locs : array-like, shape (n_elecs, 3)
        Electrode coordinates.
    r : float
        Neighborhood radius (same units as locs).
    vox_size : int
        Voxel size of the standard brain used for rendering.
    outfile : str or None
        If given, save the figure there instead of showing it.
    """
    nii = se.load('std', vox_size=vox_size)
    # (removed an unused `full_locs = nii.get_locs().values` local)
    point_tree = spatial.cKDTree(locs)
    n_locs = np.shape(locs)[0]

    # fraction of all electrodes within r of each electrode; the ball query
    # includes the query point itself. A single comprehension replaces the
    # quadratic np.append-in-a-loop pattern.
    density_locs = np.array([
        len(point_tree.query_ball_point(l, r)) / n_locs for l in locs
    ])

    bo_nii = se.Brain(data=np.atleast_2d(density_locs), locs=locs)
    nii_bo = se.helpers._brain_to_nifti(bo_nii, nii)
    ni_plt.plot_glass_brain(nii_bo,
                            colorbar=True,
                            threshold=None,
                            vmax=.1,
                            vmin=0)

    if outfile is not None:
        plt.savefig(outfile)
    else:
        plt.show()
コード例 #5
0
def density_by_voxel_plot(locs, r=20, vox_size=4, outfile=None, save_nii=None):
    """Plot, for every standard-brain voxel, the fraction of electrodes
    within radius r of that voxel.

    Parameters
    ----------
    locs : array-like, shape (n_elecs, 3)
        Electrode coordinates.
    r : float
        Neighborhood radius in the same units as locs.
    vox_size : int
        Voxel size of the standard brain to evaluate density at.
    outfile : str or None
        If given, save the figure there instead of showing it.
    save_nii : str or None
        If given, also write the density nifti to this path.
    """
    # BUG FIX: the voxel size was hard-coded to 4 here, silently ignoring
    # the vox_size argument; the default value preserves old behavior.
    sub_nii = se.load('std', vox_size=vox_size)
    sub_locs = sub_nii.get_locs().values

    point_tree = spatial.cKDTree(locs)
    n_locs = np.shape(locs)[0]

    # fraction of all electrodes within r of each voxel; a comprehension
    # replaces the quadratic np.append-in-a-loop pattern
    density_locs = np.array([
        len(point_tree.query_ball_point(l, r)) / n_locs for l in sub_locs
    ])

    bo_nii = se.Brain(data=np.atleast_2d(density_locs), locs=sub_locs)
    nii_bo = se.helpers._brain_to_nifti(bo_nii, sub_nii)
    ni_plt.plot_glass_brain(nii_bo,
                            colorbar=True,
                            threshold=None,
                            vmax=.1,
                            vmin=0,
                            display_mode='lyrz')

    if save_nii:
        nii_bo.save(save_nii)

    if outfile is not None:
        plt.savefig(outfile)
    else:
        plt.show()
コード例 #6
0
def test_nii_bo_nii():
    """Round trip nifti -> Brain -> nifti and compare voxel data."""
    bo_nii = se.Brain(_gray(20))
    nii = _brain_to_nifti(bo_nii, _gray(20))
    # expected voxels: the original gray image with NaNs zeroed out
    expected = _gray(20).get_data().flatten()
    expected[np.isnan(expected)] = 0
    assert np.allclose(expected, nii.get_data().flatten())
コード例 #7
0
def most_informative_locs_plot(df, vox_size=5, width=10, outfile=None):
    """Plot the locally-averaged correlation ("informativeness") per voxel.

    For each voxel of a standard brain, averages the correlations of all
    electrodes within `width` of the voxel (averaging in Fisher z-space,
    then converting back to r) and renders the result on a glass brain.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'R' (electrode locations) and 'Correlation' columns.
    vox_size : int
        Voxel size of the standard brain.
    width : float
        Neighborhood radius used for the local average.
    outfile : str or None
        If given, save the figure there instead of showing it.
    """
    locs = compile_df_locs(df['R'])

    sub_nii = se.load('std', vox_size=vox_size)
    sub_locs = sub_nii.get_locs().values

    point_tree = spatial.cKDTree(locs)

    # average in z-space so the mean correlation is unbiased
    z_df = df.copy(deep=True)
    z_df['Correlation'] = r2z(z_df['Correlation'])

    # mean z-correlation of all electrodes near each voxel; a comprehension
    # replaces the quadratic np.append-in-a-loop pattern
    most_info = np.array([
        z_df['Correlation'][point_tree.query_ball_point(l, width)].mean()
        for l in sub_locs
    ])

    bo_nii = se.Brain(data=np.atleast_2d(z2r(most_info)), locs=sub_locs)
    nii_bo = se.helpers._brain_to_nifti(bo_nii, sub_nii)
    ni_plt.plot_glass_brain(nii_bo,
                            colorbar=True,
                            threshold=None,
                            vmax=1,
                            vmin=0,
                            display_mode='lyrz')
    if outfile is not None:
        plt.savefig(outfile)
    else:
        plt.show()
コード例 #8
0
def plot_cluster_centers(mask, node_color='k', node_size=10, **kwargs):
    """Plot the centroid of every cluster in a mask brain object.

    Each row of the mask's data is a binary cluster membership vector;
    the centroid is the mean location of its member voxels. Returns the
    (n_clusters, 3) array of centroids.
    """
    mask_bo = se.Brain(mask)
    cluster_data = mask_bo.data
    cluster_locs = mask_bo.locs

    n_clusters = cluster_data.shape[0]
    centers = np.zeros([n_clusters, cluster_locs.shape[1]])
    for idx in np.arange(n_clusters):
        # mean coordinate of the voxels belonging to this cluster
        members = np.where(cluster_data.iloc[idx, :] == 1)[0]
        centers[idx, :] = cluster_locs.iloc[members, :].mean(axis=0)

    # identity adjacency: draw nodes only, no edges
    nl.plotting.plot_connectome(np.eye(n_clusters),
                                centers,
                                node_color=node_color,
                                node_size=node_size,
                                **kwargs)
    return centers
コード例 #9
0
ファイル: file_io.py プロジェクト: tmuntianu/supereeg_paper
def npz2bo(infile):
    """Convert a .npz recording archive into a supereeg Brain object.

    The archive is expected to hold 'Y' (data), 'samplerate',
    'fname_labels' (session labels), and 'R' (Talairach coordinates).
    The file's basename becomes the Brain's meta field.
    """
    with open(infile, 'rb') as handle:
        archive = np.load(handle)
        meta = os.path.splitext(os.path.basename(infile))[0]
        data = archive['Y']
        sample_rate = archive['samplerate']
        sessions = archive['fname_labels']
        # electrode coordinates arrive in Talairach space; convert to MNI
        locs = tal2mni(archive['R'])

    return se.Brain(data=data,
                    locs=locs,
                    sessions=sessions,
                    sample_rate=sample_rate,
                    meta=meta)
コード例 #10
0
# NOTE(review): this excerpt relies on `locs`, `mo_locs`, `model`, `noise`,
# and `random_seed` being defined earlier in the script — confirm upstream.
# create brain object from the remaining locations - first find remaining 25 locations
sub_locs = locs[~locs.index.isin(mo_locs.index)]

# create a brain object with all gray locations
bo = se.simulate_bo(n_samples=1000,
                    sample_rate=100,
                    locs=locs,
                    noise=noise,
                    random_seed=random_seed)

# parse brain object to create synthetic patient data
data = bo.data.iloc[:, sub_locs.index]

# put data and locations together in new sample brain object
bo_sample = se.Brain(data=data.values, locs=sub_locs, sample_rate=100)

# predict activity at all unknown locations
recon = model.predict(bo_sample, nearest_neighbor=False)

# get reconstructed indices (every location the patient did not observe)
recon_labels = np.where(np.array(recon.label) != 'observed')

# actual = bo.data.iloc[:, unknown_ind]
# compare z-scored ground truth with the reconstruction at those indices
actual_data = bo.get_zscore_data()[:, recon_labels[0]]

recon_data = recon[:, recon_labels[0]].get_data().values
corr_vals = _corr_column(actual_data, recon_data)

print('case 1 (null set) correlation = ' + str(corr_vals.mean()))
コード例 #11
0
    # Filter one brain object into a single frequency band and save it,
    # skipping work that is already on disk; failures are logged, not fatal.
    try:
        fname = os.path.splitext(os.path.basename(bo_file))[0]

        bo_f_file = os.path.join(config['resultsdir'],
                                 fname + '_' + freq + '.bo')

        if not os.path.exists(bo_f_file):

            bo = se.load(bo_file)
            bo.filter = None

            # f_data = butter_filt(bo.data.values, 60, bo.sample_rate[0])
            f_data = power_breakdown(bo.data.values, freq_range, SAMPLE_RATE)

            bo_f = se.Brain(data=f_data,
                            locs=bo.locs,
                            sample_rate=bo.sample_rate,
                            sessions=bo.sessions.values,
                            kurtosis=bo.kurtosis)

            bo_f.save(bo_f_file)

            print('saving: ' + bo_f_file)

        else:
            print(bo_f_file + ' already exists')

    # BUG FIX: was a bare `except:`, which also swallows KeyboardInterrupt
    # and SystemExit; Exception keeps the best-effort logging behavior
    # while letting interrupts propagate.
    except Exception:
        print('issue with ' + bo_file)
        traceback.print_exc()
コード例 #12
0
        # brain object locations subsetted entirely from both model and gray locations
        sub_locs = locs.sample(n).sort_values(['x', 'y', 'z'])

        # simulate brain object
        bo = se.simulate_bo(n_samples=1000,
                            sample_rate=100,
                            locs=locs,
                            noise=.3)

        # parse brain object to create synthetic patient data
        data = bo.data.iloc[:, sub_locs.index]

        # create synthetic patient (will compare remaining activations to predictions)
        # (.values replaces the long-deprecated DataFrame.as_matrix)
        bo_sample = se.Brain(data=data.values,
                             sample_rate=100,
                             locs=sub_locs)

        # reconstruct at 'unknown' locations
        bo_r = model.predict(bo_sample)

        # find the reconstructed indices
        recon_inds = [
            i for i, x in enumerate(bo_r.label) if x == 'reconstructed'
        ]

        # sample reconstructed data at reconstructed indices
        recon = bo_r.data.iloc[:, recon_inds]

        # sample actual data at reconstructed locations
        actual = bo.data.iloc[:, recon_inds]
コード例 #13
0
    # use half of the available cores as worker processes
    nworkers = int(config['nnodes'] * config['ppn'] * 0.5)

    # get the indices for the chunks (split along the electrode axis)
    chunk_indices = array_split(powers_by_freq, nworkers, axis=2)

    mhq = Queue(nworkers)
    mh_list = []

    # NOTE(review): array_split returns exactly nworkers pieces, so
    # chunk_indices[i + 1] looks out of range for the last worker — confirm
    # helper()'s intended arguments before changing this.
    processes = [Process(target=helper, args=(chunk_indices[i], chunk_indices[i+1], \
            powers_by_freq, i, xs, midpoint, mhq)) for i in range(nworkers)]

    for p in processes:
        p.start()

    # drain the queue before joining so workers never block on a full queue
    for i in range(nworkers):
        mh_list.append(mhq.get())

    for p in processes:
        p.join()

    # sort results back into original order (last tuple element is the
    # worker index)
    mh_list.sort(key=itemgetter(-1))
    # keep only the ndarray payloads; a comprehension replaces the old
    # index-based in-place rewrite loop
    mh_list = [item[0] for item in mh_list]

    mh_data = np.concatenate(mh_list, axis=1)
    mh_bo = se.Brain(data=mh_data.T, locs=locs, sample_rate=sample_rate)
    mh_bo.save(
        os.path.join(config['resultsdir'],
                     os.path.split(fname)[1].split('.')[0] + '_broadband.bo'))
コード例 #14
0
ファイル: test_brain.py プロジェクト: vishalbelsare/supereeg
def test_brain_load_str():
    """Constructing a Brain from a dataset name should yield a Brain."""
    loaded = se.Brain('std')
    assert isinstance(loaded, se.Brain)
コード例 #15
0
ファイル: test_brain.py プロジェクト: vishalbelsare/supereeg
def test_brain_brain():
    """Wrapping an existing Brain in se.Brain should return a Brain."""
    simulated = se.simulate_bo(n_samples=10, sample_rate=100)
    wrapped = se.Brain(simulated)
    assert isinstance(wrapped, se.Brain)
コード例 #16
0
ファイル: test_brain.py プロジェクト: vishalbelsare/supereeg
from builtins import str
import pytest
import os
import supereeg as se
import numpy as np
import pandas as pd
import nibabel as nib

# module-level fixtures shared by the tests below

# simulated brain object (10 samples at 100 Hz)
bo = se.simulate_bo(n_samples=10, sample_rate=100)

# brain object constructed from an example nifti image
nii = se.load('example_nifti')
bo_n = se.Brain(nii)

# brain object constructed from an example model
mo = se.load('example_model')
bo_m = se.Brain(mo)

def test_create_bo():
    """The simulated module-level fixture should be a Brain instance."""
    is_brain = isinstance(bo, se.Brain)
    assert is_brain

def test_bo_data_nifti():
    """A Brain built from a nifti image should be a Brain instance."""
    is_brain = isinstance(bo_n, se.Brain)
    assert is_brain

def test_bo_data_model():
    """A Brain built from a model should be a Brain instance."""
    is_brain = isinstance(bo_m, se.Brain)
    assert is_brain

def test_bo_data_df():
    """Brain.data should be stored as a pandas DataFrame."""
    is_frame = isinstance(bo.data, pd.DataFrame)
    assert is_frame

def test_bo_locs_df():
    """Brain.locs should be stored as a pandas DataFrame."""
    is_frame = isinstance(bo.locs, pd.DataFrame)
    assert is_frame
コード例 #17
0
ファイル: test_brain.py プロジェクト: vishalbelsare/supereeg
def test_brain_filter():
    """With filter=None, data and locs keep their original shapes."""
    samples = np.random.rand(10, 2)
    coords = np.random.rand(2, 3)
    brain = se.Brain(data=samples, locs=coords, filter=None, sample_rate=1000)
    assert brain.get_data().shape == (10, 2)
    assert brain.get_locs().shape == (2, 3)
コード例 #18
0
ファイル: full_mats.py プロジェクト: tmuntianu/supereeg_paper
        # NOTE(review): this excerpt is truncated — the try/loop that owns
        # this `except:` and the `if` that owns the trailing `else:` sit
        # above the visible chunk; code left byte-identical for that reason.
        except:
            # retry after a short pause (numtries is counted upstream)
            numtries += 1
            time.sleep(5)
    # band-filtered brain object passed in on the command line
    bo = se.load(sys.argv[1])

    # load original brain object
    og_fname = os.path.join(config['og_bodir'], fname.split('_' + freq)[0] + '.bo')
    try:
        og_bo = se.load(og_fname)
    # NOTE(review): bare except silently falls back to the input file —
    # consider narrowing to the specific load failure
    except:
        og_bo = se.load(sys.argv[1])
    og_bo.update_filter_inds()

    # turn it into fancy ~BandBrain~ carrying the original's filter indices
    bo = BandBrain(bo, og_bo=og_bo, og_filter_inds=og_bo.filter_inds)

    # apply the electrode filter
    bo.apply_filter()

    # turn it back into a vanilla Brain
    bo = se.Brain(bo)

    # make model at the target locations R
    mo = se.Model(bo, locs=R)

    # save model
    mo.save(os.path.join(results_dir, fname))

else:
    print('skipping model (not enough electrodes pass kurtosis threshold): ' + sys.argv[1])
コード例 #19
0
# frequency-band boundaries over the log-spaced `freqs` grid; `delta` (and
# `freqs`, `bo`, `fname`, `wavelet`) are defined above this visible excerpt
theta = freqs[8:17]
alpha = freqs[17:22]
beta = freqs[22:33]
lgamma = freqs[33:42]
hgamma = freqs[42:50]

bands = [delta, theta, alpha, beta, lgamma, hgamma]



# one row per electrode; overwritten for each band, then saved per band
peak_deviations = np.zeros(shape=bo.data.T.shape)

for i, band in enumerate(bands):
    for electrode in range(0, len(bo.data.T)):
        # continuous wavelet transform of this electrode within the band
        wav_transform, sj, wavelet_freqs, coi, fft, fftfreqs = wavelet.cwt(bo.data[electrode], 1/bo.sample_rate[0], freqs = band, wavelet=wavelet.Morlet(4))
        raw_power = np.square(np.abs(wav_transform))
        # NOTE(review): axis=0 averages across frequencies, yet the fit
        # below pairs the result with log_freqs — confirm the cwt output
        # shape; a per-frequency average may have been intended.
        avg_power = np.average(raw_power, axis=0)
        log_power = np.log(avg_power)
        log_freqs = np.log(band)
        # robust 1/f background fit in log-log space
        HR = sklearn.linear_model.HuberRegressor()
       # pdb.set_trace()
        HR.fit(log_freqs.reshape(-1,1), log_power)
        # deviation of observed power from the fitted 1/f background
        narrowband_power = log_power - (log_freqs * HR.coef_[0] + HR.intercept_)
        peak_deviations[electrode] = narrowband_power

    # save one deviation brain object per band
    deviation_bo = se.Brain(data=peak_deviations.T, locs=bo.locs, sample_rate=bo.sample_rate, filter=None)
    deviation_bo.save('peakdev_band_' + str(i) + '_' + fname)

# except:
#     print('.bo file not found')
コード例 #20
0
# where the converted brain objects are written
results_dir = config['bof_datadir']

try:
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
except OSError as err:
    print(err)

fmri_dir = config['fmri_datadir']

# gray-matter mask used when converting the fMRI niftis
nii = os.path.join(config['locs_resultsdir'], 'gray_3.nii')

# one brain object per sherlock subject; subjects are numbered from 1
# (dropped the redundant list() around range)
for i in range(1, len(os.listdir(config['fmri_datadir'])) + 1):

    bo_file = os.path.join(results_dir, 'sub-%d' % i + '.bo')

    if not os.path.exists(bo_file):

        try:
            ## need to do this for intact1 and intact 2!

            data, locs = nii2cmu(os.path.join(
                fmri_dir, 'sherlock_movie_s%d' % i + '.nii'),
                                 mask_file=nii)
            bo = se.Brain(data=data, locs=locs, sample_rate=1)
            bo.save(bo_file)
            print(bo.get_locs().shape)
        # BUG FIX: was a bare `except:`; narrowed to Exception so Ctrl-C
        # still interrupts the run, while keeping the best-effort logging
        except Exception:
            print(bo_file + '_issue')

print('done converting brain objects')
コード例 #21
0
import supereeg as se
from config import config
import os, glob
import numpy as np

"""
Joins split brain objects back into the full brain object
"""

bo_paths = glob.glob(os.path.join(config['datadir'], '*.bo'))
files = [os.path.split(p)[1].split('.bo')[0] for p in bo_paths]


for fname in files:
    # chunk files for this recording, sorted by numeric chunk index
    chunks = sorted(
        glob.glob(os.path.join(config['splitdir'],
                               fname + '_chunk*_broadband.bo')),
        key=lambda path: int(path.split('_chunk')[1].split('_broad')[0]))
    # only rejoin when all 30 chunks produced by the splitter are present
    if len(chunks) == 30:
        pieces = []
        for chunk in chunks:
            chunk_bo = se.load(chunk)
            locs = chunk_bo.locs
            sample_rate = chunk_bo.sample_rate
            pieces.append(chunk_bo.data.values)
            # free each chunk before loading the next
            del chunk_bo

        data = np.concatenate(pieces)
        print(data.shape)
        joined = se.Brain(data=data, sample_rate=sample_rate, locs=locs)
        joined.save(os.path.join(config['resultsdir'],
                                 fname + '_broadband.bo'))
        del pieces
コード例 #22
0
ファイル: splitter.py プロジェクト: tmuntianu/supereeg_paper
import numpy as np
"""
Splits each brain object into smaller brain objects to avoid memory issues
"""

# brain objects that already have broadband results
completed = glob.glob(os.path.join(config['resultsdir'], '*.bo'))
completed_trim_set = set(
    [os.path.split(x)[1].split('_broadband.bo')[0] for x in completed])

all_files = glob.glob(os.path.join(config['datadir'], '*.bo'))
all_files_trim_set = set(
    [os.path.split(x)[1].split('.')[0] for x in all_files])

# only split recordings that have not been processed yet
files = list(all_files_trim_set - completed_trim_set)
files = [os.path.join(config['datadir'], x + '.bo') for x in files]

for fname in files:
    bo = se.load(fname)
    sample_rate = bo.sample_rate
    locs = bo.locs
    # 30 chunks along the time axis
    data_list = np.array_split(bo.data.values, 30)
    del bo  # free the full recording before writing chunks
    # BUG FIX: compute the basename once instead of rebinding the loop
    # variable `fname` on every inner iteration (the rebinding happened to
    # be idempotent, but shadowed the path and recomputed needlessly)
    base = os.path.split(fname)[1].split('.')[0]
    for i, data in enumerate(data_list):
        chunk_bo = se.Brain(data=data, sample_rate=sample_rate, locs=locs)
        chunk_bo.save(
            os.path.join(config['splitdir'],
                         base + '_chunk' + str(i) + '.bo'))
        del chunk_bo
コード例 #23
0
# frequency-band boundaries over the log-spaced `freqs` grid
# (`freqs`, `bo`, `fname`, and `wavelet` are defined upstream of this excerpt)
delta = freqs[0:8]
theta = freqs[8:17]
alpha = freqs[17:22]
beta = freqs[22:33]
lgamma = freqs[33:42]
hgamma = freqs[42:50]

bands = [delta, theta, alpha, beta, lgamma, hgamma]

# average wavelet power per electrode for a fixed 6000-sample window
power = np.zeros(shape=(bo.data.T.shape[0], 6000))

for electrode in range(0, len(bo.data.T)):
    #not full brain object!
    # CWT over the hard-coded window 36000:42000 (samples, not seconds)
    wav_transform, sj, wavelet_freqs, coi, fft, fftfreqs = wavelet.cwt(
        bo.data[electrode][36000:42000],
        1 / bo.sample_rate[0],
        freqs=freqs,
        wavelet=wavelet.Morlet(4))
    raw_power = np.square(np.abs(wav_transform))
    # average across frequencies -> one power timecourse per electrode
    avg_power = np.average(raw_power, axis=0)
    power[electrode] = avg_power

# package the power timecourses as a brain object and save
power_bo = se.Brain(data=power.T,
                    locs=bo.locs,
                    sample_rate=bo.sample_rate,
                    filter=None)
power_bo.save('power_' + fname)

# except:
#     print('.bo file not found')
コード例 #24
0
                 [39., -57., 17.], [39., 3., 37.], [59., -17., 17.]])

# number of timeseries samples
n_samples = 10
# number of subjects
n_subs = 3
# number of electrodes
n_elecs = 5
# full brain object to parse and compare (use the named constants instead
# of repeating the literals)
bo_full = se.simulate_bo(n_samples=n_samples, sessions=2, sample_rate=10,
                         locs=locs)
# create brain object from subset of locations
sub_locs = bo_full.locs.iloc[6:]
sub_data = bo_full.data.iloc[:, sub_locs.index]
# .values replaces the long-deprecated DataFrame.as_matrix
bo = se.Brain(data=sub_data.values,
              sessions=bo_full.sessions,
              locs=sub_locs,
              sample_rate=10,
              meta={'brain object locs sampled': 2})
# simulate correlation matrix
data = [
    se.simulate_model_bos(n_samples=n_samples, locs=locs, sample_locs=n_elecs)
    for x in range(n_subs)
]
# test model to compare
test_model = se.Model(data=data, locs=locs, rbf_width=100)
bo_nii = se.Brain(_gray(20))
nii = _brain_to_nifti(bo_nii, _gray(20))

a = np.array([[1, 2, 3], [4, 5, 6], [
    7,
    8,