Example 1
def recon_elec(elec_ind):
    recon_outfile_across = os.path.join(
        results_dir,
        os.path.basename(
            os.path.splitext(bo_fname)[0] + '_' + str(elec_ind) + '.npz'))
    recon_outfile_within = os.path.join(
        results_dir,
        os.path.basename(
            os.path.splitext(bo_fname)[0] + '_' + str(elec_ind) +
            '_within.npz'))
    if os.path.exists(recon_outfile_across) and os.path.exists(
            recon_outfile_within):
        return

    freq_bo = se.load(bo_fname)
    bo = BandBrain(freq_bo, og_bo)
    electrode = bo.get_locs().iloc[elec_ind]

    R_K_subj = bo.get_locs().values

    R_K_removed, other_inds = remove_electrode(R_K_subj, R_K_subj, elec_ind)

    known_inds, unknown_inds, e_ind = known_unknown(R, R_K_removed, R_K_subj,
                                                    elec_ind)

    electrode_ind = get_rows(R, electrode.values)
    actual = bo[:, elec_ind]
    bo = bo[:, other_inds]

    print('bo indexed for ' + str(elec_ind))

    if not os.path.exists(recon_outfile_across):
        bo_r = mo_s.predict(bo, recon_loc_inds=e_ind)

        print(electrode)
        c = _corr_column(bo_r.data.values, actual.get_zscore_data())

        print(c)

        np.savez(recon_outfile_across, coord=electrode, corrs=c)

    if not os.path.exists(recon_outfile_within):

        Model = se.Model(bo, locs=R_K_subj)

        m_locs = Model.get_locs().values
        known_inds, unknown_inds, e_ind = known_unknown(
            m_locs, R_K_removed, m_locs, elec_ind)
        bo_r = Model.predict(bo)

        bo_r = bo_r[:, unknown_inds]

        print(electrode)

        c = _corr_column(bo_r.data.values, actual.get_zscore_data())

        print(c)

        np.savez(recon_outfile_within, coord=electrode, corrs=c)
Example 2
def test_electrode_contingencies_3_locations_can_subset():

    random_seed = np.random.seed(123)
    noise = 0

    # load mini model
    gray = se.Brain(se.load('gray', vox_size=20))

    # extract 5 locations
    gray_locs = gray.locs.iloc[:5]

    # sample 4 locations for the model
    mo_locs = gray_locs.sample(4, random_state=random_seed).sort_values(
        ['x', 'y', 'z'])

    # create covariance matrix from random seed
    c = se.create_cov(cov='random', n_elecs=5)

    # pull out model from covariance matrix
    data = c[:, mo_locs.index][mo_locs.index, :]

    # create model from subsetted covariance matrix and locations
    model = se.Model(numerator=np.array(data),
                     denominator=np.ones(np.shape(data)),
                     locs=mo_locs,
                     n_subs=1)

    # create brain object from the remaining locations - first find remaining locations
    sub_locs = gray_locs[~gray_locs.index.isin(mo_locs.index)]

    sub_locs = pd.concat([
        sub_locs,
        gray_locs.sample(1, random_state=random_seed).sort_values(['x', 'y', 'z'])
    ])

    # create a brain object with all gray locations
    bo = se.simulate_bo(n_samples=5,
                        sample_rate=1000,
                        locs=gray_locs,
                        noise=noise,
                        random_seed=random_seed)

    # parse brain object to create synthetic patient data
    data = bo.data.iloc[:, sub_locs.index]

    # put data and locations together in new sample brain object
    bo_sample = se.Brain(data=data.values,
                         locs=sub_locs,
                         sample_rate=1000)

    # predict activity at all unknown locations
    recon = model.predict(bo_sample, nearest_neighbor=False)

    # actual = bo.data.iloc[:, unknown_ind]
    actual = bo.data.iloc[:, recon.locs.index]

    corr_vals = _corr_column(actual.values, recon.data.values)

    assert 1 >= corr_vals.mean() >= -1
    assert np.allclose(zscore(recon_3), recon.data, equal_nan=True)
Example 3
def test_reconstruct():
    recon_test = test_model.predict(bo,
                                    nearest_neighbor=False,
                                    force_update=True)
    actual_test = bo_full.data.iloc[:, recon_test.locs.index]

    # actual_test: the true data
    # recon_test: the reconstructed data (using Model.predict)
    corr_vals = _corr_column(actual_test.values, recon_test.data.values)
    assert np.all(corr_vals[~np.isnan(corr_vals)] <= 1) and np.all(
        corr_vals[~np.isnan(corr_vals)] >= -1)
Example 4
def test_reconstruct():
    recon_test = test_model.predict(bo, nearest_neighbor=False, force_update=True)
    actual_test = bo_full.data.iloc[:, recon_test.locs.index]
    zbo = copy.copy(bo)
    zbo.data = pd.DataFrame(bo.get_zscore_data())
    mo = test_model.update(zbo, inplace=False)
    model_corrmat_x = np.divide(mo.numerator, mo.denominator)
    model_corrmat_x = _z2r(model_corrmat_x)
    np.fill_diagonal(model_corrmat_x, 0)
    recon_data = _timeseries_recon(zbo, model_corrmat_x)
    corr_vals = _corr_column(actual_test.values, recon_test.data.values)
    assert isinstance(recon_data, np.ndarray)
    assert np.allclose(recon_data, recon_test.data, equal_nan=True)
    assert 1 >= corr_vals.mean() >= -1
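
This test divides the updated model's numerator by its denominator (an average in Fisher z-space) and maps the result back with _z2r before reconstructing. Assuming _z2r is the usual inverse Fisher z-transform (an assumption about the supereeg helper, not something stated in the snippet), it reduces to r = tanh(z), as in this sketch:

import numpy as np

def z2r_sketch(z):
    # inverse Fisher z-transform: map z-values back to correlation values in (-1, 1)
    # hypothetical stand-in for what _z2r is assumed to do; not the supereeg implementation
    return np.tanh(z)

print(z2r_sketch(np.arctanh(0.5)))  # -> 0.5, the round trip through Fisher z recovers r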
Example 5
def test_electrode_contingencies_2_subset():

    random_seed = np.random.seed(123)

    noise = 0

    gray = se.Brain(se.load('gray', vox_size=20))

    # extract locations
    gray_locs = gray.locs.iloc[:5]

    mo_locs = gray_locs

    c = se.create_cov(cov='random', n_elecs=5)

    data = c[:, mo_locs.index][mo_locs.index, :]

    model = se.Model(numerator=np.array(data),
                     denominator=np.ones(np.shape(data)),
                     locs=mo_locs,
                     n_subs=1)

    # sample a subset of the model locations to simulate the patient
    sub_locs = mo_locs.sample(2, random_state=random_seed).sort_values(
        ['x', 'y', 'z'])

    # create a brain object with all gray locations
    bo = se.simulate_bo(n_samples=5,
                        sample_rate=1000,
                        locs=gray_locs,
                        noise=noise,
                        random_seed=random_seed)

    # parse brain object to create synthetic patient data
    data = bo.data.iloc[:, sub_locs.index]

    # put data and locations together in new sample brain object
    bo_sample = se.Brain(data=data.values, locs=sub_locs, sample_rate=1000)

    # predict activity at all unknown locations
    recon = model.predict(bo_sample, nearest_neighbor=False)

    actual = bo.data.iloc[:, recon.locs.index]

    corr_vals = _corr_column(actual.values, recon.data.values)

    #assert np.allclose(zscore(recon_2), recon.data, equal_nan=True)
    assert 1 >= corr_vals.mean() >= -1
Example 6
data = bo.data.iloc[:, sub_locs.index]

# put data and locations together in new sample brain object
bo_sample = se.Brain(data=data.values, locs=sub_locs, sample_rate=100)

# predict activity at all unknown locations
recon = model.predict(bo_sample, nearest_neighbor=False)

# get reconstructed indices
recon_labels = np.where(np.array(recon.label) != 'observed')

# actual = bo.data.iloc[:, unknown_ind]
actual_data = bo.get_zscore_data()[:, recon_labels[0]]

recon_data = recon[:, recon_labels[0]].get_data().values
corr_vals = _corr_column(actual_data, recon_data)

print('case 1 (null set) correlation = ' + str(corr_vals.mean()))

########## debug case 2 - subset ##################

# set random seed to default and noise to 0
random_seed = np.random.seed(123)
noise = 0

# simulate 100 locations
locs = se.simulate_locations(n_elecs=100, set_random_seed=random_seed)

# create model locs by sampling 100 locations
mo_locs = locs.sample(100,
                      random_state=random_seed).sort_values(['x', 'y', 'z'])
Example 7
        # reconstruct at 'unknown' locations
        bo_r = model.predict(bo_sample)

        # find the reconstructed indices
        recon_inds = [
            i for i, x in enumerate(bo_r.label) if x == 'reconstructed'
        ]

        # sample reconstructed data at the reconstructed indices
        recon = bo_r.data.iloc[:, recon_inds]

        # sample actual data at reconstructed locations
        actual = bo.data.iloc[:, recon_inds]

        # correlate reconstruction with actual data
        corr_vals = _corr_column(actual.values, recon.values)
        corr_vals_sample = np.random.choice(corr_vals, 5)

        d.append({
            'Subjects in model': p,
            'Electrodes per subject in model': m,
            'Electrodes per reconstructed subject': n,
            'Average Correlation': corr_vals_sample.mean(),
            'Correlations': corr_vals
        })

    d = pd.DataFrame(d,
                     columns=[
                         'Subjects in model',
                         'Electrodes per subject in model',
                         'Electrodes per reconstructed subject',
Example 8
                    noise=.1)

# sample 90 locations, and get indices
sub_locs = locs.sample(90,
                       replace=False).sort_values(['x', 'y',
                                                   'z']).index.values.tolist()

# index brain object to get sample patient
bo_sample = bo[:, sub_locs]

# plot sample patient locations
bo_sample.plot_locs()

# plot sample patient data
bo_sample.plot_data()

# make model from brain object
r_model = se.Model(data=bo, locs=locs)

# predict
bo_s = r_model.predict(bo_sample, nearest_neighbor=False)

# find indices for reconstructed locations
recon_labels = np.where(np.array(bo_s.label) != 'observed')

# find correlations between predicted and actual data
corrs = _corr_column(bo.get_data().values, bo_s.get_data().values)

# index reconstructed correlations
corrs[recon_labels].mean()
Example 9
def test_corr_column():
    X = np.matrix([[1, 2, 3], [1, 2, 3]])
    corr_vals = _corr_column(np.array([[.1, .4], [.2, .5], [.3, .6]]),
                             np.array([[.1, .4], [.2, .5], [.3, .6]]))
    print(corr_vals)
    assert isinstance(corr_vals, (float, np.ndarray))
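
All of these examples score reconstructions with _corr_column, which, judging by how it is used (two equally shaped 2-D arrays in, one value per column out), computes a column-wise Pearson correlation. Below is a minimal sketch of that idea under that assumption; corr_column_sketch is a hypothetical stand-in, not the supereeg implementation.

import numpy as np

def corr_column_sketch(x, y):
    # Pearson correlation between matching columns of two (samples x columns) arrays.
    # Sketch only: assumes equal shapes and non-zero column variance.
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    xz = (x - x.mean(axis=0)) / x.std(axis=0)
    yz = (y - y.mean(axis=0)) / y.std(axis=0)
    return (xz * yz).mean(axis=0)

# mirrors test_corr_column above: identical inputs give a correlation of 1 per column
print(corr_column_sketch(np.array([[.1, .4], [.2, .5], [.3, .6]]),
                         np.array([[.1, .4], [.2, .5], [.3, .6]])))  # -> [1. 1.]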
Example 10
electrode_ind = get_rows(R, electrode.values)
actual = bo[:, elec_ind]
bo = bo[:, other_inds]

print('bo indexed')

# recon_outfile_across = os.path.join(results_dir, os.path.basename(sys.argv[1][:-3] + '_' + sys.argv[2] + '.npz'))

# recon_outfile_within = os.path.join(results_dir, os.path.basename(sys.argv[1][:-3] + '_' + sys.argv[2] + '_within.npz'))

if not os.path.exists(recon_outfile_across):
    bo_r = mo_s.predict(bo, recon_loc_inds=e_ind)

    print(bo_r.get_locs())
    print(electrode)
    c = _corr_column(bo_r.data.values, actual.get_zscore_data())

    print(c)

    np.savez(recon_outfile_across, coord=electrode, corrs=c)

else:
    print('across reconstructions are done')

if not os.path.exists(recon_outfile_within):

    Model = se.Model(bo, locs=R_K_subj)

    m_locs = Model.get_locs().values
    known_inds, unknown_inds, e_ind = known_unknown(m_locs, R_K_removed,
                                                    m_locs, elec_ind)
Example 11
bo = se.load(bo_fname)
bo.filter = None


R_subj = bo.get_locs().values
R_sub_subj = bo_s.get_locs().values


known_inds, unknown_inds = known_unknown(R_subj, R, R)

actual = bo[:,known_inds]

print('bo indexed')

recon_outfile = os.path.join(results_dir, os.path.basename(file_name + '.npz'))

bo_r = mo.predict(bo_s)

R_actual = actual.get_locs().values

R_recon = bo_r.get_locs().values

known_inds, unknown_inds = known_unknown(R_recon, R_actual)

bo_r = bo_r[:,known_inds]

c = _corr_column(bo_r.data.values, actual.get_zscore_data())

print(c)

np.savez(recon_outfile, corrs=c, locs=bo_r.get_locs().values)
Example 12
                             locs=sub_locs)

        # reconstruct at 'unknown' locations
        bo_r = model.predict(bo_sample)

        # find the reconstructed indices
        recon_inds = [i for i, x in enumerate(bo_r.label) if x != 'observed']

        # sample reconstructed data at the reconstructed indices
        recon = bo_r[:, recon_inds]

        # sample actual data at reconstructed locations
        actual = bo[:, recon_inds]

        # correlate reconstruction with actual data
        corr_vals = _corr_column(actual.get_data().values,
                                 recon.get_data().values)

        d.append({
            'Time': t,
            'Noise': no,
            'Correlations': corr_vals.mean(),
            'Patients': p
        })

    d = pd.DataFrame(d, columns=['Time', 'Noise', 'Correlations', 'Patients'])
    append_d = pd.concat([append_d, d])
    append_d.index.rename('Iteration', inplace=True)

fig, axs = plt.subplots(ncols=len(np.unique(append_d['Patients'])),
                        sharex=True,
                        sharey=True)