Example #1
# sum_dot_y, constregs=0 for 1st dot,
# label_tc normalised across trials, times and subjects
basefile = 'source_sequential_201707031206.h5'

r_name = 'dot_x'

file = os.path.join(ss.bem_dir, basefile[:-3] + '_slabs_%s.h5' % r_name)
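# e.g. 'source_sequential_201707031206_slabs_dot_x.h5' for r_name = 'dot_x'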
second_level = pd.read_hdf(file, 'second_level_src')
options = pd.read_hdf(file, 'options')

testlabel = 'L_v23ab_ROI-lh'
testtimes = [120, 170, 320, 400]
testtheta = 0.5
N = 100

S = helpers.find_available_subjects(megdatadir=helpers.megdatadir).size

#%% create Stan model
folded_normal_stan = """
data {
  int<lower=0> S;
  real y[S];
}

parameters {
  real<lower=0> mu;
  real<lower=0> sigma;
  real<lower=0, upper=1> theta;
}
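
// for reference: the folded-normal density of |x| with x ~ Normal(mu, sigma) is
// p(y) = Normal(y | mu, sigma) + Normal(y | -mu, sigma) for y >= 0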

model {
Example #2
#trial_info.support_correct = trial_info.loc[randind, 'support_correct'].values
#trial_info.support_correct_bin = trial_info.loc[randind, 'support_correct_bin'].values

# use "None" for all subjects; for a single subject (e.g. "[24]") plots will
# be shown
subjects = 3
if len(sys.argv) > 1:
    if sys.argv[1] == 'None':
        subjects = None
    elif sys.argv[1][0] == '[':
        subjects = list(map(int, sys.argv[1][1:-1].split(',')))
    else:
        subjects = [int(sys.argv[1])]
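# argument examples: "None" -> all available subjects, "24" -> [24], "[5,24]" -> [5, 24]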

if subjects is None:
    subjects = helpers.find_available_subjects(helpers.behavdatadir)
subjects = np.sort(np.atleast_1d(subjects))
S = subjects.size

subject_info = pd.DataFrame([], index=pd.Index(subjects, name='subject'))

for si, sub in enumerate(subjects):
    respRT = helpers.load_subject_data(sub)
    respRT_ext = pd.concat([respRT, trial_info], axis=1)

    #%% drop trials in which the subject responded too fast
    respRT_ext.drop(respRT_ext.index[respRT_ext.RT < 0.55], inplace=True)

    #%% check whether subject responded correctly according to first 4 dots
    respRT_ext['is_correct_4th'] = (
        respRT_ext.correct_4th == respRT_ext.response).astype(int)
Example #3
if 'bound' not in fix.keys():
    pars.add_param('bound', 0, 1, gaussprob(width=0.5, shift=0.5))
if 'bias' not in fix.keys():
    pars.add_param('bias', 0, .2, gaussprob())
if 'ndtloc' not in fix.keys():
    pars.add_param('ndtloc', -2, 1)
if 'ndtspread' not in fix.keys():
    pars.add_param('ndtspread', np.log(0.2), 1, exponential())
if 'lapseprob' not in fix.keys():
    pars.add_param('lapseprob', -1.65, 1, gaussprob())  # median approx at 0.05
if 'lapsetoprob' not in fix.keys():
    pars.add_param('lapsetoprob', 0, 1, gaussprob())
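# note: gaussprob() appears to map the Gaussian parameter through the standard
# normal CDF into (0, 1); the -1.65 mean above then gives a median near
# Phi(-1.65) ~ 0.05, and width/shift rescale that interval. exponential()
# appears to exponentiate the parameter (median ndtspread = exp(log(0.2)) = 0.2).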

prior = snl.pdfs.Gaussian(m=pars.mu, S=pars.cov)

#%%
subjects = helpers.find_available_subjects(
    eegdir=helpers.eegdir if use_liks else None)
#subjects = [18]

options = dict(use_liks=use_liks,
               censor_late=True,
               exclude_to=False,
               stats='hist')

resdir = os.path.join(helpers.resultsdir, 'snl', 'rotated_directions',
                      pd.Timestamp.now().strftime('%Y%m%d%H%M'))
os.mkdir(resdir)

for sub in subjects:
    if options['stats'] == 'id':
        data = helpers.load_subject(sub,
                                    exclude_to=options['exclude_to'],
Example #4
    store['scalar_opt'] = pd.Series(
        dict(exclude_to=exclude_to,
             censor_late=censor_late,
             B=B,
             S=S,
             PS=PS,
             p_outlier=p_outlier,
             bias=bias))
    store['depends_on'] = pd.Series(depends_on)

#%% some helpers for data storage
is_easy = lambda s: re.match(r'wfpt\((\w+)\.\d\)', s).group(1) == 'True'
get_stim = lambda s: int(s[-2:-1])
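# e.g. for a node name like 'wfpt(True.3)': is_easy gives True, get_stim gives 3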

#%% loop over subjects
for sub in helpers.find_available_subjects():
    print("\ninferring for subject %2d ..." % sub)

    data = helpers.load_subject(sub,
                                exclude_to=exclude_to,
                                censor_late=censor_late)

    stim = data.response.copy().astype(int)
    err = data.error.astype(bool)
    stim[err] = -stim[err]
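    # stim now encodes the presented stimulus: on error trials the response had
    # the opposite sign of the stimulus, so it is flipped back here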

    hddm_data = pd.DataFrame(
        [
            data.RT.values, (data.response + 1) / 2,
            (stim + 1) / 2, sub * np.ones(data.shape[0]), data.easy.values
        ],
Example #5
"""
Created on Fri Jan 20 18:45:48 2017

@author: bitzer
"""

import os
import numpy as np
import pandas as pd
import helpers
import posterior_model_measures
import scipy.stats


#%%
subjecti = pd.Index(helpers.find_available_subjects(
    behavdatadir=helpers.behavdatadir), name='subject')
triali = pd.Index(np.arange(480) + 1, name='trial')
doti = pd.Index(np.arange(25)+1, name='dot')

dotpos = helpers.load_dots()
trial_info = helpers.get_5th_dot_infos(dotpos)

infresult_file = 'infres_collapse_201612121759.npz'


#%% trial

# correct
subinfo = helpers.load_subject_data(2)
trial = pd.DataFrame(subinfo['stimulus']).sort_index()
trial.rename(columns={'stimulus': 'correct'}, inplace=True)
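# 'stimulus' encodes the correct choice per trial and is presumably identical
# across subjects, so loading a single subject (here 2) is sufficient to define it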