def test_parse(): """Test .tab parsing """ with ExperimentController(*std_args, stim_fs=44100, **std_kwargs) as ec: ec.identify_trial(ec_id="one", ttl_id=[0]) ec.start_stimulus() ec.write_data_line("misc", "trial one") ec.stop() ec.trial_ok() ec.write_data_line("misc", "between trials") ec.identify_trial(ec_id="two", ttl_id=[1]) ec.start_stimulus() ec.write_data_line("misc", "trial two") ec.stop() ec.trial_ok() ec.write_data_line("misc", "end of experiment") assert_raises(ValueError, read_tab, ec.data_fname, group_start="foo") assert_raises(ValueError, read_tab, ec.data_fname, group_end="foo") assert_raises(ValueError, read_tab, ec.data_fname, group_end="trial_id") assert_raises(RuntimeError, read_tab, ec.data_fname, group_end="misc") data = read_tab(ec.data_fname) keys = list(data[0].keys()) assert_equal(len(keys), 6) for key in ["trial_id", "flip", "play", "stop", "misc", "trial_ok"]: assert_in(key, keys) assert_equal(len(data[0]["misc"]), 1) assert_equal(len(data[1]["misc"]), 1) data = read_tab(ec.data_fname, group_end=None) assert_equal(len(data[0]["misc"]), 2) # includes between-trials stuff assert_equal(len(data[1]["misc"]), 2)
def test_parse(hide_window):
    """Test .tab parsing."""
    with ExperimentController(*std_args, **std_kwargs) as ec:
        ec.identify_trial(ec_id='one', ttl_id=[0])
        ec.start_stimulus()
        ec.write_data_line('misc', 'trial one')
        ec.stop()
        ec.trial_ok()
        ec.write_data_line('misc', 'between trials')
        ec.identify_trial(ec_id='two', ttl_id=[1])
        ec.start_stimulus()
        ec.write_data_line('misc', 'trial two')
        ec.stop()
        ec.trial_ok()
        ec.write_data_line('misc', 'end of experiment')

    pytest.raises(ValueError, read_tab, ec.data_fname, group_start='foo')
    pytest.raises(ValueError, read_tab, ec.data_fname, group_end='foo')
    pytest.raises(ValueError, read_tab, ec.data_fname, group_end='trial_id')
    pytest.raises(RuntimeError, read_tab, ec.data_fname, group_end='misc')
    data = read_tab(ec.data_fname)
    keys = list(data[0].keys())
    assert_equal(len(keys), 6)
    for key in ['trial_id', 'flip', 'play', 'stop', 'misc', 'trial_ok']:
        assert key in keys
    assert_equal(len(data[0]['misc']), 1)
    assert_equal(len(data[1]['misc']), 1)
    data, params = read_tab(ec.data_fname, group_end=None, return_params=True)
    assert_equal(len(data[0]['misc']), 2)  # includes between-trials stuff
    assert_equal(len(data[1]['misc']), 2)
    assert_equal(params['version'], 'dev')
    assert_equal(params['version_used'], __version__)
    assert params['file'].endswith('test_parse.py')
def test_parse_basic(hide_window, tmpdir):
    """Test .tab parsing."""
    with ExperimentController(*std_args, **std_kwargs) as ec:
        ec.identify_trial(ec_id='one', ttl_id=[0])
        ec.start_stimulus()
        ec.write_data_line('misc', 'trial one')
        ec.stop()
        ec.trial_ok()
        ec.write_data_line('misc', 'between trials')
        ec.identify_trial(ec_id='two', ttl_id=[1])
        ec.start_stimulus()
        ec.write_data_line('misc', 'trial two')
        ec.stop()
        ec.trial_ok()
        ec.write_data_line('misc', 'end of experiment')

    pytest.raises(ValueError, read_tab, ec.data_fname, group_start='foo')
    pytest.raises(ValueError, read_tab, ec.data_fname, group_end='foo')
    pytest.raises(ValueError, read_tab, ec.data_fname, group_end='trial_id')
    pytest.raises(RuntimeError, read_tab, ec.data_fname, group_end='misc')
    data = read_tab(ec.data_fname)
    keys = list(data[0].keys())
    assert_equal(len(keys), 6)
    for key in ['trial_id', 'flip', 'play', 'stop', 'misc', 'trial_ok']:
        assert key in keys
    assert_equal(len(data[0]['misc']), 1)
    assert_equal(len(data[1]['misc']), 1)
    data, params = read_tab(ec.data_fname, group_end=None, return_params=True)
    assert_equal(len(data[0]['misc']), 2)  # includes between-trials stuff
    assert_equal(len(data[1]['misc']), 2)
    assert_equal(params['version'], 'dev')
    assert_equal(params['version_used'], __version__)
    assert params['file'].endswith('test_parse.py')

    # handle old files where the last trial_ok was missing
    bad_fname = str(tmpdir.join('bad.tab'))
    with open(ec.data_fname, 'r') as fid:
        lines = fid.readlines()
    assert 'trial_ok' in lines[-3]
    with open(bad_fname, 'w') as fid:
        # we used to write JSON badly
        fid.write(lines[0].replace('"', "'"))
        # and then sometimes missed the last trial_ok
        for line in lines[1:-3]:
            fid.write(line)
    with pytest.raises(RuntimeError, match='bad bounds'):
        read_tab(bad_fname)
    data, params = read_tab(ec.data_fname, return_params=True)
    data_2, params_2 = read_tab(bad_fname, return_params=True,
                                allow_last_missing=True)
    assert params == params_2
    t = data[-1].pop('trial_ok')
    t_2 = data_2[-1].pop('trial_ok')
    assert t != t_2
    assert data_2 == data
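A hedged sketch of the two keyword arguments the test above covers, using a hypothetical file name: return_params yields the metadata dict written on the file's first line, and allow_last_missing tolerates logs whose final trial_ok line never got written.

from expyfun.io import read_tab

# 'subj01_exp.tab' is a hypothetical path; the keys printed below are the
# ones ExperimentController typically stores in the header (an assumption).
data, params = read_tab('subj01_exp.tab', return_params=True)
print(params.get('participant'), params.get('session'), params.get('version'))

# Tolerate a file that was cut off before the last trial_ok line.
data = read_tab('subj01_exp.tab', allow_last_missing=True)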
hmfx = np.zeros((len(subjects), 4), int)
avg_rt = np.zeros(len(subjects))
ppi = []
corr = []
dts = []
for si, subj in enumerate(subjects):
    print(' Reading data for %s...' % subj)
    #
    # cleaning w/r/t button presses
    #
    fnames = glob.glob(op.join(work_dir, 'data', subj + '*.tab'))
    assert len(fnames) == 1
    # this includes both phases of the experiment (dyn range & tone resp)
    exp_data = read_tab(fnames[0], group_end=None)
    assert len(exp_data) == 318
    exp_data = exp_data[-300:]  # restrict to tone experiment
    flip_times = np.array([float(d['flip'][0][1]) for d in exp_data])
    press_times = np.array(
        [float(d['keypress'][-1][1]) for d in exp_data if d['keypress']])
    # exclude presses after the end of experiment:
    press_times = press_times[press_times < flip_times[-1]]
    # get index of flips (=trial starts) immediately preceded by presses
    press_idx = np.searchsorted(flip_times, press_times)
    # get latency between presses and following flips
    press_to_flip = np.array(
        [flip_times[ii] - p for ii, p in zip(press_idx, press_times)])
    # mark for exclusion any trials that are too soon after presses
    post_press_idx = press_idx[press_to_flip < press_back]
    #
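The exclusion step above is easy to check on synthetic data; this self-contained sketch (made-up times, illustrative press_back value) shows how np.searchsorted flags the trials that start too soon after a button press.

import numpy as np

# Illustration of the exclusion logic above with made-up times (seconds).
flip_times = np.array([0.0, 2.0, 4.0, 6.0])   # trial-start flips
press_times = np.array([1.9, 4.5])            # button presses
press_back = 0.5                              # exclusion window; value is illustrative

press_idx = np.searchsorted(flip_times, press_times)     # flip following each press
press_to_flip = flip_times[press_idx] - press_times       # press-to-flip latency
post_press_idx = press_idx[press_to_flip < press_back]    # trials starting too soon after a press
print(post_press_idx)  # -> [1]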
stim_nums = list()
print('Subject {}...'.format(subj))
# find files for this subj
fnames = get_pupil_data_files(subj, indir)
n_files_expected = list(np.arange(1, 3) + len(params['block_trials']))
assert len(fnames) in n_files_expected
ix = 0 - len(params['block_trials'])
fnames = fnames[ix:]  # first blocks are training & pupil response function
# subject's expyfun log
subj_tab = glob(op.join(indir, '{}_*.tab'.format(subj)))
assert len(subj_tab) == 1
subj_tab = subj_tab[0]
with open(subj_tab, 'r') as fid:
    session = int(eval(fid.readline().strip()[2:])['session']) - 1
subj_tab = read_tab(subj_tab)
subj_tab = subj_tab[-n_trials:]
stim_onset_times = [s['play'][0][1] for s in subj_tab]
print(' Loading block', end=' ')
for run_ix, fname in enumerate(fnames):
    print(str(run_ix + 1), end=' ')
    raw = read_raw(fname)
    assert raw.info['sfreq'] == fs_in
    raw.remove_blink_artifacts()
    raws.append(raw)
    # get the stimulus numbers presented in this block
    this_stim_nums = \
        params['block_trials'][params['blocks'][session][run_ix]]
    stim_nums.extend(this_stim_nums)
    this_cond_mat = cond_mat[this_stim_nums]
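The session lookup above eval()s the commented-out metadata dict on the first line of the .tab file; ast.literal_eval is a safer drop-in when that header is a plain dict literal, as in this sketch (the file name is hypothetical).

import ast

# Hypothetical path; the first line of a .tab file is '# {...}', so strip
# the leading '# ' before parsing. Assumes the header is a plain dict literal.
with open('subj01_exp.tab', 'r') as fid:
    header = ast.literal_eval(fid.readline().strip()[2:])
session = int(header['session']) - 1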
    (len(subjects), 2, 2, 2, 2)
)  # per-subject circle false-alarm counts, by condition
dprime_subjects = np.zeros((len(subjects), 5))
dprime_diss = np.zeros((len(subjects), 4))
pc_disagree_conditions = np.zeros(
    (3, len(subjects)))  # used for t-test at end of code
subjects_trials_correct = []
subjects_trials_response = []
subjects_trials_info = []
subject_ids = []
bias_diss = np.zeros((len(subjects), 4))

for j in range(len(subjects)):
    fname = path + subjects[j] + '_flip_detect.tab'
    data = read_tab(fname, group_start='trial_id', group_end='trial_ok',
                    return_params=False)
    # t_mod: 0: not modulated, 1: modulated
    # m_mod: 0: not modulated, 1: modulated
    # angle: 0: normal, 1: flipped
    # match: 0: masker, 1: target
    correct_tracker = np.zeros((2, 2, 2, 2))
    repmod_tracker = np.zeros((2, 2, 2, 2))
    count_tracker = np.zeros((2, 2, 2, 2))
    false_alarm_circle = np.zeros((2, 2, 2, 2))
    trials_info = []
    trials_repmod = []
    trials_correct = []
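For context, a generic d-prime computation from hit and false-alarm counts looks like the sketch below (a standard formula with a 0.5 count correction; not necessarily the exact correction this script applies when filling dprime_subjects and dprime_diss).

from scipy.stats import norm


def dprime(hits, misses, fas, crs):
    """Return d' with a 0.5 count correction to avoid infinite z-scores."""
    hit_rate = (hits + 0.5) / (hits + misses + 1.0)
    fa_rate = (fas + 0.5) / (fas + crs + 1.0)
    return norm.ppf(hit_rate) - norm.ppf(fa_rate)


print(dprime(45, 5, 10, 40))  # example counts, not real data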
# -*- coding: utf-8 -*-
"""
============
Parsing demo
============

This example shows some of the functionality of ``read_tab``.
"""
# Author: Eric Larson <*****@*****.**>
#
# License: BSD (3-clause)

import ast

from expyfun.io import read_tab

print(__doc__)

data = read_tab('sample.tab')  # from simple_experiment
print('Number of trials: %s' % len(data))
keys = list(data[0].keys())
print('Data keys: %s\n' % keys)
for di, d in enumerate(data):
    if d['trial_id'][0][0] == 'multi-tone':
        print('Trial %s multi-tone' % (di + 1))
        targs = ast.literal_eval(d['multi-tone trial'][0][0])
        presses = [int(k[0]) for k in d['keypress']]
        print(' Targs: %s\n Press: %s' % (targs, presses))
def score(p, subjects):
    """Use expyfun to extract events and write MNE events files to disk."""
    for subj in subjects:
        print(" Running subject %s... " % subj, end="")

        # Figure out what our filenames should be
        out_dir = op.join(p.work_dir, subj, p.list_dir)
        if not op.isdir(out_dir):
            os.mkdir(out_dir)

        for run_name in p.run_names:
            # Extract standard events
            fname = op.join(p.work_dir, subj, p.raw_dir,
                            (run_name % subj) + p.raw_fif_tag)
            fname_out = op.join(out_dir, f"ALL_{run_name % subj}-eve.lst")
            events, _ = extract_expyfun_events(fname)[:2]
            events[:, 2] += offsets[run_name.split('_')[-1]]
            if run_name in ("%s_am", "%s_ids"):
                mne.write_events(fname_out, events)
                continue

            # Find the right mismatch .tab file
            raw = mne.io.read_raw_fif(fname, allow_maxshield="yes")
            exp_subj = subj.split("_")[1].rstrip("ab")
            tab_files = sorted(glob.glob(op.join(tabdir, f"{exp_subj}_*.tab")))
            assert len(tab_files)
            good = np.zeros(len(tab_files), bool)
            got = list()
            ts = list()
            for tab_file in tab_files:
                with open(tab_file, "r") as fid:
                    header = fid.readline().lstrip("#").strip()
                if "runfile" in header:  # errant session that breaks things
                    assert subj == "bad_130a"
                    header = re.sub('"runfile.*\'\\)"', "'1'", header)
                header = json.loads(header.replace("'", '"'))
                assert header["participant"] == exp_subj
                if "." in header["date"]:
                    fmt = "%Y-%m-%d %H_%M_%S.%f"
                else:
                    fmt = "%Y-%m-%d %H_%M_%S"
                t_tab = datetime.datetime.strptime(
                    header["date"], fmt).replace(tzinfo=timezone("US/Pacific"))
                t_raw = raw.info["meas_date"]
                ts.append(t_tab)
                # offsets between the Neuromag DAQ and expyfun computer
                off_minutes = abs((t_raw - t_tab).total_seconds() / 60.0)
                got.append(
                    (off_minutes, header["exp_name"], header["session"]))
            # pick something in the right time frame, and the correct session
            good = [
                m < 120 and e == "syllable" and s in ("1", "3")
                for m, e, s in got
            ]
            if sum(good) == 2:
                idx = np.where(good)[0]
                sizes = [os.stat(tab_files[ii]).st_size for ii in idx]
                print(f" Triaging based on file sizes: {sizes}")
                for ii in idx:
                    good[ii] = False
                good[idx[np.argmax(sizes)]] = True
            # We should only have one candidate file
            assert sum(good) == 1, sum(good)
            idx = np.where(good)[0][0]
            fname_tab = tab_files[idx]
            print(
                f' Selected {tab_files[idx]}:\n'
                f' Raw data {raw.info["meas_date"].astimezone(ts[idx].tzinfo)}\n'
                f' Tab file {ts[idx]}')
            data = read_tab(fname_tab, allow_last_missing=True)

            # Correct the triggers
            if subj in ("bad_921a", "bad_925a"):
                use_map = OTHER_TRIGGER_MAP
            else:
                use_map = TRIGGER_MAP
            new_nums = np.array([use_map[d["trial_id"][0][0]] for d in data],
                                int)
            exp_times = [d["play"][0][1] for d in data]
            # Sometimes we are missing the last one
            assert len(data) >= len(events), (len(data), len(events))
            n_missed = len(data) - len(events)
            if n_missed:
                if subj == "bad_117a":
                    sl = slice(n_missed - 1, -1, None)
                else:
                    sl = slice(None, -n_missed, None)
                data = data[sl]
                new_nums = new_nums[sl]
                exp_times = exp_times[sl]
            corr = np.corrcoef(events[:, 0], exp_times)[0, 1]
            assert corr > 9e-20, corr
            wrong = new_nums != events[:, 2]
            if wrong.any():
                print(f" Replacing {wrong.sum()}/{len(wrong)} TTL IDs")
                events[:, 2] = new_nums
            assert np.in1d(events[:, 2], IN_NUMBERS).all()
            print(" Counts: " +
                  " ".join(f"{name.upper()}: {(events[:, 2] == num).sum()}"
                           for name, num in zip(IN_NAMES, IN_NUMBERS)))
            mne.write_events(fname_out, events)
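The scoring function above leans on extract_expyfun_events to decode the TTL pulse sequences stamped by identify_trial; a minimal sketch with a hypothetical raw file name follows.

from expyfun.io import extract_expyfun_events

# 'subj01_raw.fif' is a hypothetical file name; the decoded trial numbers
# land in the third column of an MNE-style events array.
events = extract_expyfun_events('subj01_raw.fif')[0]
print(events[:5])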
    np.sum(slot_codes != '-', axis=1))

# init DF
trial_df = pd.DataFrame()
slots_df = pd.DataFrame()

# loop over subjects
for subj in subjects:
    logfile = glob(op.join(datadir, f'{subj}_*.tab'))
    assert len(logfile) == 1
    with open(logfile[0], 'r') as f:
        # first line includes dict with experiment metadata
        metadata = eval(f.readline().strip()[2:])
    assert metadata['participant'] == subj
    session = int(metadata['session']) - 1
    all_trials = read_tab(logfile[0])
    trials = all_trials[-n_trials:]  # omit training
    this_trial_df = pd.DataFrame(trials,
                                 columns=('trial_id', 'play', 'keypress'))
    this_trial_df.rename(columns=dict(play='trial_onset',
                                      keypress='presses'), inplace=True)
    this_trial_df['trial_id'] = this_trial_df['trial_id'].map(parse_trial_id)
    this_trial_df['trial_onset'] = this_trial_df['trial_onset'].map(
        lambda x: x[0][1])
    this_trial_df['presses'] = this_trial_df['presses'].map(parse_presses)
    # merge in info from cond_mat (put in correct order first!)
    block_order = params['blocks'][session]
    trial_order = np.concatenate(
        [params['block_trials'][block] for block in block_order])
    this_cond_mat = cond_mat[trial_order]
    assert np.array_equal(
        np.array(this_trial_df['trial_id'].values.tolist()),
""" ============ Parsing demo ============ This example shows some of the functionality of ``read_tab``. """ # Author: Eric Larson <*****@*****.**> # # License: BSD (3-clause) from os import path as op import ast from expyfun.io import read_tab print(__doc__) fname = op.join(op.dirname(__file__), 'sample.tab') # from simple_experiment data = read_tab(fname) print('Number of trials: %s' % len(data)) keys = list(data[0].keys()) print('Data keys: %s\n' % keys) for di, d in enumerate(data): if d['trial_id'][0][0] == 'multi-tone': print('Trial %s multi-tone' % (di + 1)) targs = ast.literal_eval(d['multi-tone trial'][0][0]) presses = [int(k[0]) for k in d['keypress']] print(' Targs: %s\n Press: %s' % (targs, presses))