def score(p, subjects):
    """Scoring function"""
    for subj in subjects:
        print(' Running subject %s... ' % subj, end='')
        # Make sure the per-subject list directory exists
        out_dir = op.join(p.work_dir, subj, p.list_dir)
        if not op.isdir(out_dir):
            os.mkdir(out_dir)
        for run_name in p.run_names:
            run_id = run_name % subj
            fname = op.join(p.work_dir, subj, p.raw_dir,
                            run_id + p.raw_fif_tag)
            events, presses = extract_expyfun_events(fname)[:2]
            # Remap raw trigger codes through the experiment lookup table
            for row in events:
                row[2] = _expyfun_dict[row[2]]
            fname_out = op.join(out_dir, 'ALL_' + run_id + '-eve.lst')
            mne.write_events(fname_out, events)
            # Behavioral performance: odd trigger codes mark deviant trials
            is_dev = events[:, 2] % 2 == 1
            pressed = np.array([len(pr) > 0 for pr in presses], bool)
            n_dev = np.sum(is_dev)
            hits = np.sum(pressed[is_dev])
            fas = np.sum(pressed[~is_dev])
            misses = n_dev - hits
            crs = (len(is_dev) - n_dev) - fas
            print('HMFC: %s, %s, %s, %s' % (hits, misses, fas, crs))
def reconstruct_events(p, subjects):
    """Reconstruct events from expyfun tab file.

    For each subject/run, read the expyfun .tab log, rebuild the event
    onsets from the logged play times, assign trigger codes from the
    stimulus trial id, and write an MNE event list to disk.
    """
    # Maps expyfun trial ids to the trigger codes written to the event file.
    trigger_map = {
        'Dp01bw6-rms': 103,
        'Dp01bw1-rms': 104,
        'Dp01bw10-rms': 105,
    }
    for subj in subjects:
        print(' Running subject %s... ' % subj, end='')
        # Figure out what our filenames should be
        out_dir = op.join(p.work_dir, subj, p.list_dir)
        for run_name in p.run_names:
            print(subj)
            fname = op.join(p.work_dir, subj, p.raw_dir,
                            (run_name % subj) + p.raw_fif_tag)
            tab_file = op.join(p.work_dir, subj, p.list_dir,
                               (run_name % subj + '.tab'))
            evs, _ = extract_expyfun_events(fname)[:2]
            raw = mne.io.read_raw_fif(fname, allow_maxshield='yes')
            data = read_tab(tab_file)
            # BUGFIX: np.int was removed in NumPy 1.24 — use builtin int.
            new_evs = np.zeros(evs.shape, dtype=int)
            for i in range(len(evs)):
                # Sample index of the stimulus onset from the logged play time
                new_evs[i, 0] = raw.time_as_index(data[i]['play'][0][1])
                new_evs[i, 1] = 0
                # classify event type based on expyfun stimulus
                trial_id = data[i]['trial_id'][0][0]
                try:
                    new_evs[i, 2] = trigger_map[trial_id]
                except KeyError:
                    # BUGFIX: the original if/elif chain left `trigger`
                    # unbound (or stale) for an unknown trial id.
                    raise ValueError('Unknown trial_id %r in %s'
                                     % (trial_id, tab_file))
            fname_out = op.join(out_dir,
                                'ALL_' + (run_name % subj) + '-eve.lst')
            mne.write_events(fname_out, new_evs)
def score(p, subjects):
    """Scoring function"""
    for subj in subjects:
        print(" Running subject %s... " % subj, end="")
        # Create the list directory for this subject if needed
        list_dir = op.join(p.work_dir, subj, p.list_dir)
        if not op.isdir(list_dir):
            os.mkdir(list_dir)
        for run_name in p.run_names:
            run_tag = run_name % subj
            raw_fname = op.join(p.work_dir, subj, p.raw_dir,
                                run_tag + p.raw_fif_tag)
            events, presses = extract_expyfun_events(raw_fname)[:2]
            # Translate trigger values through the experiment dictionary
            for jj in range(events.shape[0]):
                events[jj, 2] = _expyfun_dict[events[jj, 2]]
            mne.write_events(
                op.join(list_dir, "ALL_" + run_tag + "-eve.lst"), events)
            # get subject performance: odd codes are deviants
            deviant = events[:, 2] % 2 == 1
            responded = np.array([len(pr) > 0 for pr in presses], bool)
            total_dev = np.sum(deviant)
            hit_count = np.sum(responded[deviant])
            fa_count = np.sum(responded[~deviant])
            miss_count = total_dev - hit_count
            cr_count = (len(deviant) - total_dev) - fa_count
            print("HMFC: %s, %s, %s, %s"
                  % (hit_count, miss_count, fa_count, cr_count))
def score(file_path):
    """Scoring function.

    Extract events and button presses from the raw file at ``file_path``,
    drop non-positive event codes, print hit/miss/false-alarm counts, and
    return the remaining events array.
    """
    events, presses = extract_expyfun_events(file_path)[:2]
    # Shift codes down by one so "no event" becomes 0 and is masked out
    events[:, 2] -= 1
    mask = events[:, 2] > 0
    events = events[mask]
    # BUGFIX: np.array(presses) on a ragged list of press lists fails on
    # modern NumPy (needs dtype=object); filter with plain Python instead.
    presses = [pr for pr, keep in zip(presses, mask) if keep]
    # get subject performance
    # boolean mask using modulus of target event and 2
    targets = events[:, 2] % 2 == 0
    has_presses = np.array([len(pr) > 0 for pr in presses], bool)
    n_targets = np.sum(targets)
    hits = np.sum(has_presses[targets])
    false_alarms = np.sum(has_presses[~targets])
    misses = n_targets - hits
    print("HMF: %s, %s, %s" % (hits, misses, false_alarms))
    return events
def score(p, subjects):
    """Scoring function"""
    for subj in subjects:
        print(' Running subject %s... ' % subj, end='')
        # Ensure the per-subject list directory exists
        lst_dir = op.join(p.work_dir, subj, p.list_dir)
        if not op.isdir(lst_dir):
            os.mkdir(lst_dir)
        for run_name in p.run_names:
            print(subj)
            run_tag = run_name % subj
            raw_path = op.join(p.work_dir, subj, p.raw_dir,
                               run_tag + p.raw_fif_tag)
            evts = extract_expyfun_events(raw_path)[0]
            # Offset trigger codes into the 100+ range
            evts[:, 2] += 100
            mne.write_events(
                op.join(lst_dir, 'ALL_' + run_tag + '-eve.lst'), evts)
def score(p, subjects):
    """Scoring function"""
    for subj in subjects:
        print(' Running subject %s... ' % subj, end='')
        # Figure out what our filenames should be
        dest_dir = op.join(p.work_dir, subj, p.list_dir)
        if not op.isdir(dest_dir):
            os.mkdir(dest_dir)
        for run_name in p.run_names:
            print(subj)
            tag = run_name % subj
            raw_file = op.join(p.work_dir, subj, p.raw_dir,
                               tag + p.raw_fif_tag)
            events = extract_expyfun_events(raw_file)[0]
            # Shift trigger codes into the 10+ range
            events[:, 2] += 10
            out_fname = op.join(dest_dir, 'ALL_' + tag + '-eve.lst')
            mne.write_events(out_fname, events)
# NOTE(review): this chunk contained unresolved git merge-conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>> dcf4d60...) which made the file a
# SyntaxError. Resolved by keeping the HEAD side (subject sasi_129) and the
# active list-A assignment — confirm the intended subject/list pairing.
#list_fname = '/Volumes/TimeMachineBackups/MEG_Data/SASI/sentnew2b_FishNew.lst'
list_fname = '/Volumes/TimeMachineBackups/MEG_Data/SASI/sentnew2a_FishNew.lst'
list_info = parse_list(list_fname)
list_info_temp = list_info
raw_fname = '/Volumes/TimeMachineBackups/MEG_Data/SASI/sasi_129/sasi_129_raw_tsss_mc.fif'
sentnew2_events, _ = extract_expyfun_events(raw_fname)[:2]  # Takes 20 seconds
# Format ids
sentnew2_events[:, 2] += 10
sentnew2_events_offset = np.zeros([1500, 3], dtype=int)
sentnew2_critical = np.zeros([1500, 3], dtype=int)
fname_out = '/Volumes/TimeMachineBackups/MEG_Data/SASI/sasi_129/sentnew2a_FishNew-eve.lst'
#op.join(out_dir, 'ALL_' + (raw_fname) + '-eve.lst')
mne.write_events(fname_out, sentnew2_events)
# NEXT : loop through list_info for each sentence & add offset to timestamp
def score(p, subjects):
    """Use expyfun to extract events write MNE events file to disk."""
    for subj in subjects:
        print(" Running subject %s... " % subj, end="")
        # Figure out what our filenames should be
        out_dir = op.join(p.work_dir, subj, p.list_dir)
        if not op.isdir(out_dir):
            os.mkdir(out_dir)
        for run_name in p.run_names:
            # Extract standard events
            fname = op.join(p.work_dir, subj, p.raw_dir,
                            (run_name % subj) + p.raw_fif_tag)
            fname_out = op.join(out_dir, f"ALL_{run_name % subj}-eve.lst")
            events, _ = extract_expyfun_events(fname)[:2]
            # Shift codes by a per-run-type offset (keyed on run-name suffix)
            events[:, 2] += offsets[run_name.split('_')[-1]]
            # am/ids runs need no tab-file correction: write and move on
            if run_name in ("%s_am", "%s_ids"):
                mne.write_events(fname_out, events)
                continue
            # Find the right mismatch .tab file
            raw = mne.io.read_raw_fif(fname, allow_maxshield="yes")
            # e.g. "bad_130a" -> "130" (strip session suffix a/b)
            exp_subj = subj.split("_")[1].rstrip("ab")
            tab_files = sorted(glob.glob(op.join(tabdir, f"{exp_subj}_*.tab")))
            assert len(tab_files)
            good = np.zeros(len(tab_files), bool)
            got = list()
            ts = list()
            for tab_file in tab_files:
                # Each tab file starts with a '#'-prefixed JSON-ish header line
                with open(tab_file, "r") as fid:
                    header = fid.readline().lstrip("#").strip()
                if "runfile" in header:
                    # errant session that breaks things
                    assert subj == "bad_130a"
                    header = re.sub('"runfile.*\'\\)"', "'1'", header)
                header = json.loads(header.replace("'", '"'))
                assert header["participant"] == exp_subj
                # Header dates may or may not carry fractional seconds
                if "." in header["date"]:
                    fmt = "%Y-%m-%d %H_%M_%S.%f"
                else:
                    fmt = "%Y-%m-%d %H_%M_%S"
                t_tab = datetime.datetime.strptime(
                    header["date"], fmt).replace(tzinfo=timezone("US/Pacific"))
                t_raw = raw.info["meas_date"]
                ts.append(t_tab)
                # offsets between the Neuromag DAQ and expyfun computer
                off_minutes = abs((t_raw - t_tab).total_seconds() / 60.0)
                got.append(
                    (off_minutes, header["exp_name"], header["session"]))
            # pick something in the right time frame, and the correct
            # session
            good = [
                m < 120 and e == "syllable" and s in ("1", "3")
                for m, e, s in got
            ]
            if sum(good) == 2:
                # Two plausible files: keep the larger one (more trials logged)
                idx = np.where(good)[0]
                sizes = [os.stat(tab_files[ii]).st_size for ii in idx]
                print(f" Triaging based on file sizes: {sizes}")
                for ii in idx:
                    good[ii] = False
                good[idx[np.argmax(sizes)]] = True
            assert sum(good) == 1, sum(good)
            idx = np.where(good)[0][0]
            fname_tab = tab_files[idx]
            print(
                f' Selected {tab_files[idx]}:\n'
                f' Raw data {raw.info["meas_date"].astimezone(ts[idx].tzinfo)}\n'
                f' Tab file {ts[idx]}')
            # We should only have one candidate file
            # NOTE(review): the assert and fname_tab assignment below repeat
            # the two statements above verbatim — likely redundant; verify.
            assert sum(good) == 1, sum(good)
            fname_tab = tab_files[np.where(good)[0][0]]
            data = read_tab(fname_tab, allow_last_missing=True)
            # Correct the triggers
            if subj in ("bad_921a", "bad_925a"):
                use_map = OTHER_TRIGGER_MAP
            else:
                use_map = TRIGGER_MAP
            new_nums = np.array([use_map[d["trial_id"][0][0]] for d in data],
                                int)
            exp_times = [d["play"][0][1] for d in data]
            # Sometimes we are missing the last one
            assert len(data) >= len(events), (len(data), len(events))
            n_missed = len(data) - len(events)
            if n_missed:
                # bad_117a is missing a *leading* trial instead of trailing ones
                if subj == "bad_117a":
                    sl = slice(n_missed - 1, -1, None)
                else:
                    sl = slice(None, -n_missed, None)
                data = data[sl]
                new_nums = new_nums[sl]
                exp_times = exp_times[sl]
            # Sanity-check alignment between DAQ samples and expyfun times
            # NOTE(review): the 9e-20 threshold only rejects non-positive
            # correlation; presumably a stricter bound was intended — confirm.
            corr = np.corrcoef(events[:, 0], exp_times)[0, 1]
            assert corr > 9e-20, corr
            wrong = new_nums != events[:, 2]
            if wrong.any():
                print(f" Replacing {wrong.sum()}/{len(wrong)} TTL IDs")
                events[:, 2] = new_nums
            assert np.in1d(events[:, 2], IN_NUMBERS).all()
            print(" Counts: " + " ".join(
                f"{name.upper()}: {(events[:, 2] == num).sum()}"
                for name, num in zip(IN_NAMES, IN_NUMBERS)))
            mne.write_events(fname_out, events)
# Select the stimulus list and subject cohort for list B.
# NOTE(review): only the 'B' branch is visible in this chunk; the 'A'
# branch presumably precedes it. If list_type is neither, list_fname and
# subjs are left undefined and the code below will raise NameError — verify.
if list_type == 'B':
    list_fname = data_path + 'sentnew2b_FishNew.lst'
    subjs = [
        'sasi_110', 'sasi_114', 'sasi_118', 'sasi_120', 'sasi_130',
        'sasi_134', 'sasi_141', 'sasi_144', 'sasi_147'
    ]
else:
    print("list_type must be A or B")
list_info = parse_list(list_fname)
list_info_temp = list_info
for i in subjs:
    raw_fname = op.join(data_path, '%s' % i, 'raw_fif', '%s_raw.fif' % i)
    sentnew2_events, _r = extract_expyfun_events(raw_fname)[:2]
    # Format ids
    sentnew2_events[:, 2] += 10
    # Pre-allocated output buffers for offset/critical events
    # (1500 rows assumed sufficient — TODO confirm against list length)
    sentnew2_events_offset = np.zeros([1500, 3], dtype=int)
    sentnew2_critical = np.zeros([1500, 3], dtype=int)
    fname_out = op.join(data_path, '%s' % i, 'lists',
                        'orig_events_%s-eve.lst' % i)
    mne.write_events(fname_out, sentnew2_events)
    # Loop through list_info for each sentence & add offset to timestamp
    num_sentences = (len(sentnew2_events)) - 1
    # Loop through sentences
    total_word_count = 0
    last_word = 0
    #for sentence_count in range(0, 3):