예제 #1
0
def make(fo):
    """Create artifact (cnn bad epoch) xml files for every block of every participant.

    fo  fid2ort mapping passed to Participant (speeds up loading).
    Skips blocks without downsampled 100hz EEG data or without a CNN
    prediction file. Uses the 'cnn_5X5_2048_1' model predictions.
    """
    print('making artifacts, cnn bad epoch, xml files per block')

    for i in range(1, 49):
        p = e.Participant(i, fid2ort=fo)
        p.add_all_sessions()
        for s in p.sessions:
            for b in s.blocks:
                # skip blocks for which no downsampled (100hz) data exists
                if not os.path.isfile(path.eeg100hz + windower.make_name(b) +
                                      '.npy'):
                    continue
                if b.start_marker_missing or b.end_marker_missing:
                    # without markers nsamples must come from the data itself
                    d = load_100hz_numpy_block(windower.make_name(b))
                    w = windower.Windower(b, nsamples=d.shape[1], sf=100)
                else:
                    # BUGFIX: this assignment used to be unconditional and
                    # overwrote the nsamples-based windower created above
                    w = windower.Windower(b, sf=100)
                if not os.path.isfile(path.artifact_data_all_pp + w.name +
                                      '_pred.npy'):
                    print(path.artifact_data_all_pp + w.name + '_pred.npy',
                          'no prediction file present')
                    continue
                a = xml_cnn.xml_cnn(w)
                a.make_bad_epoch()
                a.bad_epochs2xml()
                a.write_bad_epoch_xml()
                print(w.name)
예제 #2
0
def count_words(save=False):
    '''Count words in the EEG experiment per block and exp type.

    count all presented words (all), count all presented content words (cw-all)
    count the subset of words that is useable (ok), count the cw subset (cw-ok)
    save        whether to save the dict (default false because it exists)

    Returns (word_counter, bad_blocks): the counts dict and the blocks whose
    words could not be extracted.
    '''
    word_counter = {}
    bad_blocks = []

    for i in range(1, 49):
        p = e.Participant(i)
        p.add_all_sessions()
        print(p)
        for s in p.sessions:
            exp_type = s.exp_type
            word_counter.setdefault(exp_type, 0)
            for b in s.blocks:
                try:
                    b.extract_words(content_word=False)
                except Exception:
                    # was a bare except; keep best-effort behavior but do not
                    # swallow KeyboardInterrupt/SystemExit
                    bad_blocks.append(b)
                    continue
                if not hasattr(b, 'extracted_words'):
                    bad_blocks.append(b)
                    continue
                add_wc(b, word_counter, exp_type)
                add_wc(b, word_counter, b.name)
    if save:
        save_count_ds(word_counter)
        save_count_dict(word_counter)
    return word_counter, bad_blocks
예제 #3
0
def check_p(p):
    '''Ensure p is a participant object and correctly loaded.

    p   int (1-48) or participant object; if sessions are not loaded
        (no 'so' attribute) this function loads them.
    '''
    if isinstance(p, int) or not hasattr(p, 'so'):
        # a participant object without sessions is rebuilt from its id
        if not isinstance(p, int):
            p = p.pp_id
        p = e.Participant(p)
        p.add_all_sessions()
    return p
def filename2block(f, fo=None):
    '''Return the block object corresponding to a bad_epoch filename.'''
    # the filename encodes participant id, experiment type and block id
    pp_id = int(f.split('pp')[-1].split('_')[0])
    exp_type = f.split('exp-')[-1].split('_')[0]
    bid = f.split('bid-')[-1].split('.')[0]
    participant = e.Participant(pp_id, fid2ort=fo)
    participant.add_session(exp_type)
    session = getattr(participant, 's' + exp_type)
    return getattr(session, 'b' + bid)
예제 #5
0
def make_n400_word2surprisal(overwrite=True, filename=''):
    '''Write one line per word with surprisal (logprob) values for the n400 analysis.

    overwrite   truncate the output file before writing (default True)
    filename    output file, defaults to path.data + 'n400word2surprisal'
    '''
    if filename == '':
        filename = path.data + 'n400word2surprisal'
    if overwrite:
        # truncate without leaking the handle (was: open(filename,'w'))
        open(filename, 'w').close()
    p = e.Participant(1)
    fo = p.fid2ort
    # open the output once instead of re-opening it for every single word
    with open(filename, 'a') as fout:
        for i in range(1, 49):
            p = e.Participant(i, fo)
            p.add_all_sessions()
            for s in p.sessions:
                for b in s.blocks:
                    # word_index renamed from i, which shadowed the participant index
                    for word_index, w in enumerate(b.words):
                        line = make_n400_name(b, word_index)
                        if not hasattr(w, 'ppl'):
                            line += '\t' + 'NA' + '\n'
                        else:
                            line += '\t' + str(w.ppl.logprob) + ',' + str(w.ppl.logprob_register)
                            line += ',' + str(w.ppl.logprob_other1) + ',' + str(w.ppl.logprob_other2) + '\n'
                        fout.write(line)
예제 #6
0
def name2block(name, fo=None):
    '''Create and return the block object encoded in a windower-generated name.'''
    # decode the name into participant id, experiment type and block id
    pp_id = name2pp_id(name)
    exp_type = name2exp_type(name)
    bid = name2bid(name)
    participant = e.Participant(pp_id, fid2ort=fo)
    participant.add_session(exp_type)
    session = getattr(participant, 's' + exp_type)
    return getattr(session, 'b' + str(bid))
예제 #7
0
    def __init__(self, p=None, history_length=4):
        '''Hold all words of all experiments with PMN information (phonological mismatch negativity).

        p               participant object with all sessions loaded,
                        e.g. p = e.Participant(1); p.add_all_sessions()
        history_length  number of previous words added to the pmn word
                        (to determine entropy)
        '''
        self.p = p
        self.history_length = history_length
        if not p:
            # no participant supplied: default to participant 1
            self.p = experiment.Participant(1)
            self.p.add_all_sessions()
        self.make_data()
        self.add_index()
def make_matrix(fo, add_pp_info=False, normalize_data=True, save_directory=None, make_data=True):
    '''Create artifact training numpy matrices per block (1 sec windows, 99% overlap).

    fo              fid2ort mapping passed to Participant
    add_pp_info     include participant info in the info matrix
    normalize_data  normalize the windowed data
    save_directory  output directory, defaults to path.artifact_training_data
    make_data       also window and save the eeg data (not only the info matrix)
    '''
    print('making artifact training np matrices per block, with default 1 sec and 99 perc overlap')
    if save_directory is None:
        save_directory = path.artifact_training_data
    print('save directory:', save_directory)
    fout = open('nrows_per_block_v2', 'w') if make_data else None
    nrows = 0

    try:
        for i in range(1, 49):
            p = e.Participant(i, fid2ort=fo)
            p.add_all_sessions()
            for s in p.sessions:
                for b in s.blocks:
                    if os.path.isfile(path.artifact_training_data + windower.make_name(b) + '.npy'):
                        # windowed data is already present
                        continue
                    if not os.path.isfile(path.eeg100hz + windower.make_name(b) + '.npy'):
                        # no downsampled data present to load
                        continue
                    print(windower.make_name(b))
                    d = load_100hz_numpy_block(windower.make_name(b))
                    if b.start_marker_missing or b.end_marker_missing:
                        # without markers nsamples must come from the data itself
                        w = windower.Windower(b, nsamples=d.shape[1], sf=100, window_overlap_percentage=.99)
                    else:
                        w = windower.Windower(b, sf=100, window_overlap_percentage=.99)
                    if w.fn_annotation == 0:
                        print('skipping:', w.name, 'NO ANNOTATION')
                        continue  # if there is no annotation file skip
                    print('processing:', w.name, w.fn_annotation)
                    w.make_info_matrix(add_pp_info=add_pp_info)
                    if make_data:
                        d = remove_channels(d)
                        d = windower.window_data(d, w.windows['sf100'], flatten=True, normalize=normalize_data)
                        rows = d.shape[0]
                        nrows += rows
                        fout.write(w.name + '\t' + str(rows) + '\n')
                        print(d.shape, w.info_matrix.shape[0])
                        # every windowed row must have a matching info row
                        assert d.shape[0] == w.info_matrix.shape[0]
                        np.save(save_directory + w.name + '_data', d)
                    np.save(save_directory + w.name + '_info', w.info_matrix)

        if make_data:
            fout.write('all_blocks\t' + str(nrows) + '\n')
    finally:
        # close the row-count log even if an error occurs mid-run
        if fout is not None:
            fout.close()
예제 #9
0
def handle_all(force_make=False):
    '''Run handle_block on every block of every participant.

    force_make  passed through to handle_block
    Returns (output, bad_blocks): accumulated handle_block results and the
    blocks that raised during handling.
    '''
    output = []
    bad_blocks = []
    for i in range(1, 49):
        print(i)
        p = e.Participant(i)
        p.add_all_sessions()
        for s in p.sessions:
            for b in s.blocks:
                try:
                    output.extend(handle_block(b, force_make, ppls=s.ppl))
                    print('-' * 100, '\n' * 9, b.make_name(), '\t\t\tsucces',
                          '\n' * 9, '-' * 100)
                except Exception:
                    # was a bare except; keep the best-effort behavior but do
                    # not swallow KeyboardInterrupt/SystemExit
                    bad_blocks.append(b)
                    print('!' * 100, '\n' * 9, b.make_name(), '\t\t\tfailed',
                          '\n' * 9, '!' * 100)
    return output, bad_blocks
예제 #10
0
def make_blinks():
    '''Make all blinks objects and store them as pickled blinks objects.

    A pp<i>.done marker file records participants that were already processed,
    so the function can be re-run and will skip finished participants.
    '''
    for i in range(1, 49):
        print('-' * 50)
        print('-' * 50)
        print('make blinks participant:', i)
        print('-' * 50)
        print('-' * 50)
        done_fn = path.blinks + 'pp' + str(i) + '.done'
        if os.path.isfile(done_fn):
            # spelling fixed: was 'participent'
            print('skipping participant:', i, 'already done')
            continue
        p = experiment.Participant(i)
        p.add_all_sessions()
        for s in p.sessions:
            print(s)
            for b in s.blocks:
                b.make_blinks(False)
        # touch the marker file without leaving an open handle
        open(done_fn, 'w').close()
예제 #11
0
def make(fo):
    '''Create event info xml files per block and log clean/artifact counts.

    fo  fid2ort mapping passed to Participant.
    Writes per-block counts (and a grand total) to artifact_info.txt.
    '''
    print('making event info xml files per block')
    nartifacts = 0
    nclean = 0
    # number of epochs to select per experiment type; an unknown exp_type now
    # raises KeyError instead of silently reusing the previous block's value
    exp_type2nepoch = {'k': 20, 'o': 60, 'ifadv': 80}

    with open('artifact_info.txt', 'w') as fout:
        for i in range(1, 49):
            p = e.Participant(i, fid2ort=fo)
            p.add_all_sessions()
            for s in p.sessions:
                for b in s.blocks:
                    # skip blocks for which no downsampled (100hz) data exists
                    if not os.path.isfile(path.eeg100hz + windower.make_name(b) +
                                          '.npy'):
                        continue
                    if b.start_marker_missing or b.end_marker_missing:
                        # without markers nsamples must come from the data itself
                        d = load_100hz_numpy_block(windower.make_name(b))
                        w = windower.Windower(b, nsamples=d.shape[1], sf=100)
                    else:
                        # BUGFIX: this assignment used to be unconditional and
                        # overwrote the nsamples-based windower created above
                        w = windower.Windower(b, sf=100)
                    if not os.path.isfile(path.artifact_data_all_pp + w.name +
                                          '_pred.npy'):
                        print(path.artifact_data_all_pp + w.name + '_pred.npy',
                              'no prediction file present')
                        continue
                    nepoch = exp_type2nepoch[b.exp_type]
                    ii_xml = xml_cnn.xml_cnn(w,
                                             select_nartifact=nepoch,
                                             select_nclean=nepoch,
                                             cnn_model_name='cnn_5X5_2048_1')
                    ii_xml.make_index_info()
                    ii_xml.make_selection()
                    ii_xml.write()
                    nartifacts += ii_xml.nartifact_indices
                    nclean += ii_xml.nclean_indices
                    # str() wrap avoids TypeError if the counts are ints
                    line = (w.name + '\t' + str(ii_xml.nclean) + '\t' +
                            str(ii_xml.nartifact) + '\n')
                    print(line)
                    fout.write(line)

        fout.write('all_blocks\t' + str(nclean) + '\t' + str(nartifacts) + '\n')
예제 #12
0
import experiment as e
import video_log as vl

for participant_number in range(1, 49):
    print('Loading participant:', participant_number)
    p = e.Participant(participant_number)
    p.add_all_sessions()
    for s in p.sessions:
        v = vl.video_log(s.log)
        if not v.ok:
            print('skipping:', v.pp_id, v.exp_type, 'no marker file/or log file')
            continue
        # extract frames for every tenth marker
        for marker in v.marker2time:
            if marker % 10 == 0:
                v.extract_frames(marker, 5)

		
def make_matrix(fo,
                add_pp_info=False,
                normalize_data=True,
                save_directory=None,
                make_data=True,
                start_pp=1,
                overwrite=False):
    '''Create channel artifact training np matrices per block (1 sec, 99% overlap).

    fo              fid2ort mapping passed to Participant
    add_pp_info     include participant info in the info matrix
    normalize_data  normalize the windowed data
    save_directory  defaults to path.channel_artifact_training_data
    make_data       also window and save the eeg data (not only the info matrix)
    start_pp        first participant to process (allows resuming a run)
    overwrite       regenerate matrices that already exist on disk
    '''
    print(
        'making artifact training np matrices per block, with default 1 sec and 99 perc overlap'
    )
    if save_directory is None:
        save_directory = path.channel_artifact_training_data
    print('save directory:', save_directory)
    fout = open(path.bak + 'channel_nrows_per_block-p', 'w') if make_data else None
    nrows = 0

    try:
        for i in range(start_pp, 49):
            p = e.Participant(i, fid2ort=fo)
            p.add_all_sessions()
            for s in p.sessions:
                for b in s.blocks:
                    if os.path.isfile(path.channel_artifact_training_data +
                                      windower.make_name(b) +
                                      '_data.npy') and not overwrite:
                        # windowed data is already present
                        continue
                    if not os.path.isfile(path.eeg100hz + windower.make_name(b) +
                                          '.npy'):
                        # no downsampled data present to load
                        continue
                    print(windower.make_name(b))
                    d = load_100hz_numpy_block(windower.make_name(b))
                    if b.start_marker_missing or b.end_marker_missing:
                        # NOTE(review): nsamples is d.shape[1] * 10 here, unlike
                        # the other make_matrix which uses d.shape[1] — confirm
                        w = windower.Windower(b,
                                              nsamples=d.shape[1] * 10,
                                              sf=100,
                                              window_overlap_percentage=.99)
                    else:
                        w = windower.Windower(b,
                                              sf=100,
                                              window_overlap_percentage=.99)
                    f = windower.block2channel_fn_annotation(
                        w.b, path.channel_artifacts_clean)
                    if f == 0:
                        print('skipping:', w.name, 'NO ANNOTATION')
                        continue  # if there is no annotation file skip
                    print('processing:', w.name, w.fn_annotation)
                    w.make_channel_ca_info_matrix(add_pp_info=add_pp_info)
                    if make_data:
                        d = remove_channels(d)
                        d = windower.window_data(d,
                                                 w.windows['sf100'],
                                                 flatten=True,
                                                 normalize=normalize_data,
                                                 cut_off=300)
                        rows = d.shape[0]
                        nrows += rows
                        fout.write(w.name + '\t' + str(rows) + '\n')
                        print(d.shape, w.info_matrix.shape[0])
                        # every windowed row must have a matching info row
                        assert d.shape[0] == w.info_matrix.shape[0]
                        # d = insert_target_channel_rows(d,nchannels=26,kernel_size=6)
                        # IMPORTANT: this was not commented when I checked the file
                        # however resulting files look like it was commented during execution
                        # insert target channel is done later in pipeline
                        # not sure what happened
                        np.save(save_directory + w.name + '_data', d)
                    np.save(save_directory + w.name + '_info', w.info_matrix)

        if make_data:
            fout.write(path.bak + 'all_blocks\t' + str(nrows) + '\n')
    finally:
        # close the row-count log even if an error occurs mid-run
        if fout is not None:
            fout.close()
예제 #14
0
def bad_epoch2block(be, fo=None):
    '''Return the block object that corresponds to the bad_epoch.'''
    participant = e.Participant(be.pp_id, fid2ort=fo)
    participant.add_session(be.exp_type)
    # sessions and blocks are exposed as attributes, e.g. s.sifadv / s.b3
    session = getattr(participant, 's' + be.exp_type)
    return getattr(session, 'b' + str(be.bid))
예제 #15
0
import numpy as np
import os
import path
'''Downsample all data from 1000hz to 100hz sf.
The code uses an MNE function to downsample.
Visually checked: both the time domain and the frequency domain looked reasonable.
100hz -> Nyquist at 50hz, which is the AC frequency (spike in the spectrum);
however, this is comfortably far from the 30hz lowpass filter on the data.

I do not know whether MNE uses zero-phase filtering before downsampling; should check.
'''

# target sampling frequency: downsample from 1000hz to 100hz
new_sf = 100
# collects failures; NOTE(review): never appended to in this snippet
error = []

# NOTE(review): 'e' (the experiment module) is not imported above — presumably
# 'import experiment as e' is missing or lives elsewhere; confirm.
p = e.Participant(1)
# reuse the fid2ort mapping of participant 1 for all others (speeds up loading)
fo = p.fid2ort
for i in range(1, 49):
    print('loading participant:', i)
    p = e.Participant(i, fid2ort=fo)
    p.add_all_sessions()
    for s in p.sessions:
        print(s.exp_type)
        for b in s.blocks:
            print(b.bid)
            # output filename for the downsampled block
            name = path.eeg100hz + b.block2name()
            print(name, name + '.npy')
            # only process blocks that are not yet downsampled and not missing
            if not os.path.isfile(name + '.npy') and not b.block_missing:
                b.load_eeg_data(sf=new_sf)
                if b.raw != 0:
                    # NOTE(review): nothing is saved here — an np.save call looks
                    # missing, unless load_eeg_data writes the file itself; verify
                    print('saving data to:', name)
예제 #16
0
	def _load_participant(self, number):
		'''Load one participant (valid numbers: 1 - 48) and track it.

		The participant becomes self.current_p and is appended to self.pp.
		'''
		loaded = e.Participant(number)
		self.current_p = loaded
		loaded.add_all_sessions()
		self.pp.append(loaded)