Code Example #1
def make(fo):
    print('making artifacts, cnn bad epoch, xml files per block')
    cnn_model_name = 'cnn_5X5_2048_1'

    for i in range(1, 49):
        p = e.Participant(i, fid2ort=fo)
        p.add_all_sessions()
        for s in p.sessions:
            for b in s.blocks:
                if not os.path.isfile(path.eeg100hz + windower.make_name(b) +
                                      '.npy'):
                    continue
                if b.start_marker_missing or b.end_marker_missing:
                    # without start/end markers, take the sample count from the loaded data
                    d = load_100hz_numpy_block(windower.make_name(b))
                    w = windower.Windower(b, nsamples=d.shape[1], sf=100)
                else:
                    w = windower.Windower(b, sf=100)
                if not os.path.isfile(path.artifact_data_all_pp + w.name +
                                      '_pred.npy'):
                    print(path.artifact_data_all_pp + w.name + '_pred.npy',
                          'no prediction file present')
                    continue
                a = xml_cnn.xml_cnn(w)
                a.make_bad_epoch()
                a.bad_epochs2xml()
                a.write_bad_epoch_xml()
                print(w.name)
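These snippets are lifted from module level in the iceeg project, so their imports are not shown. A hedged sketch of the header such a script would need, inferred from the calls above rather than copied from the source; the alias e and the exact module names are assumptions:

import os
import numpy as np
import experiment as e   # assumption: module providing Participant, aliased as 'e' to match the calls above
import path              # assumption: project module holding directory constants such as eeg100hz
import windower          # provides Windower, make_name and window_data as used in this listing
import xml_cnn           # provides the xml_cnn class used to write bad-epoch xml files
# load_100hz_numpy_block and remove_channels are assumed to be helpers defined in the same file.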
Code Example #2
File: cnn_data.py  Project: martijnbentum/iceeg
	def block2eegdata(self,b):
		'''Load the 100 Hz eeg data that corresponds to the block object and return a windowed
		version of it, identical to the method used in make_artifact_matrix_v2.py.'''
		self.clean_up()
		self.d = load_100hz_numpy_block(windower.make_name(b))
		if b.start_marker_missing or b.end_marker_missing:
			# the eeg data in d has an sf of 100, while the windower expects an sf of 1000; the sf
			# parameter adjusts the start and end times of snippets, so nsamples needs to be multiplied by 10.
			w = windower.Windower(b,nsamples= self.d.shape[1] * 10, sf = 100,window_overlap_percentage = .99)
		else:
			w = windower.Windower(b,sf = 100,window_overlap_percentage = .99)
		self.d = remove_channels(self.d)
		self.d = windower.window_data(self.d,w.windows['sf100'],flatten=True,normalize= True)
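The comment above carries the key reasoning: d is sampled at 100 Hz, but the Windower computes snippet boundaries in 1000 Hz sample indices, so the sample count is scaled by 10. A small illustrative calculation (the block duration is made up):

sf_data = 100            # sampling frequency of the loaded eeg array
sf_windower = 1000       # sampling frequency the Windower assumes for start/end times
duration_s = 600         # hypothetical block duration in seconds
n_samples_100hz = duration_s * sf_data           # 60000 samples in d
nsamples_for_windower = n_samples_100hz * 10     # 600000, the value passed as nsamples above
assert nsamples_for_windower == duration_s * sf_windower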
Code Example #3
def make_matrix(fo, add_pp_info=False, normalize_data=True, save_directory=None, make_data=True):
	print('making artifact training np matrices per block, with default 1 sec and 99 perc overlap')
	if save_directory == None: save_directory = path.artifact_training_data
	print('save directory:',save_directory)
	if make_data: fout = open('nrows_per_block_v2','w')
	nrows = 0

	for i in range(1,49):
		p = e.Participant(i,fid2ort = fo)
		p.add_all_sessions()
		for s in p.sessions:
			for b in s.blocks:
				if os.path.isfile(path.artifact_training_data+ windower.make_name(b) +'.npy'):
					# check whether windowed data is already present
					continue
				if not os.path.isfile(path.eeg100hz + windower.make_name(b) +'.npy'):
					# check whether downsampled data is present to load
					continue
				print(windower.make_name(b))
				d = load_100hz_numpy_block(windower.make_name(b))
				if b.start_marker_missing or b.end_marker_missing:
					w = windower.Windower(b,nsamples= d.shape[1], sf = 100,window_overlap_percentage = .99)
				else:
					w = windower.Windower(b,sf = 100,window_overlap_percentage = .99)
				if w.fn_annotation == 0: 
					print('skipping:',w.name,'NO ANNOTATION')
					continue # if there is no annotation file skip
				print('processing:',w.name,w.fn_annotation)
				w.make_info_matrix(add_pp_info = add_pp_info)
				if make_data:
					d = remove_channels(d)
					d = windower.window_data(d,w.windows['sf100'],flatten=True,normalize= normalize_data)
					# d = unit_norm(d)
					# d = normalize_numpy_matrix(d)
					rows = d.shape[0]
					nrows += rows
					fout.write(w.name + '\t' + str(rows) + '\n')
					print (d.shape, w.info_matrix.shape[0])
					assert d.shape[0] == w.info_matrix.shape[0]
					np.save(save_directory+ w.name + '_data',d)
				np.save(save_directory+ w.name + '_info',w.info_matrix)

	if make_data:
		fout.write('all_blocks\t'+str(nrows)+'\n')
		fout.close()
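For each processed block the function saves two arrays, <name>_data.npy and <name>_info.npy, with one info row per windowed data row. A hedged sketch of reading one block back; the block name used here is a placeholder, real names come from windower.make_name(b):

import numpy as np

name = 'pp1_exp-k_bid-1'   # hypothetical block name, for illustration only
d = np.load(path.artifact_training_data + name + '_data.npy')
info = np.load(path.artifact_training_data + name + '_info.npy')
assert d.shape[0] == info.shape[0]   # one info row per windowed data row, as asserted above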
Code Example #4
def make(fo):
    print('making event info xml files per block')
    nartifacts = 0
    nclean = 0

    fout = open('artifact_info.txt', 'w')
    for i in range(1, 49):
        p = e.Participant(i, fid2ort=fo)
        p.add_all_sessions()
        for s in p.sessions:
            for b in s.blocks:
                if not os.path.isfile(path.eeg100hz + windower.make_name(b) +
                                      '.npy'):
                    continue
                if b.start_marker_missing or b.end_marker_missing:
                    # without start/end markers, take the sample count from the loaded data
                    d = load_100hz_numpy_block(windower.make_name(b))
                    w = windower.Windower(b, nsamples=d.shape[1], sf=100)
                else:
                    w = windower.Windower(b, sf=100)
                if not os.path.isfile(path.artifact_data_all_pp + w.name +
                                      '_pred.npy'):
                    print(path.artifact_data_all_pp + w.name + '_pred.npy',
                          'no prediction file present')
                    continue
                if b.exp_type == 'k': nepoch = 20
                if b.exp_type == 'o': nepoch = 60
                if b.exp_type == 'ifadv': nepoch = 80
                ii_xml = xml_cnn.xml_cnn(w,
                                         select_nartifact=nepoch,
                                         select_nclean=nepoch,
                                         cnn_model_name='cnn_5X5_2048_1')
                ii_xml.make_index_info()
                ii_xml.make_selection()
                ii_xml.write()
                nartifacts += ii_xml.nartifact_indices
                nclean += ii_xml.nclean_indices
                print(w.name + '\t' + str(ii_xml.nclean) + '\t' +
                      str(ii_xml.nartifact) + '\n')
                fout.write(w.name + '\t' + str(ii_xml.nclean) + '\t' +
                           str(ii_xml.nartifact) + '\n')

    fout.write('all_blocks\t' + str(nclean) + '\t' + str(nartifacts) + '\n')
    fout.close()
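The three if statements above choose how many epochs to select per experiment type. The same mapping written as a lookup table, which also makes an unexpected exp_type fail loudly instead of leaving nepoch undefined:

# Equivalent lookup for the per-experiment epoch counts used above.
nepoch_per_exp_type = {'k': 20, 'o': 60, 'ifadv': 80}
nepoch = nepoch_per_exp_type[b.exp_type]   # raises KeyError for an unknown exp_type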
Code Example #5
    def __init__(self,
                 b,
                 length=4,
                 corrector='martijn',
                 filename='',
                 load_xml=True):
        '''Interface to easily annotate eeg signal.
        b           block object
        length      duration in seconds of an epoch in the interface
        corrector   name of the corrector (if automatic it should be computer)
        filename    xml file loaded for bad_epochs; default is to generate a filename based on block info
        '''
        self.show_label = False
        self.show_correctness = False
        self.show_perc = False

        self.key_dict = {
            '1': 'clean',
            '2': 'artifact',
            'c': 'channel',
            'u': 'signal_unclear',
            'd': 'ac-balance_unclear',
            '3': 'back'
        }
        self.redraw = False
        self.b = b
        self.w = windower.Windower(b, sf=100)
        self.set_info()
        if filename == '' or type(filename) != str:
            self.filename = path.artifact_data_all_pp + self.w.name + '_artifacts-sf100.xml'
        else:
            self.filename = filename
        self.length = int(float(length) * 100)
        self.before, self.after = int(self.length / 2), int(self.length / 2)
        self.e_index = 0
        self.event_dict = {}
        self.corrector = corrector
        self.bad_epochs = []
        self.boundaries = []
        self.load_from_xml(self.filename)
        self.load_data()
        self.last_save = time.time()
        self.make_epoch()
        print(self.bad_epochs)
        self.plot_epoch('all')
        self.reset_visible()
        self.handle_plot(True)
        self.redraw = False
        self.run()
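The enclosing class name is not visible in this excerpt, so the sketch below uses a placeholder; it only restates how the constructor above would be called for a single block object b:

# Hedged sketch; 'Annotator' is a placeholder for the class this __init__ belongs to,
# and b is assumed to be a loaded block object.
annotator = Annotator(b, length=4, corrector='martijn')
# With filename left empty, the xml path defaults to
# path.artifact_data_all_pp + <windower name> + '_artifacts-sf100.xml'.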
Code Example #6
    def __init__(
            self,
            b=None,
            cnn_ch_model_name='rep-26_perc-20_fold-1_part-70_kernel-6_model7',
            save_dir=None,
            bad_channels=[],
            filename='',
            load_predictions=True,
            use_adjusted_prediction=False,
            minimal_clean_duration=2000,
            minimal_artifact_duration=1000):
        '''Writes artifact info generated with manual_artifact_coder to xml files

		b 			block object
		cnn_model.. name of the cnn model that generated the predictions
		save_dir 	directory to save data 
		bad_chan... a list of bad_channel objects, can be empty
		filename 	xml filename, for loading or writing
		'''
        self.b = b
        self.cnn_ch_model_name = cnn_ch_model_name
        self.ch_names = utils.load_selection_ch_names()
        self.use_adjusted_prediction = use_adjusted_prediction
        self.minimal_artifact_duration = minimal_artifact_duration
        self.minimal_clean_duration = minimal_clean_duration
        if save_dir == None: self.save_dir = path.artifact_ch_cnn_xml
        elif not os.path.isdir(save_dir):
            print('Could not locate:', save_dir, 'using default:',
                  path.artifact_ch_cnn_xml)
            self.save_dir = path.artifact_ch_cnn_xml
        else:
            self.save_dir = save_dir
        if self.b != None:
            self.w = windower.Windower(b, sf=100)
            self.name = windower.make_name(self.b)
            self.filename = make_filename(self.w, self.cnn_ch_model_name)
            if load_predictions: self.load_predictions()
            else: self.loaded = False
            if self.loaded: self.set_indices()
        else:
            self.bad_channels = bad_channels
            self.filename = filename
            self.nclean_indices = 'NA'
            self.nartifact_indices = 'NA'
        # self.make_index_info()
        self.cnn_result = etree.Element('artifacts')
Code Example #7
    def __init__(self,
                 b=None,
                 cnn_model_name='rep-3_perc-50_fold-2_part-90',
                 save_dir=None,
                 bad_epochs=[],
                 filename='',
                 load_predictions=True,
                 use_corrected=True):
        '''Writes artifact info generated with manual_artifact_coder to xml files

		b 			block object
		cnn_model.. name of the cnn model that generated the predictions
		save_dir 	directory to save data 
		bad_epochs 	a list of bad_epoch objects, can be empty
		filename 	xml filename, for loading or writing
		use_cor... 	whether to use the xml that is based on automatic cnn annotation and manually 
					corrected, if no such file exists it will revert to the auto file
		'''
        self.b = b
        self.cnn_model_name = cnn_model_name
        if save_dir == None: self.save_dir = path.artifact_cnn_xml
        elif not os.path.isdir(save_dir):
            print('Could not locate:', save_dir, 'using default:',
                  path.artifact_cnn_xml)
            self.save_dir = path.artifact_cnn_xml
        else:
            self.save_dir = save_dir
        if self.b != None:
            self.w = windower.Windower(b, sf=100)
            self.name = windower.make_name(self.b)
            self.filename = make_filename(self.w, self.cnn_model_name,
                                          use_corrected)
            if load_predictions: self.load_predictions()
            else: self.loaded = False
            if self.loaded: self.set_indices()
        else:
            self.bad_epochs = bad_epochs
            self.filename = filename
            self.nclean_indices = 'NA'
            self.nartifact_indices = 'NA'
        # self.make_index_info()
        self.cnn_result = etree.Element('artifacts')
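When b is None the object is assembled from an existing list of bad_epoch objects and an explicit filename instead of loaded predictions, and the index counts fall back to 'NA'. A hedged sketch of that path, assuming this __init__ belongs to the xml_cnn class used in Examples #1 and #4 (the excerpt does not show the class name); the bad_epochs list and filename are placeholders:

a = xml_cnn.xml_cnn(b=None,
                    bad_epochs=my_bad_epochs,               # placeholder list of bad_epoch objects
                    filename='some_block_artifacts.xml')    # placeholder xml filename
# a.nclean_indices and a.nartifact_indices are 'NA' on this path, as set in the constructor.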
Code Example #8
def make_matrix(fo,
                add_pp_info=False,
                normalize_data=True,
                save_directory=None,
                make_data=True,
                start_pp=1,
                overwrite=False):
    print(
        'making artifact training np matrices per block, with default 1 sec and 99 perc overlap'
    )
    if save_directory == None:
        save_directory = path.channel_artifact_training_data
    print('save directory:', save_directory)
    if make_data: fout = open(path.bak + 'channel_nrows_per_block-p', 'w')
    nrows = 0

    for i in range(start_pp, 49):
        p = e.Participant(i, fid2ort=fo)
        p.add_all_sessions()
        for s in p.sessions:
            for b in s.blocks:
                if os.path.isfile(path.channel_artifact_training_data +
                                  windower.make_name(b) +
                                  '_data.npy') and not overwrite:
                    # check whether windowed data is already present
                    continue
                if not os.path.isfile(path.eeg100hz + windower.make_name(b) +
                                      '.npy'):
                    # check whether downsampled data is present to load
                    continue
                print(windower.make_name(b))
                d = load_100hz_numpy_block(windower.make_name(b))
                if b.start_marker_missing or b.end_marker_missing:
                    w = windower.Windower(b,
                                          nsamples=d.shape[1] * 10,
                                          sf=100,
                                          window_overlap_percentage=.99)
                else:
                    w = windower.Windower(b,
                                          sf=100,
                                          window_overlap_percentage=.99)
                f = windower.block2channel_fn_annotation(
                    w.b, path.channel_artifacts_clean)
                if f == 0:
                    print('skipping:', w.name, 'NO ANNOTATION')
                    continue  # if there is no annotation file skip
                print('processing:', w.name, w.fn_annotation)
                w.make_channel_ca_info_matrix(add_pp_info=add_pp_info)
                if make_data:
                    d = remove_channels(d)
                    d = windower.window_data(d,
                                             w.windows['sf100'],
                                             flatten=True,
                                             normalize=normalize_data,
                                             cut_off=300)
                    rows = d.shape[0]
                    nrows += rows
                    fout.write(w.name + '\t' + str(rows) + '\n')
                    print(d.shape, w.info_matrix.shape[0])
                    assert d.shape[0] == w.info_matrix.shape[0]
                    # d = insert_target_channel_rows(d,nchannels=26,kernel_size=6)
                    # IMPORTANT: this was not commented when I checked the file
                    # however resulting files look like it was commented during execution
                    # insert target channel is done later in pipeline
                    # not sure what happened
                    np.save(save_directory + w.name + '_data', d)
                np.save(save_directory + w.name + '_info', w.info_matrix)

    if make_data:
        fout.write('all_blocks\t' + str(nrows) + '\n')
        fout.close()
Code Example #9
def load_windower(f, fo):
    b = filename2block(f, fo)
    w = windower.Windower(b, window_overlap_percentage=.99, sf=100)
    w.make_ca_info_matrix(True)
    return w