Example #1
def pre_post_freqratio_comparison():

	for subjID in subjIDs:
		d = pd.HDFStore(os.path.join(studydir, 'Combined', '%s_silent.h5' % subjID))
		df = d['df']
		d.close()

		df = mark_good(df)
		df = df[df.good]
		df['FFTabs'] = np.abs(df.FFT)

		lowhighbands = dict(low=[1,8], high=[30,70])
		df = EEG.calc_freqband_power_all(df, lowhighbands, fft_freq)

		df['highlow'] = df.high / df.low
		epochgp = df.groupby(['hemi', 'epoch'])
		epochmean = epochgp.highlow.apply(np.mean)
		epocherr = epochgp.highlow.apply(np.std)

		colors = dict(Pre = 'b', Post = 'r')
		axs = dict(l = 1, r = 2)
		fig = plt.figure()
		for hemi in hemis:
			ax = fig.add_subplot(1, 2, axs[hemi])
			ax.set_title('%s hemisphere' % hemi)
			ax.bar([0, 1], epochmean[hemi][::-1], yerr = epocherr[hemi][::-1])  # reverse yerr to match the reversed means
			ax.set_xticks([0.4, 1.4])
			ax.set_xticklabels(epochmean[hemi].index[::-1])

		fig.suptitle('%s (%s ear lesion)' % (subjID, lesionear[subjID]))
		fig.savefig(os.path.join(studydir, 'Analysis', '%s_freqratio.png' % subjID))
		fig.clf()
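
# A minimal, self-contained sketch of the band-power step above. The exact
# behavior of EEG.calc_freqband_power_all is an assumption here: mean |FFT|
# within each band along a known frequency axis. All names and shapes below
# are hypothetical.
import numpy as np

def band_power(fft_abs, fft_freq, band):
    '''Mean spectral magnitude inside [band[0], band[1]] Hz.'''
    mask = (fft_freq >= band[0]) & (fft_freq <= band[1])
    return fft_abs[..., mask].mean(axis=-1)

fft_freq = np.linspace(0, 100, 512)          # hypothetical frequency axis
fft_abs = np.abs(np.random.randn(10, 512))   # hypothetical per-trial |FFT|
highlow = (band_power(fft_abs, fft_freq, (30, 70))
           / band_power(fft_abs, fft_freq, (1, 8)))  # high/low power ratio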
Example #2
    def __init__(self):
        QThread.__init__(self)
        self.current_challenge = None
        self.challenges = defaultdict(list)
        self.progress = ProgressBarThread.ProgressBarThread()
        self.progress.signal_progress.connect(self.change_progress)
        self.progress.signal_toggle_lights.connect(self.prompter_toggle_lights)
        self.progress.signal_start_new_stage.connect(self.start_new_stage)
        #self.camera_thread = Camera.WebcamRecorder()  # Main Camera thread
        self.eeg_thread = EEG.EEG()  # Main EEG client thread
        self.eeg_thread.sample_signal.connect(self.get_samples)
        self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.StreamPlayback)
        self.beeper_thread = Beeper.Beeper()  # beeper thread
        self.current_samples = []
        self.current_original_duration = 0
        self.current_eyes_open = True
        self.current_movie = ""
        self.used_challenges = []
        self.current_state_filename = ""
        self.session_labels_dict = dict()
        self.symbols_count = 15  # number of repetitions for each class
        self.symbols_status = defaultdict(int)  # repetitions completed per class

        # EEG samples collection
        self.current_log_saving = False
        self.current_log_filename = ""
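
# The constructor above wires Qt signals to slots; a minimal sketch of the
# same pattern (class, signal, and slot names here are illustrative, not from
# the original project; assumes PyQt5):
from PyQt5.QtCore import QThread, pyqtSignal

class SampleSource(QThread):
    sample_signal = pyqtSignal(list)

    def run(self):
        # push one batch of samples to whichever slots are connected
        self.sample_signal.emit([0.1, 0.2, 0.3])

def get_samples(samples):
    print('received', samples)

source = SampleSource()
source.sample_signal.connect(get_samples)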
Example #3
File: views.py  Project: Vishal1003/SEG
def upload(request):
    context = {}
    data = None  # avoid a NameError when the request is not a POST
    if request.method == 'POST':
        uploaded_file = request.FILES['document']
        fs = FileSystemStorage()
        name = fs.save(uploaded_file.name, uploaded_file)
        url = fs.url(name)
        context['url'] = url
        data = eg.predict(pd.read_csv("/home/sjsingh/Desktop/UI/" + url))
        print(data)
    return render(request, 'result.html', {'data': data})
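
# Hypothetical URL wiring for the view above (route and module names are
# assumptions, not taken from the Vishal1003/SEG project):
# from django.urls import path
# from . import views
# urlpatterns = [path('upload/', views.upload, name='upload')]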
Example #4
def add_lfp_filt(f, fs = 384.384384384):
	'''Filter 60 Hz noise out of every LFP trace in f and store it as 'lfp_filt'.'''
	lfp = f['lfp'].value
	nchan, ntrials, nsamp = lfp.shape

	lfp_filt = np.empty_like(lfp)

	for i in range(nchan):
		for j in range(ntrials):
			lfp_filt[i, j, :] = EEG.remove_60hz(lfp[i, j, :])

	f.create_dataset('lfp_filt', data = lfp_filt, compression = 'gzip')
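
# EEG.remove_60hz is not shown here; a plausible stand-in is a zero-phase
# notch filter at 60 Hz (an assumption about its behavior, not the original
# implementation):
import numpy as np
from scipy import signal

def remove_60hz(x, fs=384.384384384, q=30.0):
    b, a = signal.iirnotch(60.0, q, fs=fs)  # narrow notch at the mains frequency
    return signal.filtfilt(b, a, x)         # filter forward and backward (zero phase)

y = remove_60hz(np.random.randn(2000))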
Example #5
def combine(epoch = 'Pre', stimtype = 'noise'):

	'''
	combine left and right ear stimulation blocks into one pandas DataFrame
	'''
	sesspaths = glob.glob(os.path.join(studydir, 'Sessions', epoch, '*'))
	
	for sesspath in sesspaths:
		absol, sessID = os.path.split(sesspath)
		print(sessID)
		fpaths = glob.glob(os.path.join(sesspath, 'fileconversion', '*.h5'))
		subjIDs = np.unique([os.path.splitext(os.path.split(fpath)[1])[0].split('_')[0] for fpath in fpaths])

		hemis = 'rl'
		ears = 'rl'

		for subjID in subjIDs:

			LFP = []; LFPFILT = []; ATTEN = []; EAR = []; HEMI = []; RELHEMI = []; SESSID = []

			for ear in ears:

				fpath = glob.glob(os.path.join(sesspath, 'fileconversion', '%s_%s_%s_b*.h5' % (subjID, ear.upper(), stimtype)))[0]

				fin = h5py.File(fpath, 'r')
				lfp = fin['lfp'].value
				stimparams = fin['stimID'].value
				fin.close()

				nchan, ntrials, nsamp = lfp.shape

				for i in range(nchan):

					for j in range(ntrials):
						LFP.append(lfp[i, j, :])
						LFPFILT.append(EEG.remove_60hz(lfp[i, j, :]))
					ATTEN.extend(stimparams[:, 1].tolist())
					EAR.extend([ear]*ntrials)
					HEMI.extend([hemis[i]]*ntrials)
					# 'ipsi' when the recording hemisphere matches the stimulated ear
					relhemi = 'ipsi' if ear == hemis[i] else 'contra'
					RELHEMI.extend([relhemi]*ntrials)
					SESSID.extend([sessID]*ntrials)

			d = dict(lfp = LFP, lfpfilt = LFPFILT, atten = ATTEN, ear = EAR, hemi = HEMI, relhemi = RELHEMI, sess = SESSID)
			df = pd.DataFrame(d)

			fout = pd.HDFStore(os.path.join(sesspath, 'both', '%s_both.h5' % subjID))
			fout['df'] = df
			fout.close()
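
# Reading one combined block back (paths mirror those written above; the
# groupby is just an illustrative query):
# store = pd.HDFStore(os.path.join(sesspath, 'both', '%s_both.h5' % subjID))
# df = store['df']
# store.close()
# df.groupby('relhemi').atten.mean()  # e.g. mean attenuation, ipsi vs. contra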
Example #6
    def __init__(self):
        QThread.__init__(self)
        self.current_challenge = None
        self.challenges = defaultdict(list)
        self.progress = ProgressBarThread.ProgressBarThread()
        self.progress.signal_progress.connect(self.change_progress)
        self.progress.signal_toggle_lights.connect(self.prompter_toggle_lights)
        self.progress.signal_start_new_stage.connect(self.start_new_stage)
        self.camera_thread = Camera.WebcamRecorder()  # Main Camera thread
        self.eeg_thread = EEG.EEG()  # Main EEG client thread
        self.eeg_thread.sample_signal.connect(self.get_samples)
        self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.StreamPlayback)
        self.current_samples = []
        self.current_original_duration = 0
        self.current_eyes_open = True
        self.current_movie = ""
        self.used_challenges = []

        # EEG samples collection
        self.current_log_saving = False
        self.current_log_filename = ""
Example #7
# Training Parameters
learning_rate = 0.001
training_steps = 100
batch_size = 10
display_step = 1

# Network Parameters
num_input = 20  # data input (# signals)
# ~ timesteps = 1155 # timesteps
timesteps = 815  # timesteps
num_hidden = 128  # hidden layer num of features
num_classes = 2  # Two classes: Inter and pre

data_division = {}
patient_data = EEG.Patient_data(data_division, FLAGS.data_path)

# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])


def RNN(x):
    # GPU version
    # ~ lstm_cuda = tf.contrib.cudnn_rnn.CudnnLSTM(1,num_hidden)
    # ~ outputs, _ = lstm_cuda(x)
    lstm_cell = tf.contrib.rnn.LSTMBlockCell(num_hidden, forget_bias=1.0)
    # ~ lstm_cell = tf.contrib.rnn.LSTMCell(num_hidden, forget_bias=1.0)
    outputs, _ = tf.nn.dynamic_rnn(cell=lstm_cell, inputs=x, dtype=tf.float32)
    # we take only the output at the last time step
    # ~ fc_layer = tf.layers.dense(outputs[:,-1,:], 30, activation=tf.nn.relu)
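    # The function is truncated in the source; a plausible completion
    # (an assumption, not the original code) maps the last time step to logits:
    # logits = tf.layers.dense(outputs[:, -1, :], num_classes)
    # return logits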
Example #8
import numpy as np
#import pygtk

import EEG

#p = EEG.EEG('Dog_2', 'interictal', 17)
#p = EEG.EEG('Dog_2', 'preictal', 17)
#p = EEG.EEG('Patient_2', 'interictal', 17)
p = EEG.EEG('Patient_2', 'preictal', 17)
p.load()
print(p)
#p.normalize_channels()
p.normalize_overall()

#print np.shape(p.data)  == (16, ~240k)

data = p.data
eeg = np.rollaxis(data, 1)

# see p797 of matplotlib pdf : matshow() for 2d colour-map of (say) correlation matrix

#import matplotlib
#matplotlib.use('Qt4Agg')  # Needs PySide - difficult to install, large
#matplotlib.use('GTKAgg')  # Needs pygtk   - found script on Gist.github.com
#matplotlib.use('TkAgg')  # Needs Tkinter - difficult within VirtualEnv
import matplotlib.pyplot as plt

#data = p.data
n = p.n_channels
spacing = 5
trace_levels = spacing * np.arange(n, 0, -1)
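
# A plausible continuation of the offsets above: draw each channel at its own
# vertical level (this plotting step is an assumption, not the original code):
# for i in range(n):
#     plt.plot(eeg[:, i] + trace_levels[i], lw=0.5)
# plt.yticks(trace_levels, ['ch%d' % i for i in range(n)])
# plt.show()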
Example #9
import numpy as np
import tensorflow as tf

import EEG
import config
import FC
import TCN
import RNN
import CNN

flags = tf.flags
flags.DEFINE_string("data_path", None,
                    "Where the training/test data is stored.")
flags.DEFINE_string("NN", None, "Type of neural network.")
flags.DEFINE_string("patient", None, "Patient number")
FLAGS = flags.FLAGS
cfg = config.Config(data_path=FLAGS.data_path,
                    NN=FLAGS.NN,
                    patient=int(FLAGS.patient))
patient_data = EEG.Patient_data(cfg)
cfg.N_features = np.shape(patient_data.segments)[1]
tf.random.set_random_seed(1)

# tf Graph input
X = tf.placeholder("float", [None, 1, cfg.N_features, cfg.num_inputs])
Y = tf.placeholder("float", [None, cfg.num_classes])

if (cfg.NN == "TCN"):
    logits = TCN.TCN(X, cfg)
elif (cfg.NN == "FC"):
    logits = FC.FC(X, cfg)
elif (cfg.NN == "RNN"):
    logits = RNN.RNN(X, cfg)
elif (cfg.NN == "CNN"):
    logits = CNN.CNN(X, cfg)
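
# A typical TF1-style continuation (an assumption; the original is truncated
# here, and cfg.learning_rate is a hypothetical config field):
# loss = tf.reduce_mean(
#     tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))
# train_op = tf.train.AdamOptimizer(cfg.learning_rate).minimize(loss)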
Example #10
	f = h5py.File(fpaths[0])

	nlfp = f['lfp'].value.shape[2]
	ustimid = np.unique(f['stimID'].value[:, 1])
	print(ustimid)

	print('\t\t#LFP samples:\t%u' % nlfp)
	print('\t\t')
	f.close()


'''
def combine_pre_post():

	for subjID in subjIDs:
		for epoch in epochs:
			print(subjID, epoch)
			sesspaths = os.path.join(studydir, 'Sessions', epoch)
			for sesspath in sesspaths:
'''

'''
EEG.fileconvert_all(experiment = 'awakeeeg', epoch = 'Pre')
convert_to_pd(epoch = 'Pre')
'''

Example #11
import EEG as t
import pandas as pd
t.predict(pd.read_csv('/home/sjsingh/Desktop/Smart EEG/test - test.csv'))
Example #12
all_sprites.add(PT1)
all_sprites.add(P1)
 
platforms = pygame.sprite.Group()
platforms.add(PT1)
 
for x in range(random.randint(4, 5)):
    # regenerate each platform until it no longer collides with existing ones
    pl = platform()
    while check(pl, platforms):
        pl = platform()
    platforms.add(pl)
    all_sprites.add(pl)

s = EEG.SerialGet(baud=BAUD, port=PORT)
length = len(s.ypr)

while True:

    s.readval()
    jumpstrength = 0

    P1.update()
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()

    for i in range(length):
        if s.ypr[i] >= THRESHOLD:
Example #13
    to_hickle = dict(
        features=all_params,
        signal_period_starts=signal_period_starts,
    )

    # Dump data, with compression
    f = "data/feat/%s/%s_%s_segment_%04d.hickle" % (p.subject, p.subject,
                                                    p.desc, p.num)
    hickle.dump(to_hickle, f, mode='w', compression='gzip')


if (args.fft > 0):
    if False:  # Just do one sample
        #p = EEG.EEG(_subject, 'interictal', 17)
        p = EEG.EEG(_subject, 'preictal', 18)
        preprocess(p)
        exit(1)

    if True:  # Load in the survey, and do the fft thing for everything
        #d = "data/orig/%s/" % (_subject, )
        csv = open("data/survey.%s.csv" % (_subject, ), 'r')
        headers = csv.readline()
        for line in csv.readlines():
            p = EEG.EEG(_subject, '', '')  # Nonsense entries
            p.survey_line_read(line)

            preprocess(p)

if (args.scale > 0):
    ## concatenate entries  (NB, must do train first, to generate min/max meta-data)
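
# A minimal sketch of what the scaling step could look like, assuming the
# per-feature min/max metadata mentioned above has already been collected
# from the training set (names here are assumptions):
import numpy as np

def scale_to_unit(x, lo, hi):
    '''Map features into [0, 1] using stored per-feature min/max.'''
    return (np.asarray(x) - lo) / (hi - lo + 1e-12)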
Example #14
import argparse
import os
import re

import EEG
parser = argparse.ArgumentParser(description='Survey the data')
parser.add_argument('--subject',
                    type=str,
                    required=True,
                    help="Dog_{1,2,3,4,5}, Patient_{1,2}")
args = parser.parse_args()

# 'Dog_2', 'Patient_2'
_subject = args.subject

d = "data/orig/%s/" % (_subject, )

csv = open("data/survey.%s.csv" % (_subject, ), 'w')
csv.write(EEG.EEG.survey_header() + "\n")

f_match = re.compile(r'%s_(.*?)_segment_(\d*?)\.mat' % (_subject, ))
for f in sorted(os.listdir(d)):
    m = re.match(f_match, f)
    if m is None: continue
    desc, num = m.group(1), int(m.group(2))

    print(desc, num)

    p = EEG.EEG(_subject, desc, num)
    p.load()
    csv.write(p.survey_line_write())

csv.close()
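
# Reading the survey back (mirrors the writer above; pandas is an assumption,
# any CSV reader would do):
# import pandas as pd
# survey = pd.read_csv("data/survey.%s.csv" % (_subject, ))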