Example #1
def configurar(canales, tipo_canal, frecuencia, repeticiones):
    ch_types = []
    ch_names = []
    sfreq = frecuencia
    rand = randint(3, 5)
    contador = repeticiones * (8 + rand)
    #plt.ion( )
    #fig, fig_axes = plt.subplots(nrows=canales, ncols=1, constrained_layout=True, sharex=True)
    fig_axes = 0
    global amp
    available_amps = libmushu.get_available_amps()
    print(available_amps)
    ampname = available_amps[0]
    amp = libmushu.get_amp(ampname)
    #amp = AmpDecorator(GUSBamp)
    #amp.configure(fs=frecuencia, channels=canales)
    nombre = time.asctime()
    nombre = nombre.replace(':',' ')
    tipo_canal = tipo_canal.lower()
    for i in range(0, canales + 2):
        if i < canales:
            ch_types.append(str(tipo_canal))
        if i >= canales:
            ch_types.append('stim')
    tipo_canal = tipo_canal.upper()
    for i in range(0, canales + 2):
        if i < canales:
            ch_names.append(str(tipo_canal) + ' 00' + str(i + 1))
        elif i == canales:
            ch_names.append('STI 014')
        else:
            ch_names.append('MK 000')
    info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
    return info, nombre, fig_axes, contador
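
The info object returned above is an ordinary MNE measurement info, so it can be passed straight to mne.io.RawArray. The sketch below is hypothetical and not part of the original code: the argument values are arbitrary, and the call to configurar() only works if libmushu actually finds a connected amplifier.

import numpy as np
import mne

# Hypothetical usage of configurar(); needs a libmushu-supported amplifier attached.
info, nombre, fig_axes, contador = configurar(8, 'eeg', 256, 5)

# Wrap a placeholder buffer (n_channels x n_samples) in an MNE Raw object.
datos = np.zeros((len(info['ch_names']), 1024))
raw = mne.io.RawArray(datos, info)
print(raw)
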
Example #2
    def __init__(self, amp_name, freq_sampling, len_buf_s, num_channels):

        multiprocessing.freeze_support()    # Needed on Windows

        #available_amps = libmushu.get_available_amps()
        #print('available_amps:', available_amps)
        #amp_name = available_amps[0]
        #print 'amp_name:', amp_name
        #amp = libmushu.get_amp(amp_name)
        #cfg = amp.presets
        #print 'cfg:', cfg
        #amp.configure(cfg)
        #amp.configure(mode='data', fs=128, channels=16)
        #amp.configure() no


        # Initialize gUSBamp
        #amp = libmushu.get_amp('gusbamp') it does not find it like this
        self.amp = libmushu.get_amp(amp_name)
        #amp.configure(fs=128, mode='recording')
        self.amp.configure() # Sampling freq is set in the gUSBamp client program
        self.amp.start()
        self.freq_s = freq_sampling


        # Initialize numpy buffers
        self.rec_data = np.zeros((len_buf_s * freq_sampling, num_channels))
        self.rec_times = np.zeros(len_buf_s * freq_sampling)
        print('self.rec_data.shape at init:', self.rec_data.shape)


        # Initialize flags
        self.is_new_data_available = False
        self.is_rec_stopped = False
        self.is_rec_finished = False


        # Other
        self.i_time_fast = 0
        self.new_data_counter = 0


        # Get some initial data
        len_initial_data_temp = 0
        while len_initial_data_temp < mi_params.LEN_INITIAL_DATA:
            data, _ = self.amp.get_data()
            len_initial_data_temp += data.shape[0]
Example #3
    def __init__(self, amp_name, freq_sampling, len_buf_s, num_channels):

        multiprocessing.freeze_support()  # Needed on Windows

        #available_amps = libmushu.get_available_amps()
        #print('available_amps:', available_amps)
        #amp_name = available_amps[0]
        #print 'amp_name:', amp_name
        #amp = libmushu.get_amp(amp_name)
        #cfg = amp.presets
        #print 'cfg:', cfg
        #amp.configure(cfg)
        #amp.configure(mode='data', fs=128, channels=16)
        #amp.configure() no

        # Initialize gUSBamp
        #amp = libmushu.get_amp('gusbamp') it does not find it like this
        self.amp = libmushu.get_amp(amp_name)
        #amp.configure(fs=128, mode='recording')
        self.amp.configure()  # Sampling freq is set in the gUSBamp client program
        self.amp.start()
        self.freq_s = freq_sampling

        # Initialize numpy buffers
        self.rec_data = np.zeros((len_buf_s * freq_sampling, num_channels))
        self.rec_times = np.zeros(len_buf_s * freq_sampling)
        print('self.rec_data.shape at init:', self.rec_data.shape)

        # Initialize flags
        self.is_new_data_available = False
        self.is_rec_stopped = False
        self.is_rec_finished = False

        # Other
        self.i_time_fast = 0
        self.new_data_counter = 0

        # Get some initial data
        len_initial_data_temp = 0
        while len_initial_data_temp < params.LEN_INITIAL_DATA:
            data, _ = self.amp.get_data()
            len_initial_data_temp += data.shape[0]
Example #4
    def __init__(self, master):
        self.amp_started = False

        ttk.Frame.__init__(self, master)
        self.master.title('Mushu')
        self.pack()

        available_amps = libmushu.get_available_amps()

        frame = tk.Frame(self)
        frame.pack(fill=tk.BOTH, expand=1)
        self.label1 = ttk.Label(frame, text='Select Amplifier')
        self.label1.grid(column=0, row=0, sticky='we')
        self.amp_combobox = ttk.Combobox(frame, values=[str(i) for i in available_amps])
        self.amp_combobox.grid(column=0, row=1, sticky='we')
        self.label2 = ttk.Label(frame, text='Configure Amplifier')
        self.label2.grid(column=1, row=0, sticky='we')
        self.configure_button = ttk.Button(frame, text='Configure', command=self.onConfigureButtonClicked)
        self.configure_button.grid(column=1, row=1, sticky='we')
        self.label3 = ttk.Label(frame, text='Start/Stop Amplifier')
        self.label3.grid(column=2, row=0, sticky='we')
        self.start_stop_button = ttk.Button(frame, text='Start', command=self.onStartStopButtonClicked)
        self.start_stop_button.grid(column=2, row=1, sticky='we')

        # set up the figure
        fig = Figure()
        self.canvas = FigureCanvas(fig, master=self.master)
        self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
        self.canvas.show()
        self.axis = fig.add_subplot(111)

        ampname = available_amps[0]
        self.amp = libmushu.get_amp(ampname)
        self.amp.start()

        self.channels = self.amp.get_channels()
        self.n_channels = len(self.channels)
        self.PAST_POINTS = 256
        self.SCALE = 30000

        self.init_plot()
        self.master.after_idle(self.visualizer)
Example #5
    #from wyrm import plot
    #plot.plot_spatio_temporal_r2_values(proc.sort_channels(epo))
    #print JUMPING_MEANS_IVALS
    #plot.plt.show()

    fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
    fv = proc.create_feature_vectors(fv)

    clf = proc.lda_train(fv)
    return clf


if __name__ == '__main__':
    logger.debug('Training...')
    clf = train(TRAIN_DATA)

    logger.debug('Starting Online experiment...')
    cnt = io.load_bcicomp3_ds2(TEST_DATA)
    amp = libmushu.get_amp('replayamp')
    # fast (non-realtime)
    amp.configure(data=cnt.data,
                  marker=cnt.markers,
                  channels=cnt.axes[-1],
                  fs=cnt.fs,
                  realtime=False,
                  samples=1000)
    # slow (realtime)
    #amp.configure(data=cnt.data, marker=cnt.markers, channels=cnt.axes[-1], fs=cnt.fs)
    online_experiment(amp, clf)
Example #6
fs = 5000
TR = 1.950
trsamples = int(TR * fs)
# you need to change the # of channels here, most likely
hpf = HPF(f=1.0, fs=fs, order=3, nbchan=nbchan)
lpf = LPF(f=1.0, fs=fs, order=3, nbchan=nbchan)
bpf = BPF(f=[12.0, 15.0], fs=fs, order=3, nbchan=nbchan)
#mr=MR(trsamples=10000, N_thr=5, corr_thr = 0.995, forget=6)
mr = MR(trsamples=trsamples,
        N_thr=5,
        corr_thr=0.995,
        forget=5,
        highpass=[3, 1.0, fs])

# make an 'amp' that reads in the data stream (LSL)
amp = libmushu.get_amp('lslamp')
amp.configure()  # this sets up the LSL making us able to use it
# if you wish to change settings - look in mushu/libmushu/drivers/labstreaminglayer.py
# you can use the python API of labstreaminglayer to fix things there
# https://github.com/labstreaminglayer/liblsl-Python/tree/b158b57f66bc82230ff5ad0433fbd4480766a849

# make a new LSL stream to send data away:
# first create a new stream info (here we set the name to BioSemi,
# the content-type to EEG, 8 channels, 100 Hz, and float-valued data) The
# last value would be the serial number of the device or some other more or
# less locally unique identifier for the stream as far as available (you
# could also omit it but interrupted connections wouldn't auto-recover)
info = StreamInfo('Python', 'EEG', nbchan, fs, 'float32', 'corrected')

outlet = StreamOutlet(info)
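
The snippet above only creates the outlet and never pushes data through it. A minimal, hypothetical continuation (not in the original script) could read a chunk from the LSL 'amp' and forward it; the actual artifact-correction step is left as a comment because the hpf/lpf/bpf/mr objects' interface is not shown here.

amp.start()
data, markers = amp.get_data()        # data: (n_samples, nbchan) array from the LSL stream
if len(data):
    # ... artifact correction with hpf/lpf/bpf/mr would go here ...
    outlet.push_chunk(data.astype('float32').tolist())  # one list per sample
amp.stop()
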
Example #7
import libmushu

# look for amplifiers connected to the system, and return a list of the
# respective classes
available_amps = libmushu.get_available_amps()

# select the first available amp and decorate it with tcp-marker- and
# save-to-file-functionality
ampname = available_amps[0]
amp = libmushu.get_amp(ampname)

# configure the amplifier
# amp.configure(cfg)

# start it and collect data until finished
amp.start()
while 1:
    data, trigger = amp.get_data()

# stop the amplifier
amp.stop()
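
Note that the acquisition loop above never terminates on its own. A hedged variant with an explicit stop condition might look like the following; the 10-second duration is an arbitrary choice for illustration.

import time

amp.start()
t_end = time.time() + 10              # record for roughly 10 seconds
while time.time() < t_end:
    data, trigger = amp.get_data()
    # ... process data and trigger here ...
amp.stop()
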
Example #8
 def on_amplifier_selected(self, event):
     idx = event.widget.current()
     ampstr = self.available_amps[idx]
     amp = libmushu.get_amp(ampstr)
     self.set_amplifier(amp)
Example #9
import libmushu

# look for amplifiers connected to the system, and return a list of the
# respective classes
available_amps = libmushu.get_available_amps()

# select the first available amp and decorate it with tcp-marker- and
# save-to-file-functionality
ampname = available_amps[0]
amp = libmushu.get_amp(ampname)

# configure the amplifier
amp.configure(cfg)

# start it and collect data until finished
amp.start()
while 1:
    data, trigger = amp.get_data()

# stop the amplifier
amp.stop()
Example #10
 def on_amplifier_selected(self, event):
     idx = event.widget.current()
     ampstr = self.available_amps[idx]
     amp = libmushu.get_amp(ampstr)
     self.set_amplifier(amp)
Example #11
    cnt = proc.subsample(cnt, 60)

    epo = proc.segment_dat(cnt, MARKER_DEF_TRAIN, SEG_IVAL)

    #from wyrm import plot
    #plot.plot_spatio_temporal_r2_values(proc.sort_channels(epo))
    #print JUMPING_MEANS_IVALS
    #plot.plt.show()

    fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
    fv = proc.create_feature_vectors(fv)

    cfy = proc.lda_train(fv)
    return cfy


if __name__ == '__main__':
    logger.debug('Training...')
    cfy = train(TRAIN_DATA)

    logger.debug('Starting Online experiment...')
    cnt = io.load_bcicomp3_ds2(TEST_DATA)
    amp = libmushu.get_amp('replayamp')
    if REALTIME:
        amp.configure(data=cnt.data, marker=cnt.markers, channels=cnt.axes[-1], fs=cnt.fs, blocksize_samples=4)
    else:
        amp.configure(data=cnt.data, marker=cnt.markers, channels=cnt.axes[-1], fs=cnt.fs, realtime=False, blocksize_samples=40)
    online_experiment(amp, cfy)

Example #12
 def setUp(self):
     self.amp = libmushu.get_amp('replayamp')
Example #13
 def setUp(self):
     self.amp = libmushu.get_amp('replayamp')
Example #14
def plot_psd_pb(filename, metafile, markerfile, *args, **kwargs):
    '''
    Plot the PSD (in dB) of a frequency range of your choice.

    This function is meant to be used after EEG acquisition. The PSD can
    compare two events if you so choose (such as eyes open vs. eyes closed),
    or you can plot just one event and look at the topographical distribution
    of a particular frequency on the scalp.

    Uses MNE-Python.

    Sample usage:
        plot_psd_pb('file.eeg', 'file.meta', 'file.marker', 'replay',
                    frequency_range=[9, 12], n_samples=500000,
                    n_fft=256, montage='biosemi64', resample_freq=1000)

    NOTE: At present this only works for comparing eyes open and eyes closed
    with the marker codes 201, 202, 203 and 204 from the EEG acquisition
    using the EEG-fMRI Localiser.

    Additionally, the function returns a raw array "rt_raw":
        raw = plot_psd_pb('file.eeg', 'file.meta', 'file.marker', 'replay',
                          frequency_range=[9, 12], n_samples=500000,
                          n_fft=256, montage='biosemi64', resample_freq=1000)
    '''

    import struct
    import json
    import time
    import matplotlib
    try:
        matplotlib.use('QT5Agg')
    except Exception:
        matplotlib.use('QT4Agg')
    import numpy as np
    import matplotlib.pyplot as plt
    import mne
    from mne.time_frequency import psd_welch
    import re

    import sys
    sys.path.append('../../mushu')
    sys.path.append('../../mushu/libmushu')

    import libmushu
    amp = libmushu.get_amp('replayamp')

    with open(filename, 'rb') as f:
        raw = np.fromfile(f, dtype=np.float32)
    raw = raw.reshape(len(raw) // 64, 64).transpose()

    # get markers and meta
    fhj = open(metafile)
    meta = json.load(fhj)
    fhj.close()

    if 'n_samples' not in kwargs.keys():
        n_samples = 500000
    else:
        if kwargs['n_samples'] > raw.shape[1]:
            raise ValueError(
                'Number of samples cannot be greater than the number of samples in the EEG file (%d)'
                % raw.shape[1])
        n_samples = kwargs['n_samples']

    if 'montage' not in kwargs.keys():
        montage = mne.channels.read_montage(kind='biosemi64')

    else:
        montage = mne.channels.read_montage(kind=kwargs['montage'])

    with open(markerfile) as file:
        content = file.readlines()

    content = [x.strip() for x in content]

    ## create the marker matrix

    ev_arr = []
    for i, item in enumerate(content):
        out = re.split(r"[\s|S|T]+", item)
        if 'Sync Off' in item:
            sample = int(float(out[0]) / 1000 * meta['Sampling Frequency'])
            code = 250
        elif '201' in out[1]:
            sample = int(float(out[0]) / 1000 * meta['Sampling Frequency'])
            code = int(out[1])
            ev_arr.append([sample, 0, code])

        elif '202' in out[1]:
            sample = int(float(out[0]) / 1000 * meta['Sampling Frequency'])
            code = int(out[1])
            ev_arr.append([sample, 0, code])

        elif '203' in out[1]:
            sample = int(float(out[0]) / 1000 * meta['Sampling Frequency'])
            code = int(out[1])
            ev_arr.append([sample, 0, code])

        elif '204' in out[1]:
            sample = int(float(out[0]) / 1000 * meta['Sampling Frequency'])
            code = int(out[1])
            ev_arr.append([sample, 0, code])

    ch = montage.ch_names[:64]
    fs = meta['Sampling Frequency']
    eo1_s = ev_arr[0][0]
    eo1_f = ev_arr[1][0]
    ec1_s = ev_arr[2][0]
    ec1_f = ev_arr[3][0]
    eo2_s = ev_arr[4][0]
    eo2_f = ev_arr[5][0]
    ec2_s = ev_arr[6][0]
    ec2_f = ev_arr[7][0]
    m1 = raw[:, eo1_s:ec1_f]
    m2 = raw[:, eo2_s:ec2_f]
    m = np.hstack((m1, m2))
    m = np.array(m.transpose())

    ## replay file
    if 'replay' in args:

        amp.configure(m, (), ch, fs, realtime=True, blocksize_samples=20)
        amp.start()
        alld = []
        allm = []
        starttime = time.time()
        newtime = starttime
        i = 0
        while time.time() - starttime < n_samples / fs:

            while time.time() - newtime < 0.5:
                pass
            else:
                data, marker = amp.get_data()

                alld.append(data)
                for mrk in marker:
                    allm.append(mrk)
                print(marker)

            print('%d' % i, end='', flush=True)
            i += 1
            newtime += 0.5

        amp.stop()
        m = np.concatenate(alld)

    if 'frequency_range' in kwargs.keys():
        if len(kwargs['frequency_range']) != 2:
            raise ValueError('Frequency range length is %d, needs to be 2.' %
                             len(kwargs['frequency_range']))
        frequency_range = kwargs['frequency_range']
    else:
        frequency_range = [2, 25]

    fmin, fmax = frequency_range

    #create info
    info = mne.create_info(ch_names=ch,
                           ch_types=['eeg' for i in range(len(ch))],
                           sfreq=fs,
                           montage=montage)

    ## get data
    rt = np.transpose(m)
    rt_raw = mne.io.RawArray(rt, info)

    # create marker channel for MNE python:
    if ev_arr:
        ev_arr[0][0] = 0
        ev_arr[1][0] = eo1_f - eo1_s
        ev_arr[2][0] = ec1_s - eo1_s
        ev_arr[3][0] = ec1_f - eo1_s - 10
        ev_arr[4][0] = ec1_f - eo1_s
        ev_arr[5][0] = (ec1_f - eo1_s) + (eo2_f - eo2_s)
        ev_arr[6][0] = (ec1_f - eo1_s) + (eo2_f - eo2_s) + (ec2_s - eo2_f)
        ev_arr[7][0] = (ec1_f - eo1_s) + (eo2_f - eo2_s) + (ec2_s - eo2_f) + (
            ec2_f - ec2_s) - 1
        print(ev_arr)
        stim_info = mne.create_info(['STI'], rt_raw.info['sfreq'], ['stim'])
        stim_data = np.zeros((1, len(rt_raw.times)))
        stim_raw = mne.io.RawArray(stim_data, stim_info)
        rt_raw.add_channels([stim_raw], force_update_info=True)

        # create the marker matrix:
        rt_raw.add_events(ev_arr)

    events = mne.find_events(rt_raw, initial_event=True)

    if 'resample_freq' in kwargs.keys():
        print('Resampling to %f, please be patient...' %
              kwargs['resample_freq'])
        rt_raw.resample(kwargs['resample_freq'], npad='auto')

    mne.set_eeg_reference(rt_raw)

    ## eyes open
    raw_eo1 = rt_raw.copy().crop(events[0][0] / info['sfreq'],
                                 events[1][0] / info['sfreq'])
    raw_eo2 = rt_raw.copy().crop(events[4][0] / info['sfreq'],
                                 events[5][0] / info['sfreq'])
    raw_eo = rt_raw.copy().crop(0, 0.1)
    raw_eo.append([raw_eo1, raw_eo2])

    ## eyes closed
    raw_ec1 = rt_raw.copy().crop(events[2][0] / info['sfreq'],
                                 events[3][0] / info['sfreq'])
    raw_ec2 = rt_raw.copy().crop(events[6][0] / info['sfreq'],
                                 events[7][0] / info['sfreq'])
    raw_ec = rt_raw.copy().crop(0, 0.1)
    raw_ec.append([raw_ec1, raw_ec2])

    # do psds
    if 'n_fft' not in kwargs.keys():
        n_fft = 16384
    else:
        n_fft = kwargs['n_fft']

    psd_eo, freqs_eo = psd_welch(raw_eo, fmin=fmin, fmax=fmax, n_fft=n_fft)
    psd_ec, freqs_ec = psd_welch(raw_ec, fmin=fmin, fmax=fmax, n_fft=n_fft)

    log_psd_eo = 10 * np.log10(psd_eo)
    log_psd_ec = 10 * np.log10(psd_ec)
    fig, ax = plt.subplots(1, 2)
    psds_mean_eo = log_psd_eo.mean(0)
    psds_mean_ec = log_psd_ec.mean(0)
    psds_diff = log_psd_ec - log_psd_eo
    psds_diff_mean = psds_diff.mean(0)
    x = [
        freqs_eo[int(
            np.where(psds_diff_mean == np.max(psds_diff_mean))[0][0] -
            np.std(psds_diff_mean).round())], freqs_eo[int(
                np.where(psds_diff_mean == np.max(psds_diff_mean))[0][0] +
                np.std(psds_diff_mean).round())]
    ]

    ## plot psds
    ax[0].plot(freqs_eo, psds_mean_eo, color='r', label='Eyes open')
    ax[0].plot(freqs_ec, psds_mean_ec, color='b', label='Eyes closed')
    ax[0].plot(freqs_eo, psds_diff_mean, color='g', label='Difference')
    ax[0].vlines(
        x,
        ymin=np.min(psds_diff_mean),
        ymax=np.max(psds_diff_mean) + 5,
        colors='m',
        label=('Suggested frequency bounds for NF: {} Hz and {} Hz').format(
            round(x[0]), round(x[1])))
    ax[0].legend()
    ax[0].set(title='Welch PSD (EEG)',
              xlabel='Frequency',
              ylabel='Mean Power Spectral Density (dB)',
              xticks=np.arange(round(np.min(freqs_ec)),
                               round(np.max(freqs_eo)),
                               step=2))

    ## plot topomap
    im, fl = mne.viz.plot_topomap(
        psds_diff.T[np.where(psds_diff_mean == np.max(psds_diff_mean))[0][0]],
        pos=info,
        axes=ax[1],
        vmin=-np.max(psds_diff.T[np.where(
            psds_diff_mean == np.max(psds_diff_mean))[0][0]]),
        vmax=np.max(psds_diff.T[np.where(
            psds_diff_mean == np.max(psds_diff_mean))[0][0]]))
    cbar = fig.colorbar(im, ax=ax[1])
    cbarlabel = ('Power Spectral Density (dB) at {} Hz').format(
        freqs_eo[np.where(psds_diff_mean == np.max(psds_diff_mean))[0][0]])
    cbar.set_label(cbarlabel)
    fig.tight_layout()
    print('Suggested frequency bounds for NF: {} Hz and {} Hz'.format(
        round(x[0]), round(x[1])))
    if 'show_fig' in args:
        fig.show()
    return rt_raw