Code example #1
def communicate(call, get_ready, sos):
    errors = 0
    error_sos = 0
    sos_time = 0

    timer = core.Clock()
    current_image = visual.ImageStim(mywin,
                                     image="png/mobile.png")  # set image
    current_image.draw()
    get_ready.text = 'You want to socialize. You can call anyone on the list using events of type "call" and values "1", "2" or "3". Right now, you want to call Livia. Note: each error will add 15s to your total time. \n\nPress any key to start'
    showText(get_ready)
    keys = waitForKeypress()

    mywin.flip()
    current_image.draw()
    mywin.flip()
    timer.reset()

    triggerevents = ["call"]
    stopevent = ("call", "end")
    trlen_samp = 50
    state = []
    endCall = False
    current_idx = 0
    print("Waiting for triggers: %s and endtrigger: %s.%s" %
          (triggerevents[0], stopevent[0], stopevent[1]))
    while endCall is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(
            triggerevents, trlen_samp, [], state, milliseconds=False)
        for ei in np.arange(len(events) - 1, -1, -1):
            ev = events[ei]
            if ev.type == "call":
                if ev.value == "1":
                    current_image = visual.ImageStim(
                        mywin, "png/mobile_mom.png")  # set image
                    if sos:
                        [sos_time,
                         error_sos] = sos_button(get_ready, current_image)
                        sos = False
                elif ev.value == "2":
                    current_image = visual.ImageStim(
                        mywin, "png/mobile_terry.png")  # set image
                else:
                    current_image = visual.ImageStim(
                        mywin, "png/mobile_livia.png")  # set image
                current_image.draw()
                mywin.flip()
            if ev.value == call:
                endCall = True
                current_image = visual.ImageStim(
                    mywin, "png/mobile_done.png")  # set image
                current_image.draw()
                mywin.flip()
                core.wait(2)
            else:
                errors = errors + 1
    return [timer.getTime(), errors, error_sos, sos_time]
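
The task scenarios on this page (communicate, navigate, watch_tv, sos_button) all share the same buffer-polling skeleton: connect once, then repeatedly call bufhelp.gatherdata with a list of trigger event types, a trial length in samples, a stop-event list (empty here) and the state returned by the previous call, and react to the events that come back. The following is a minimal sketch of that skeleton with the PsychoPy drawing stripped out; it assumes the buffer_bci bufhelp module is on sys.path, that a buffer server is running, and reuses the "call"/"end" event names from the example above.

import bufhelp

ftc, hdr = bufhelp.connect()         # blocks until a valid header is available

triggerevents = ["call"]             # event types that trigger a data grab
trlen_samp = 50                      # samples to collect per triggering event
state = []                           # propagated so already-seen events are not re-read
done = False
while not done:
    # returns the data slices, the triggering events, any stop events, and the updated state
    data, events, stopevents, state = bufhelp.gatherdata(
        triggerevents, trlen_samp, [], state, milliseconds=False)
    for ev in reversed(events):      # newest first, as in the examples on this page
        if ev.type == "call" and ev.value == "end":
            done = True
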
Code example #2
def navigate(path, navigation, error, get_ready, sos):
    errors = 0
    error_sos = 0
    sos_time = 0
    timer = core.Clock()

    current_image = visual.ImageStim(mywin,
                                     image="png/navigation1.png")  # set image
    current_image.draw()
    get_ready.text = 'Navigate from the front door to the couch using events of type "navigate" with values "left","right","down" and "up". Each dot represents one required event.\n\nNote: each error will add 15s to your total time. \n\nPress any key to start'
    showText(get_ready)
    keys = waitForKeypress()

    mywin.flip()
    current_image.draw()
    mywin.flip()
    timer.reset()

    triggerevents = ["navigate"]
    stopevent = ("navigate", "end")
    trlen_samp = 50
    state = []
    endNavigate = False
    current_idx = 0
    print("Waiting for triggers: %s and endtrigger: %s.%s" %
          (triggerevents[0], stopevent[0], stopevent[1]))
    while endNavigate is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(
            triggerevents, trlen_samp, [], state, milliseconds=False)
        for ei in np.arange(len(events) - 1, -1, -1):
            ev = events[ei]
            if ev.type == "navigate":
                if ev.value == path[current_idx]:
                    current_idx = current_idx + 1
                    current_image = visual.ImageStim(
                        mywin, navigation[current_idx])  # set image
                else:
                    current_image = visual.ImageStim(
                        mywin, error[current_idx])  # set image
                    errors = errors + 1
                current_image.draw()
                mywin.flip()
            if current_idx == len(navigation) - 1:
                current_image = visual.ImageStim(
                    mywin, "png/navigation_done.png")  # set image
                current_image.draw()
                mywin.flip()
                core.wait(2)
                endNavigate = True
            if current_idx > 5 and sos:
                [sos_time, error_sos] = sos_button(get_ready, current_image)
                sos = False
    return [timer.getTime(), errors, error_sos, sos_time]
Code example #3
def communicate(call,get_ready,sos):
    errors = 0
    error_sos = 0
    sos_time = 0
    
    timer = core.Clock()
    current_image = visual.ImageStim(mywin, image="png/mobile.png") # set image
    current_image.draw()
    get_ready.text ='You want to socialize. You can call anyone on the list using events of type "call" and values "1", "2" or "3". Right now, you want to call Livia. Note: each error will add 15s to your total time. \n\nPress any key to start'
    showText(get_ready)
    keys = waitForKeypress()
    
    mywin.flip()
    current_image.draw()
    mywin.flip()
    timer.reset()
    
    triggerevents=["call"]
    stopevent=("call","end")
    trlen_samp = 50
    state = []
    endCall = False
    current_idx = 0
    print("Waiting for triggers: %s and endtrigger: %s.%s"%(triggerevents[0],stopevent[0],stopevent[1]))
    while endCall is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,[], state, milliseconds=False)
        for ei in np.arange(len(events)-1,-1,-1):
            ev = events[ei]
            if ev.type == "call":
                if ev.value == "1":
                    current_image = visual.ImageStim(mywin, "png/mobile_mom.png") # set image
                    if sos:
                        [sos_time,error_sos] = sos_button(get_ready,current_image)
                        sos = False
                elif ev.value == "2":
                    current_image = visual.ImageStim(mywin, "png/mobile_terry.png") # set image
                else:
                    current_image = visual.ImageStim(mywin, "png/mobile_livia.png") # set image
                current_image.draw()
                mywin.flip()
            if ev.value == call:
                endCall = True
                current_image = visual.ImageStim(mywin, "png/mobile_done.png") # set image
                current_image.draw()
                mywin.flip()
                core.wait(2)
            else:
                errors = errors+1
    return [timer.getTime(),errors,error_sos,sos_time]
Code example #4
def navigate(path,navigation,error,get_ready,sos):
    errors = 0
    error_sos = 0
    sos_time = 0
    timer = core.Clock()
    
    current_image = visual.ImageStim(mywin, image="png/navigation1.png") # set image
    current_image.draw()
    get_ready.text ='Navigate from the front door to the couch using events of type "navigate" with values "left","right","down" and "up". Each dot represents one required event.\n\nNote: each error will add 15s to your total time. \n\nPress any key to start'
    showText(get_ready)
    keys = waitForKeypress()
    
    mywin.flip()
    current_image.draw()
    mywin.flip()
    timer.reset()
    
    triggerevents=["navigate"]
    stopevent=("navigate","end")
    trlen_samp = 50
    state = []
    endNavigate = False
    current_idx = 0
    print("Waiting for triggers: %s and endtrigger: %s.%s"%(triggerevents[0],stopevent[0],stopevent[1]))
    while endNavigate is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,[], state, milliseconds=False)
        for ei in np.arange(len(events)-1,-1,-1):
            ev = events[ei]
            if ev.type == "navigate":
                if ev.value == path[current_idx]:
                    current_idx = current_idx + 1
                    current_image = visual.ImageStim(mywin, navigation[current_idx]) # set image
                else:
                    current_image = visual.ImageStim(mywin, error[current_idx]) # set image
                    errors = errors+1
                current_image.draw()
                mywin.flip()
            if current_idx == len(navigation)-1:
                current_image = visual.ImageStim(mywin, "png/navigation_done.png") # set image
                current_image.draw()
                mywin.flip()
                core.wait(2)
                endNavigate = True
            if current_idx > 5 and sos:
                [sos_time,error_sos] = sos_button(get_ready,current_image)
                sos = False
    return [timer.getTime(),errors, error_sos, sos_time]
Code example #5
def processBufferEvents():
    global running
    trlen_ms = 600
    events = bufhelp.buffer_newevents()

    for evt in events:
        if str(evt.type) == 'calibrate' and evt.value == 'start':
            print('Calibration phase, gathering data...')
            data, events, stopevents = bufhelp.gatherdata("stimulus.target",
                                                          750,
                                                          ("calibrate", "end"),
                                                          milliseconds=True)
            pickle.dump({
                "events": events,
                "data": data
            }, open("subject_data", "wb"))  # pickle requires binary mode
        else:
            print(str(evt.sample) + ": " + str(evt))
Code example #6
#!/usr/bin/env python3
# Set up imports and paths
import sys, os
# Get the helper functions for connecting to the buffer
try:     pydir=os.path.dirname(__file__)
except:  pydir=os.getcwd()    
sigProcPath = os.path.join(os.path.abspath(pydir),'../../python/signalProc')
sys.path.append(sigProcPath)
import bufhelp 
import pickle
import h5py

# connect to the buffer, if no-header wait until valid connection
ftc,hdr=bufhelp.connect()

# grab data 
data, events, stopevents, pending = bufhelp.gatherdata()

# save the calibration data
pickle.dump({"events":events,"data":data,'hdr':hdr}, open(dname+'.pk','wb'))#N.B. to pickle open in binary mode

Code example #7
CLASSIFIER_FILENAME = cfg['p300']['classifier_filename']
SAMPLING_TIME = cfg['p300']['sampling_time']

#load the trained classifier
with open(os.path.join(DATA_PATH, CLASSIFIER_FILENAME), 'rb') as f:
    p = pickle.load(f)
    classifier = p['classifier']
    goodch = p['goodch']

# connect to the buffer, if no-header wait until valid connection
ftc, hdr = bufhelp.connect()

while True:
    # wait for data after a trigger event
    data, events, stopevents, _ = bufhelp.gatherdata(
        ["p300.flash"],
        SAMPLING_TIME, [("p300.singleseq", "end"), ("p300.seq", "end")],
        milliseconds=True)

    data = np.array(data)

    # stop processing if needed
    if "p300.seq" in [s.type for s in stopevents]:
        break

    # 0: Remove extra channels
    data = data[:, :, :NUM_OF_CHANNELS]
    # 1: detrend
    data = preproc.detrend(data)
    # 2: bad-channel removal; channels are last
    data = data[:, :, goodch]
    # 3: apply spatial filter
Code example #8
File: spSigProc.py  Project: jadref/buffer_bci
import pickle

bufhelp.connect()

trlen_ms = 600
run = True

print ("Waiting for startPhase.cmd event.")
while run:
    e = bufhelp.waitforevent("startPhase.cmd",1000, True)
    print("Got startPhase event: %s"%e)
    if e is not None:

        if e.value == "calibration":
            print("Calibration phase")
            data, events, stopevents = bufhelp.gatherdata("stimulus.tgtFlash",trlen_ms,("stimulus.training","end"), milliseconds=True)
            pickle.dump({"events":events,"data":data}, open("subject_data", "wb")) # pickle requires binary mode

        elif e.value == "train":
            print("Training classifier")
            data = preproc.detrend(data)
            data, badch = preproc.badchannelremoval(data)
            data = preproc.spatialfilter(data)
            data = preproc.spectralfilter(data, (0, .1, 10, 12), bufhelp.fSample)
            data, events, badtrials = preproc.badtrailremoval(data, events)
            mapping = {('stimulus.tgtFlash', '0'): 0, ('stimulus.tgtFlash', '1'): 1}
            linear.fit(data,events,mapping)
            bufhelp.update()
            bufhelp.sendevent("sigproc.training","done")

        elif e.value =="testing":
Code example #9
File: spSigProc.py  Project: kstandvoss/BCI17
bufhelp.connect()

trlen_ms = 600
run = True

print("Waiting for startPhase.cmd event.")
while run:
    e = bufhelp.waitforevent("startPhase.cmd", 1000, True)
    print("Got startPhase event: %s" % e)
    if e is not None:

        if e.value == "calibration":
            print("Calibration phase")
            data, events, stopevents = bufhelp.gatherdata(
                "stimulus.tgtFlash",
                trlen_ms, ("stimulus.training", "end"),
                milliseconds=True)
            pickle.dump({
                "events": events,
                "data": data
            }, open("subject_data", "wb"))  # pickle requires binary mode

        elif e.value == "train":
            print("Training classifier")
            data = preproc.detrend(data)
            data, badch = preproc.badchannelremoval(data)
            data = preproc.spatialfilter(data)
            data = preproc.spectralfilter(data, (0, .1, 10, 12),
                                          bufhelp.fSample)
            data, events, badtrials = preproc.badtrailremoval(data, events)
            mapping = {
Code example #10
    freqbands = f['freqbands']
    classifier = f['classifier']
    fs = f['fSample']

# connect to the buffer, if no-header wait until valid connection
ftc, hdr = bufhelp.connect()

# clear event history
pending = []
while True:
    # wait for data after a trigger event
    #  exitevent=None means return as soon as data is ready
    #  N.B. be sure to propagate state between calls
    data, events, stopevents, pending = bufhelp.gatherdata(["stimulus.char"],
                                                           trlen_ms,
                                                           None,
                                                           pending,
                                                           milliseconds=True)

    # get all event type labels
    event_types = [e.type[0] for e in events]
    event_letters = [e.value[0] for e in events]

    # stop processing if needed
    if "stimulus.feedback" in event_types:
        break

    # get data in correct format
    data = np.transpose(data)

    # 1: detrend
Code example #11
bufhelp.connect()

trlen_ms = 700
run = True

print("Waiting for startPhase.cmd event.")
while run:
    e = bufhelp.waitforevent("startPhase.cmd", 1000, True)
    print("Got startPhase event: %s" % e)

    if e is not None:
        if e.value == "calibrate":
            print("Calibration phase")
            data, events, stopevents = bufhelp.gatherdata(
                "stimulus.hybrid",
                trlen_ms, ("stimulus.training", "end"),
                milliseconds=True)

            pickle.dump({
                "events": events,
                "data": data
            }, open("subject_data", "wb"))

        elif e.value == "training":
            print("Training classifier")

            data = preproc.detrend(data)
            data, badch = preproc.badchannelremoval(data)
            data = preproc.spatialfilter(data)
            data = preproc.spectralfilter(data, (1, 10, 15, 25),
                                          bufhelp.fSample)
Code example #12
def watch_tv(channel,min_time,get_ready,sos):
    errors = 0
    error_sos = 0
    sos_time = 0
    
    timer = core.Clock()
    current_image = visual.ImageStim(mywin, image="png/tv.png") # set image
    current_image.draw()
    get_ready.text ='You want to watch tv. First go through each channel using events of type "tv" and value "1", "2" and "3". Then, go through each channel for a second time until you see a channel with a green mark next to it. This is the preferred channel. Look at this channel for at least 30s without changing channels. After 30s, turn off the tv using an event of type "tv" with the value "end". \n\nNote: each error will add 5s to your total time.\n\nPress any key to start'
    showText(get_ready)
    keys = waitForKeypress()
    
    mywin.flip()
    current_image.draw()
    mywin.flip()
    timer.reset()
    
    triggerevents=["tv"]
    stopevent=("tv","end")
    trlen_samp = 50
    state = []
    endTV = False
    current_idx = 0
    channels_seen = [False, False, False]
    channel_timer = core.Clock()
    start_counting = False
    print("Waiting for triggers: %s and endtrigger: %s.%s"%(triggerevents[0],stopevent[0],stopevent[1]))
    while endTV is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,[], state, milliseconds=False)
        channel_update = False
        for ei in np.arange(len(events)-1,-1,-1):
            ev = events[ei]
            if ev.type == "tv":
                if ev.value == "1":
                    current_image = visual.ImageStim(mywin, "png/tv_nature.png") # set image
                    if channels_seen[0] is False:
                        channels_seen[0] = True
                        channel_update = True
                elif ev.value == "2":
                    current_image = visual.ImageStim(mywin, "png/tv_mes_op_tafel.png") # set image
                    if channels_seen[1] is False:
                        channels_seen[1] = True
                        channel_update = True
                    if sos:
                        [sos_time,error_sos] = sos_button(get_ready,current_image)
                        sos = False
                elif ev.value == "3":
                    current_image = visual.ImageStim(mywin, "png/tv_sesamstraat.png") # set image
                    if channels_seen[2] is False:
                        channels_seen[2] = True
                        channel_update = True
                current_image.draw()
                mywin.flip()
            if (channel_update is False) and (False not in channels_seen):
                if ev.value == channel:
                    if start_counting is False:
                        current_image = visual.ImageStim(mywin, "png/tv_done.png") # set image
                        current_image.draw()
                        mywin.flip()
                        start_counting = True
                        channel_timer.reset() # start counting time
                elif ev.value == "end":
                    if channel_timer.getTime() >= min_time:
                        endTV = True
                else:
                    start_counting = False
                    print("error!")
                    errors = errors + 1
    return [timer.getTime(),errors,error_sos,sos_time]
Code example #13
# invert the value dict to get a value->key map
ivaluedict = {v: k for k, v in valuedict.items()}

# connect to the buffer, if no-header wait until valid connection
ftc, hdr = bufhelp.connect()
fs = hdr.fSample

pending = []
while True:
    # wait for data after a trigger event
    #  exitevent=None means return as soon as data is ready
    #  N.B. be sure to propagate state between calls
    data, events, stopevents, pending = bufhelp.gatherdata(["stimulus.flash"],
                                                           trlen_ms,
                                                           None,
                                                           pending,
                                                           milliseconds=True,
                                                           verbose=True)

    # get all event type labels
    event_types = [e.type[0] for e in events]

    # stop processing if needed
    if "stimulus.feedback" in event_types:
        break

    print("Applying classifier to %d events" % (len(events)))

    # get data in correct format
    data = np.transpose(data)  # make it [d x tau]
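
Several of the classifier-application excerpts (this one, #14 and #32) build an inverted lookup table named ivaluedict by swapping the keys and values of valuedict. A tiny self-contained illustration of that inversion; the valuedict contents here are hypothetical, and the values must be unique for the inversion to be lossless.

valuedict = {'left': 0, 'right': 1}                 # hypothetical label -> code map
ivaluedict = {v: k for k, v in valuedict.items()}   # swap keys and values
assert ivaluedict == {0: 'left', 1: 'right'}
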
Code example #14
    classifier = f['classifier']
    fs         = f['fSample']

# invert the value dict to get a value->key map
ivaluedict = { v:k for k,v in valuedict.items() }
    
# connect to the buffer, if no-header wait until valid connection
ftc,hdr=bufhelp.connect()

# clear event history
pending = []
while True:
    # wait for data after a trigger event
    #  exitevent=None means return as soon as data is ready
    #  N.B. be sure to propagate state between calls
    data, events, stopevents, pending = bufhelp.gatherdata(["stimulus.target"], trlen_ms, None, pending, milliseconds=True)
    
    # get all event type labels
    event_types = [e.type[0] for e in events] 
    
    # stop processing if needed
    if "stimulus.feedback" in event_types:
        break

    # get data in correct format
    data = np.transpose(data)

    # 1: detrend
    data = preproc.detrend(data)
    # 2: bad-channel removal (as identified in classifier training)
    data = data[goodch,:,:]
Code example #15
# init connection to the buffer
ftc,hdr=bufhelp.connect();

#define variables
trlen_samp = 50
nSymbols = 2
nr_channels = 4 # in debug mode
erp = np.zeros((nr_channels,trlen_samp,nSymbols))
nTarget = np.zeros((nSymbols,1))

# read in the capfile
Cname,latlong,xy,xyz,capfile= readCapInf.readCapInf('sigproxy_with_TRG')

fig=plt.figure()

# grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
data, events, stopevents = bufhelp.gatherdata(["stimulus","experiment"],trlen_samp,("sequence","end"), milliseconds=False)

# loop through all recorded events (last to first)
for ei in np.arange(len(events)-1,-1,-1):
        
    # detrend erp, so we can see stuff
    erp = preproc.detrend(erp)

image3d(erp)  # plot the ERPs
plt.show()




Code example #16
def sos_button(get_ready, previous_image):
    errors = 0
    sos = ["png/sos_food.png", "png/sos_pain.png", "png/sos_toilet.png"]

    # choose random sos
    r = np.random.randint(len(sos))

    timer = core.Clock()
    current_image = visual.ImageStim(mywin, image=sos[r])  # set image
    current_image.draw()
    get_ready.text = 'Emergency! You are in pain, need to go to the toilet or want food. What help you need is indicated by an icon on the screen. First press the SOS button by sending an event of type "sos" and value "on". Once the button is activated and help is on the way, select what type of help you need. You can do so with events of type "sos" and values "toilet", "pain" and "food".\n\nNote: each error will add 60s to your total time. \n\nPress any key to start'
    showText(get_ready)
    keys = waitForKeypress()

    mywin.flip()
    current_image.draw()
    mywin.flip()
    timer.reset()

    triggerevents = ["sos"]
    stopevent = ("sos", "end")
    trlen_samp = 50
    state = []
    endSOS = False
    current_idx = 0
    print("Waiting for triggers: %s and endtrigger: %s.%s" %
          (triggerevents[0], stopevent[0], stopevent[1]))
    while endSOS is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(
            triggerevents, trlen_samp, [], state, milliseconds=False)
        for ei in np.arange(len(events) - 1, -1, -1):
            ev = events[ei]
            if ev.type == "sos":
                if ev.value == "on":
                    current_image = visual.ImageStim(
                        mywin, "png/alarm_done.png")  # set image
                    endSOS = True
                else:
                    current_image = visual.ImageStim(
                        mywin, "png/alarm_error.png")  # set image
                    errors = errors + 1
                current_image.draw()
                mywin.flip()
                core.wait(1)
                current_image = visual.ImageStim(mywin,
                                                 image=sos[r])  # set image
                current_image.draw()
                mywin.flip()
    current_image = visual.ImageStim(mywin, "png/sos_choice.png")  # set image
    current_image.draw()
    mywin.flip()
    endSOS = False
    while endSOS is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(
            triggerevents, trlen_samp, [], state, milliseconds=False)
        for ei in np.arange(len(events) - 1, -1, -1):
            ev = events[ei]
            if ev.type == "sos":
                if ev.value == "food":
                    if r == 0:
                        current_image = visual.ImageStim(
                            mywin, "png/food.png")  # set image
                        endSOS = True
                    else:
                        current_image = visual.ImageStim(
                            mywin, "png/food_error.png")  # set image
                        errors = errors + 1
                elif ev.value == "pain":
                    if r == 1:
                        current_image = visual.ImageStim(
                            mywin, "png/pain.png")  # set image
                        endSOS = True
                    else:
                        current_image = visual.ImageStim(
                            mywin, "png/pain_error.png")  # set image
                        errors = errors + 1
                elif ev.value == "toilet":
                    if r == 2:
                        current_image = visual.ImageStim(
                            mywin, "png/toilet.png")  # set image
                        endSOS = True
                    else:
                        current_image = visual.ImageStim(
                            mywin, "png/toilet_error.png")  # set image
                        errors = errors + 1
                current_image.draw()
                mywin.flip()
            if endSOS is True:
                core.wait(3)
                previous_image.draw()  # set image
                mywin.flip()
    return [timer.getTime(), errors]
Code example #17
import pickle

dname = 'training_data'
cname = 'clsfr'

trlen_ms = 3000

#load the trained classifier
if os.path.exists(cname + '.pk'):
    f = pickle.load(open(cname + '.pk', 'rb'))
    classifier = f['classifier']

# connect to the buffer, if no-header wait until valid connection
ftc, hdr = bufhelp.connect()

while True:
    # wait for data after a trigger event
    data, events, stopevents, state = bufhelp.gatherdata(
        ["stimulus.target"],
        trlen_ms, [("stimulus.feedback", "end")],
        milliseconds=True)

    # YOUR CODE HERE #

    # apply classifier, default is a linear-least-squares-classifier
    predictions = linear.predict(data)

    # send the prediction events
    for pred in predictions:
        bufhelp.sendEvent("classifier.prediction", pred)
Code example #18
#!/usr/bin/env python3
# Set up imports and paths
import sys, os
# Get the helper functions for connecting to the buffer
try:     pydir=os.path.dirname(__file__)
except:  pydir=os.getcwd()    
sigProcPath = os.path.join(os.path.abspath(pydir),'../../python/signalProc')
sys.path.append(sigProcPath)
import bufhelp
import pickle

# connect to the buffer, if no-header wait until valid connection
bufhelp.connect()

trlen_ms = 600

print("Calibration phase")
# grab data after every t:'stimulus.target' event until we get a {t:'stimulus.training' v:'end'} event 
data, events, stopevents, state = bufhelp.gatherdata()
# save the calibration data
pickle.dump({"events":events,"data":data}, open("subject_data", "wb")) # pickle requires binary mode
Code example #19
# connect to the buffer, if no-header wait until valid connection
ftc, hdr = bufhelp.connect()
fs = hdr.fSample

pending = []
while True:
    # wait for data after a trigger event
    #  exitevent=None means return as soon as data is ready
    #  N.B. be sure to propagate state between calls
    try:
        print("ErrP classifier waiting for data...")
        bufhelp.sendEvent('errp_clsfr.state', 'listening')
        data, events, stopevents, pending = bufhelp.gatherdata(
            ['stimulus.prediction'],
            trlen_ms,
            None,
            pending,
            milliseconds=True,
            verbose=False)
        # get all event type labels
        events = [(e.type, e.value) for e in events]

        # stop processing if testing ends
        ##if ('feedback.testing', 'end') in events:
        ##    print("Stopping signal processing.")
        ##    break

        print("Applying ErrP classifier to %d events" % len(events))

        # get data in correct format
        data = np.asarray(data)  # [ nTrials x time x d ]
Code example #20
def sos_button(get_ready,previous_image):
    errors = 0
    sos = ["png/sos_food.png", "png/sos_pain.png", "png/sos_toilet.png"]
    
    # choose random sos
    r = np.random.randint(len(sos))
    
    timer = core.Clock()
    current_image = visual.ImageStim(mywin, image=sos[r]) # set image
    current_image.draw()
    get_ready.text ='Emergency! You are in pain, need to go to the toilet or want food. What help you need is indicated by an icon on the screen. First press the SOS button by sending an event of type "sos" and value "on". Once the button is activated and help is on the way, select what type of help you need. You can do so with events of type "sos" and values "toilet", "pain" and "food".\n\nNote: each error will add 60s to your total time. \n\nPress any key to start'
    showText(get_ready)
    keys = waitForKeypress()
    
    mywin.flip()
    current_image.draw()
    mywin.flip()
    timer.reset()
    
    triggerevents=["sos"]
    stopevent=("sos","end")
    trlen_samp = 50
    state = []
    endSOS = False
    current_idx = 0
    print("Waiting for triggers: %s and endtrigger: %s.%s"%(triggerevents[0],stopevent[0],stopevent[1]))
    while endSOS is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,[], state, milliseconds=False)
        for ei in np.arange(len(events)-1,-1,-1):
            ev = events[ei]
            if ev.type == "sos":
                if ev.value == "on":
                    current_image = visual.ImageStim(mywin, "png/alarm_done.png") # set image
                    endSOS = True
                else:
                    current_image = visual.ImageStim(mywin, "png/alarm_error.png") # set image
                    errors = errors + 1
                current_image.draw()
                mywin.flip()
                core.wait(1)
                current_image = visual.ImageStim(mywin, image=sos[r]) # set image
                current_image.draw()
                mywin.flip()
    current_image = visual.ImageStim(mywin, "png/sos_choice.png") # set image
    current_image.draw()
    mywin.flip()
    endSOS = False
    while endSOS is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,[], state, milliseconds=False)
        for ei in np.arange(len(events)-1,-1,-1):
            ev = events[ei]
            if ev.type == "sos":
                if ev.value == "food": 
                    if r == 0:
                        current_image = visual.ImageStim(mywin, "png/food.png") # set image
                        endSOS = True
                    else:
                        current_image = visual.ImageStim(mywin, "png/food_error.png") # set image
                        errors = errors + 1
                elif ev.value == "pain":
                    if r == 1:
                        current_image = visual.ImageStim(mywin, "png/pain.png") # set image
                        endSOS = True
                    else:
                        current_image = visual.ImageStim(mywin, "png/pain_error.png") # set image
                        errors = errors + 1
                elif ev.value == "toilet":
                    if r == 2:
                        current_image = visual.ImageStim(mywin, "png/toilet.png") # set image
                        endSOS = True
                    else:
                        current_image = visual.ImageStim(mywin, "png/toilet_error.png") # set image
                        errors = errors + 1
                current_image.draw()
                mywin.flip()
            if endSOS is True:
                core.wait(3)
                previous_image.draw() # set image
                mywin.flip()
    return [timer.getTime(),errors]
Code example #21
def watch_tv(channel, min_time, get_ready, sos):
    errors = 0
    error_sos = 0
    sos_time = 0

    timer = core.Clock()
    current_image = visual.ImageStim(mywin, image="png/tv.png")  # set image
    current_image.draw()
    get_ready.text = 'You want to watch tv. First go through each channel using events of type "tv" and value "1", "2" and "3". Then, go through each channel for a second time until you see a channel with a green mark next to it. This is the preferred channel. Look at this channel for at least 30s without changing channels. After 30s, turn off the tv using an event of type "tv" with the value "end". \n\nNote: each error will add 5s to your total time.\n\nPress any key to start'
    showText(get_ready)
    keys = waitForKeypress()

    mywin.flip()
    current_image.draw()
    mywin.flip()
    timer.reset()

    triggerevents = ["tv"]
    stopevent = ("tv", "end")
    trlen_samp = 50
    state = []
    endTV = False
    current_idx = 0
    channels_seen = [False, False, False]
    channel_timer = core.Clock()
    start_counting = False
    print("Waiting for triggers: %s and endtrigger: %s.%s" %
          (triggerevents[0], stopevent[0], stopevent[1]))
    while endTV is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(
            triggerevents, trlen_samp, [], state, milliseconds=False)
        channel_update = False
        for ei in np.arange(len(events) - 1, -1, -1):
            ev = events[ei]
            if ev.type == "tv":
                if ev.value == "1":
                    current_image = visual.ImageStim(
                        mywin, "png/tv_nature.png")  # set image
                    if channels_seen[0] is False:
                        channels_seen[0] = True
                        channel_update = True
                elif ev.value == "2":
                    current_image = visual.ImageStim(
                        mywin, "png/tv_mes_op_tafel.png")  # set image
                    if channels_seen[1] is False:
                        channels_seen[1] = True
                        channel_update = True
                    if sos:
                        [sos_time,
                         error_sos] = sos_button(get_ready, current_image)
                        sos = False
                elif ev.value == "3":
                    current_image = visual.ImageStim(
                        mywin, "png/tv_sesamstraat.png")  # set image
                    if channels_seen[2] is False:
                        channels_seen[2] = True
                        channel_update = True
                current_image.draw()
                mywin.flip()
            if (channel_update is False) and (False not in channels_seen):
                if ev.value == channel:
                    if start_counting is False:
                        current_image = visual.ImageStim(
                            mywin, "png/tv_done.png")  # set image
                        current_image.draw()
                        mywin.flip()
                        start_counting = True
                        channel_timer.reset()  # start counting time
                elif ev.value == "end":
                    if channel_timer.getTime() >= min_time:
                        endTV = True
                else:
                    start_counting = False
                    print("error!")
                    errors = errors + 1
    return [timer.getTime(), errors, error_sos, sos_time]
Code example #22
#!/usr/bin/env python3
# Set up imports and paths
import sys, os
# Get the helper functions for connecting to the buffer
try:     pydir=os.path.dirname(__file__)
except:  pydir=os.getcwd()    
sigProcPath = os.path.join(os.path.abspath(pydir),'../../python/signalProc')
sys.path.append(sigProcPath)
import bufhelp 
import pickle

# connect to the buffer, if no-header wait until valid connection
ftc,hdr=bufhelp.connect()

trlen_ms = 600
dname  ='training_data';
cname  ='clsfr';

print("Calibration phase")
# grab data after every t:'stimulus.target' event until we get a {t:'stimulus.training' v:'end'} event 
data, events, stopevents, pending = bufhelp.gatherdata("stimulus.tgtFlash",trlen_ms,("stimulus.training","end"), milliseconds=True)
# save the calibration data
pickle.dump({"events":events,"data":data,'hdr':hdr}, open(dname+'.pk','wb'))#N.B. to pickle open in binary mode
# # also as a hdf5 / .mat v7.3 fi
# # doesn't work.... need to unpack objects into basic types for hdf5
# f = h5py.File(dname+'.mat','w')
# f.create_dataset('data',data=data)
# f.create_dataset('events',data=events)
# f.create_dataset('hdr',data=hdr)
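
The commented-out block above notes that the event and header objects cannot be written to HDF5 directly and would first need unpacking into basic types. A hedged sketch of that unpacking, saving only the numeric data plus the event type/value strings; it assumes h5py is installed and that data, events and dname are as in the script above, with all gathered trials having the same length.

import numpy as np
import h5py

with h5py.File(dname + '.h5', 'w') as f:
    f.create_dataset('data', data=np.asarray(data))
    f.create_dataset('event_type', data=np.array([str(e.type) for e in events], dtype='S'))
    f.create_dataset('event_value', data=np.array([str(e.value) for e in events], dtype='S'))
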

Code example #23
#!/usr/bin/env python3
# Set up imports and paths
import sys, os
# Get the helper functions for connecting to the buffer
try:
    pydir = os.path.dirname(__file__)
except:
    pydir = os.getcwd()
sigProcPath = os.path.join(os.path.abspath(pydir), '../../python/signalProc')
sys.path.append(sigProcPath)
import bufhelp
import pickle
import h5py

# connect to the buffer, if no-header wait until valid connection
ftc, hdr = bufhelp.connect()

# grab data
data, events, stopevents, pending = bufhelp.gatherdata()

# save the calibration data
pickle.dump({
    "events": events,
    "data": data,
    'hdr': hdr
}, open(dname + '.pk', 'wb'))  #N.B. to pickle open in binary mode
Code example #24
import sys

sys.path.append('../../python/signalProc')
import pickle

import bufhelp

_, hdr = bufhelp.connect()
data, events, _ = bufhelp.gatherdata('stimulus.type',
                                     600,
                                     'experiment.end',
                                     milliseconds=True)

pickle.dump({
    'data': data,
    'events': events,
    'hdr': hdr,
}, open('training_data.pkl', 'wb'))
Code example #25
import preproc
import linear
import pickle

dname  ='training_data'
cname  ='clsfr'

trlen_ms = 3000

#load the trained classifier
if os.path.exists(cname+'.pk'):
    f     =pickle.load(open(cname+'.pk','rb'))
    classifier = f['classifier']


# connect to the buffer, if no-header wait until valid connection
ftc,hdr=bufhelp.connect()

while True:
    # wait for data after a trigger event
    data, events, stopevents, state = bufhelp.gatherdata(["stimulus.target"],trlen_ms,[("stimulus.feedback","end")], milliseconds=True)

    # YOUR CODE HERE #
    
    # apply classifier, default is a linear-least-squares-classifier        
    predictions = linear.predict(data)
    
    # send the prediction events
    for pred in predictions:
        bufhelp.sendEvent("classifier.prediction",pred)
Code example #26
#!/usr/bin/env python3
# Set up imports and paths
import sys, os
# Get the helper functions for connecting to the buffer
try:
    pydir = os.path.dirname(__file__)
except:
    pydir = os.getcwd()
sigProcPath = os.path.join(os.path.abspath(pydir), '../../python/signalProc')
sys.path.append(sigProcPath)
import bufhelp
import pickle

# connect to the buffer, if no-header wait until valid connection
bufhelp.connect()

trlen_ms = 600

print("Calibration phase")
# grab data after every t:'stimulus.target' event until we get a {t:'stimulus.training' v:'end'} event
data, events, stopevents = bufhelp.gatherdata()
# save the calibration data
pickle.dump({"events": events, "data": data}, open("subject_data", "wb"))  # pickle requires binary mode
Code example #27
from image3d import *

# init connection to the buffer
ftc, hdr = bufhelp.connect()

#define variables
trlen_samp = 50
nSymbols = 2
nr_channels = 4  # in debug mode
erp = np.zeros((nr_channels, trlen_samp, nSymbols))
nTarget = np.zeros((nSymbols, 1))

# read in the capfile
Cname, latlong, xy, xyz, capfile = readCapInf.readCapInf('sigproxy_with_TRG')

fig = plt.figure()

# grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event
data, events, stopevents = bufhelp.gatherdata(["stimulus", "experiment"],
                                              trlen_samp, ("sequence", "end"),
                                              milliseconds=False)

# loop through all recorded events (last to first)
for ei in np.arange(len(events) - 1, -1, -1):

    # detrend erp, so we can see stuff
    erp = preproc.detrend(erp)

image3d(erp)  # plot the ERPs
plt.show()
Code example #28
"""
Listens for stimulus.target events until it sees 
the (stimulus.training, end) event. Then saves all data in a pickle.
"""

# Imports
import sys
import pickle

sys.path.append('../signalProc/')
import bufhelp
import h5py

# connect to buffer
ftc, hdr = bufhelp.connect()

# Constants
trlen_ms = 3000
dname = 'training_data'
cname = 'clsfr'

data, events, stopevents = bufhelp.gatherdata("stimulus.target",
                                              trlen_ms,
                                              ("stimulus.training", "end"),
                                              milliseconds=True)
pickle.dump({'events': events,
             'data': data,
             'hdr': hdr
             }, open(dname+'.pk', 'wb')
            )
Code example #29
#!/usr/bin/env python3
BUFFERBCI_PATH = '../../buffer_bci/python/signalProc/'
import sys
import os
sys.path.append(BUFFERBCI_PATH)
import pickle
import yaml
import bufhelp

DATA_PATH = 'data'
if not os.path.exists(DATA_PATH):
    os.makedirs(DATA_PATH)
cfg = yaml.load(open('config.yml', 'r'), Loader=yaml.FullLoader)

SAMPLING_TIME = cfg['p300']['sampling_time']
DATA_FILENAME = cfg['p300']['subject_data_filename']

print("Calibration phase")
# Connect to buffer_bci
ftc, hdr = bufhelp.connect()
# Grab data after every t:'stimulus.cue' event until we get a {t:'stimulus.training' v:'end'} event
data, events, stopevents, _ = bufhelp.gatherdata(
    trigger='stimulus.cue',
    time=SAMPLING_TIME,
    stoptrigger=('stimulus.seq', 'end'),
    milliseconds=True)

# Save the calibration data
with open(os.path.join(DATA_PATH, DATA_FILENAME), 'wb') as f:
    pickle.dump({"events":events,"data":data, "hdr": hdr}, f)
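
This calibration script and the classifier script in example #7 read their settings from a config.yml with a p300 section (sampling_time, subject_data_filename, classifier_filename). The config file itself is not shown on this page; below is a hypothetical minimal config with placeholder values, parsed the same way the scripts do.

import yaml

# hypothetical config.yml contents; the key names match what the scripts read,
# the values are placeholders only
cfg_text = """
p300:
  sampling_time: 600
  subject_data_filename: subject_data.pk
  classifier_filename: classifier.pk
"""
cfg = yaml.load(cfg_text, Loader=yaml.FullLoader)
assert cfg['p300']['sampling_time'] == 600
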
Code example #30
#!/usr/bin/env python3
# Set up imports and paths
import sys, os
# Get the helper functions for connecting to the buffer
try:
    pydir = os.path.dirname(__file__)
except:
    pydir = os.getcwd()
sigProcPath = os.path.join(os.path.abspath(pydir), '../../python/signalProc')
sys.path.append(sigProcPath)
import bufhelp
import pickle

# connect to the buffer, if no-header wait until valid connection
ftc, hdr = bufhelp.connect()

trialLength = 3000
dname = 'training_data_imagined_movement'
cname = 'classifier_imagined_movement'

# grab data after every t:'stimulus.target' event until we get a {t:'stimulus.training' v:'end'} event
data, events, stopevents, pending = bufhelp.gatherdata(
    "stimulus.target",
    trialLength, ("stimulus.training", "end"),
    milliseconds=True)
# save the calibration data
pickle.dump({
    "events": events,
    "data": data,
    'hdr': hdr
}, open(dname + '.pk', 'wb'))  #N.B. to pickle open in binary mode
Code example #31
sigProcPath = os.path.join(os.path.abspath(pydir), '../../python/signalProc')
sys.path.append(sigProcPath)
import bufhelp
import pickle

# connect to the buffer, if no-header wait until valid connection
ftc, hdr = bufhelp.connect()

trlen_ms = 600
dname = 'training_data'
cname = 'clsfr'

print("Calibration phase")
# grab data after every t:'stimulus.target' event until we get a {t:'stimulus.training' v:'end'} event
data, events, stopevents, pending = bufhelp.gatherdata(
    "stimulus.tgtFlash",
    trlen_ms, ("stimulus.training", "end"),
    milliseconds=True)
# save the calibration data
pickle.dump({
    "events": events,
    "data": data,
    'hdr': hdr
}, open(dname + '.pk', 'wb'))  #N.B. to pickle open in binary mode
# # also as a hdf5 / .mat v7.3 fi
# # doesn't work.... need to unpack objects into basic types for hdf5
# f = h5py.File(dname+'.mat','w')
# f.create_dataset('data',data=data)
# f.create_dataset('events',data=events)
# f.create_dataset('hdr',data=hdr)
Code example #32
    valuedict  = f['valuedict']
    classifier = f['classifier']

# invert the value dict to get a value->key map
ivaluedict = { v:k for k,v in valuedict.items() }
    
# connect to the buffer, if no-header wait until valid connection
ftc,hdr=bufhelp.connect()
fs = hdr.fSample

pending = []
while True:
    # wait for data after a trigger event
    #  exitevent=None means return as soon as data is ready
    #  N.B. be sure to propagate state between calls
    data, events, stopevents, pending = bufhelp.gatherdata(["stimulus.flash"], trlen_ms, None, pending, milliseconds=True, verbose=True)
    
    # get all event type labels
    event_types = [e.type[0] for e in events] 
    
    # stop processing if needed
    if "stimulus.feedback" in event_types:
        break

    print("Applying classifier to %d events"%(len(events)))
    
    # get data in correct format
    data = np.transpose(data) # make it [d x tau]
    
    # 1: detrend
    data = preproc.detrend(data)
Code example #33
    if verbose: print('Loading normalizer file....')

    if os.path.exists(Normalizer_name + '.pk'):
        f = pickle.load(open(Normalizer_name + '.pk', 'rb'))
        scaler = f['normalizer']

    if verbose: print('Normalizer loaded #')
else:
    print('Debug mode')

while True:
    if not DEBUG:

        if verbose: print('Collecting data...')

        data, events, stopevents, pending = bufhelp.gatherdata(
            "errp.trigger", recording_lenght, [], milliseconds=True)

        data = np.array(data)

        # get data in correct format
        data = np.moveaxis(data, 1, 2)

        # preprocess pipeline
        # 1: detrend
        data = preproc.detrend(data)
        # 2: bad-channel removal (as identified in classifier training)
        data = data[:, goodch, :]
        # 3: apply spatial filter (as in classifier training)
        data = preproc.spatialfilter(data, type=spatialfilter)
        # 4 & 5: spectral filter (TODO: check fs matches!!)
        data = preproc.fftfilter(data, 1, freqbands, fs)
Code example #34



##### Gathering data from the events ######


# In this part the program enters a loop: it waits for events telling it to collect data, processes the data, and waits for
# events again until the program is closed


while True:

        if verbose: print('Collecting data...')   # Information in terminal, verbose option

        data, events_im, stopevents, pending = bufhelp.gatherdata(["stimulus.target", "stimulus.last_target"],im_length,[], milliseconds=True)



        data = np.array(data)                       # data contains the signal sampled, 750 samples for 32 channels
        data_rec = np.copy(data)

        if verbose: print(events_im[0].value)       # Information in terminal, verbose option, for debugging purposes
        if verbose: print(data.shape)               # Information in terminal, verbose option, for debugging purposes
        
        data = data[:,:,:]                   
        
        if verbose: print(data.shape)               # Information in terminal, verbose option, for debugging purposes

        data           =   np.array(data)   #data support variable
        data           =   np.transpose(data)
Code example #35
    fig.canvas.draw()
    plt.pause(1e-3) # wait for draw.. 1ms
plt.ion()
fig=plt.figure()
drawnow()


triggerevents=["stimulus","experiment"]
stopevent=('sequence','end')
state = []
endTest = False
print("Waiting for triggers: %s and endtrigger: %s"%(triggerevents[0],stopevent[0]))
while endTest is False:
    # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
    #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
    data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,[], state, milliseconds=False)

    for ei in np.arange(len(events)-1,-1,-1):
        ev = events[ei]
        # check for exit event
        if (ev.type == "experiment") and (ev.value == "end"):
            endTest = True
            print("end experiment")
            break
        
        # update ERP
        if ev.value == '+':
            classlabel = 1
        else:
            classlabel = 0
        erp[:,:,classlabel] = (erp[:,:,classlabel]*nTarget[classlabel] + np.transpose(data[ei]))/(nTarget[classlabel]+1)
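
The last line keeps a per-class running average of the ERP; for that average to stay correct, nTarget[classlabel] also has to be incremented once per added trial, which presumably happens just after the point where this excerpt is cut off. A tiny self-contained check of the incremental-mean update used here:

import numpy as np

x = np.array([1.0, 3.0, 5.0])
mean, n = 0.0, 0
for xi in x:                       # same form as the erp update: new = (old*n + x)/(n+1)
    mean = (mean * n + xi) / (n + 1)
    n += 1                         # the per-class count must be bumped after each update
assert np.isclose(mean, x.mean())
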