Code Example #1
from psychopy import visual, core, event  # imports this exercise template relies on

# open a black full screen window
win = visual.Window(fullscr=True, allowGUI=False, color='black', units='height')

# uncomment if you use a clock. Optional because we didn't cover timing this week, 
# but you can find examples in the tutorial code 
#trialClock = core.Clock()
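# a minimal sketch of how the clock could be used for response timing, if you enable it:
#trialClock.reset()               # e.g. right after win.flip()
#rt = trialClock.getTime()        # e.g. right after a key press is collected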

#%% up to you!
# this is where you build a trial that you might actually use one day!
# just try to make one trial ordering your lines of code according to the 
# sequence of events that happen on one trial
# if you're stuck you can use the responseExercise.py answer as a starting point 

# maybe start by making stimulus objects (e.g. myPic = visual.ImageStim(...))  
myText = visual.TextStim(win,text='+',pos=(0,0), color = 'white')
myPic = visual.ImageStim(win, image = 'T.png', pos=(0,0))
# then draw all stimuli
myText.draw()
myPic.draw()
# then flip your window
win.flip()
# then record your responses

keys = event.waitKeys(keyList=('f','j'))

#%% Required clean up
# this cell will make sure that your window displays for a while and then 
# closes properly
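# a minimal sketch of the cleanup this cell describes, assuming the psychopy
# imports at the top: keep the final display up briefly, then close cleanly
core.wait(2)   # leave the window visible for a couple of seconds
win.close()    # close the window properly
core.quit()    # shut down PsychoPy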


Code Example #2
def presentStimuli(main_window,
                   dataPath,
                   ID,
                   stimuli_list,
                   presTime=.1,
                   catchBuffer=0.5,
                   outputFolder="/Data"):
    win = main_window
    sceneVis = visual.ImageStim(win, image=None)
    # foo()

    for itemNum, cur_row in enumerate(stimuli_list):

        participant = str(ID).zfill(2)
        trial = str(cur_row[2]).zfill(3)
        trial_type = cur_row[0]
        img = cur_row[1]
        if trial_type == 'prac':
            trial = 'p' + trial[1:]
        audio_file = exp + '_' + participant + "_" + trial + ".wav"
        data_file = exp + '_' + participant + "_" + ".txt"
        audio_path = dataPath + '/' + audio_file
        pdata_path = dataPath + '/' + data_file

        stim_path = os.getcwd() + stimuliDir + img

        # fixation duration set in gui by researcher
        fixation_duration = fixDur  # float(cur_row[3])/1000 + 1

        recorder = hf.AudioRecorder(audio_path)

        # vars to be recorded. some will be subbed out...
        word, error, word_onset, word_duration = '', '', '', ''

        # set up key press watcher
        key_check = []
        cur_key = event.getKeys(keyList=['p', 'u', 'q', 'space'])
        if cur_key:
            key_check.append(cur_key[0])

        # load current image.
        sceneVis.setImage(stim_path)
        sceneVis.draw()
        recorder.start()

        main_window.flip()  # show stim

        core.wait(presTime)  # for this duration,
        # print("1 ",key_check)
        main_window.flip()  # then hide screen

        trial_duration = presTime
        response = []
        if buzzerGo:  # play the deadline buzzer
            response, duration = hf.playAudio("bell.wav", catchBuffer)
            trial_duration += duration

        recorder.stop()
        # print("2 ",key_check,response)

        header = [
            "computer", "datetime", "exp", "cbal", "participant", "trial",
            "trial_type", "img", "trial_duration", "audio_path", "audio_file",
            "word", "error", "word_onset", "word_duration"
        ]
        # this should be moved to outside the loop

        # cur_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
        lrow = [
            computer, cur_date, exp, cbal, participant, trial, trial_type, img,
            trial_duration, audio_path, audio_file, word, error, word_onset,
            word_duration
        ]

        # write long format.
        try:
            hf.write_2('data.csv', lrow, header)
            # print(lrow)
        except:
            print("error opening datafile")
            hf.write_2('{0}.csv'.format(file_date), lrow, header)

        # data.txt for participant in the part folder
        try:
            hf.write_2(pdata_path, lrow, header)
            # print(lrow)
        except:
            print("error opening pdatafile")
            hf.write_2('{0}.csv'.format(file_date), lrow, header)

        # write wide format.
        if trial_type == "trgt":
            # lrow[6] = stimuli_list[itemNum - 1][0] # pull the label from previous trial to determine 'trial_type'
            try:
                hf.write_2('wdata.csv', lrow, header)
                print("wdata")
                print(lrow)
            except:
                print("error opening datafile")
                hf.write_2('w{0}.csv'.format(file_date), lrow, header)
        # key_check = event.getKeys(keyList=['a'])

        # experimenter control functions

        if pause_key in key_check or pause_key in response:
            key_check = ''
            hf.presentText(main_window,
                           "*paused*",
                           wait=True,
                           timeDelay=2,
                           text_ht=80)

        if quit_key in key_check or quit_key in response:
            break

        if fixGo:  # show the fix cross
            fix_timer = core.Clock()
            hf.presentText(main_window,
                           "+",
                           wait=False,
                           timeDelay=fixation_duration,
                           text_ht=80)

        event.getKeys(keyList=['p', 'u', 'q', 'space'])
Code Example #3
File: visual_fmri_run1.py  Project: sbasils/qlab
                             wrapWidth=None,
                             color=u'black',
                             colorSpace='rgb',
                             opacity=1,
                             depth=-1.0)

# Initialize components for Routine "vreminder"
vreminderClock = core.Clock()
vreminder_image = visual.ImageStim(win=win,
                                   name='vreminder_image',
                                   image=u'Alien%s.BMP' %
                                   str(expInfo['vtarget']),
                                   mask=None,
                                   ori=0,
                                   pos=[0, -0.3],
                                   size=None,
                                   color=[1, 1, 1],
                                   colorSpace='rgb',
                                   opacity=1,
                                   flipHoriz=False,
                                   flipVert=False,
                                   texRes=128,
                                   interpolate=True,
                                   depth=0.0)
vreminder_text = visual.TextStim(win=win,
                                 ori=0,
                                 name='vreminder_text',
                                 text='Press the button when you see this!',
                                 font='Arial',
                                 pos=[0, 0.4],
                                 height=0.1,
                                 wrapWidth=None,
Code Example #4
temp_instr = visual.TextStim(win, instr[0], color='black', pos=(0.0, 0.0))
temp_instr.draw()
win.update()
event.waitKeys(keyList=['space'])
win.flip()

n = 1
for char in char_intro_order:
    temp_instr = visual.TextStim(win,
                                 instr[n] + char,
                                 color='black',
                                 pos=(0, 0.3))
    char_stim = Image.open(
        char_dir + [i for i in os.listdir(char_dir) if i.startswith(char)][0])
    char_stim.thumbnail(item_size, Image.ANTIALIAS)
    char_stim = visual.ImageStim(win, char_stim, pos=[0, -0.2])
    temp_instr.draw()
    char_stim.draw()
    win.update()
    core.wait(time_intro)
    win.flip()
    n = n + 1

# Example of encoding presentation
"""temp_instr = visual.TextStim(win, instr[12], color='black', pos=[0,0])
temp_instr.draw()
win.update()
event.waitKeys(keyList=['space'])
win.flip()"""
"""
temp_instr = visual.TextStim(win, instr[13], color='black', pos=[0,0.7])
Code Example #5
def itemrecog_pattsep(subject_stim, context_bind_items, count):
    # Item recognition and pattern separation instructions
    for n in range(17, 20):
        temp_instr = visual.TextStim(win, instr[n], color='black', pos=[0, 0])
        temp_instr.draw()
        win.update()
        event.waitKeys(keyList=['space'])
        win.flip()

    encoding_pres_items = subject_stim[subject_stim['Part'] == 'encoding']

    feedback_correct = 'That’s right.'
    feedback_incorrect = 'Actually, you saw that friend with this object circled in black.'

    random.shuffle(character_list)
    for character in character_list:
        if event.getKeys(['escape']):
            win.close()
            core.quit()
        subject_stim['Scene'].iloc[n].split('_')[0]
        items_already_pres = subject_stim['Item'].tolist(
        ) + subject_stim['Lure_1'].tolist() + subject_stim['Lure_2'].tolist()

        # Get two random category lures
        lures_ir = random.sample([
            x for x in stimulus_key[stimulus_key['Character'] == character]
            ['Item'].unique() if x not in items_already_pres
        ], 2)
        # Get lure items that were not presented during encoding and set random color
        lure1_ir = lures_ir[0]
        lure2_ir = lures_ir[1]
        target_ir = [
            x for x in encoding_pres_items[encoding_pres_items['Character'] ==
                                           character]['Item'].tolist()
            if x not in context_bind_items
        ][0]
        subject_stim.loc[count, 'Part'] = 'item_recognition'
        subject_stim.loc[count, 'Character'] = character
        subject_stim.loc[count, 'Item'] = target_ir
        subject_stim.loc[count, 'Lure_1'] = lure1_ir
        subject_stim.loc[count, 'Lure_2'] = lure2_ir

        # Present stimuli
        char_stim = Image.open(char_dir + [
            i for i in os.listdir(char_dir) if i.endswith(character + '.png')
        ][0])
        lure1_stim = Image.open(item_dir + character + '/' + [
            i for i in os.listdir(item_dir + character + '/')
            if i.startswith(lure1_ir + '_white.png')
        ][0])
        lure2_stim = Image.open(item_dir + character + '/' + [
            i for i in os.listdir(item_dir + character + '/')
            if i.startswith(lure2_ir + '_white.png')
        ][0])
        target_stim = Image.open(item_dir + character + '/' + [
            i for i in os.listdir(item_dir + character + '/')
            if i.startswith(target_ir + '_white.png')
        ][0])
        char_stim.thumbnail(item_size, Image.ANTIALIAS)
        lure1_stim.thumbnail(item_size, Image.ANTIALIAS)
        lure2_stim.thumbnail(item_size, Image.ANTIALIAS)
        target_stim.thumbnail(item_size, Image.ANTIALIAS)
        stim_pos = [[-0.5, -0.6], [0, -0.6], [0.5, -0.6]]
        random.shuffle(stim_pos)
        char_pres = visual.ImageStim(win, char_stim, pos=[0, 0.5])
        lure1_pres = visual.ImageStim(win, lure1_stim, pos=stim_pos[0])
        lure2_pres = visual.ImageStim(win, lure2_stim, pos=stim_pos[1])
        target_pres = visual.ImageStim(win, target_stim, pos=stim_pos[2])
        char_pres.draw()
        lure1_pres.draw()
        lure2_pres.draw()
        target_pres.draw()
        win.update()
        timer.reset()

        # Record response and give feedback
        while True:
            if mouse.isPressedIn(target_pres):
                subject_stim.loc[count, 'Reaction_Time'] = timer.getTime()
                subject_stim.loc[count, 'Answer'] = target_ir
                temp_instr = visual.TextStim(win,
                                             feedback_correct,
                                             color='black',
                                             pos=[0, 0])
                feedback_circle = visual.Polygon(win,
                                                 edges=100,
                                                 radius=0.3,
                                                 pos=target_pres.pos)
                feedback_circle.lineColor = 'black'
                feedback_circle.lineWidth = 7
                char_pres.draw()
                lure1_pres.draw()
                lure2_pres.draw()
                target_pres.draw()
                temp_instr.draw()
                feedback_circle.draw()
                win.flip()
                core.wait(time_bind)
                break
            elif mouse.isPressedIn(lure1_pres):
                subject_stim.loc[count, 'Reaction_Time'] = timer.getTime()
                subject_stim.loc[count, 'Answer'] = lure1_ir
                temp_instr = visual.TextStim(win,
                                             feedback_incorrect,
                                             color='black',
                                             pos=[0, 0])
                feedback_circle = visual.Polygon(win,
                                                 edges=100,
                                                 radius=0.3,
                                                 pos=target_pres.pos)
                feedback_circle.lineColor = 'black'
                feedback_circle.lineWidth = 7
                char_pres.draw()
                lure1_pres.draw()
                lure2_pres.draw()
                target_pres.draw()
                temp_instr.draw()
                feedback_circle.draw()
                win.flip()
                core.wait(time_bind)
                break
            elif mouse.isPressedIn(lure2_pres):
                subject_stim.loc[count, 'Reaction_Time'] = timer.getTime()
                subject_stim.loc[count, 'Answer'] = lure2_ir
                temp_instr = visual.TextStim(win,
                                             feedback_incorrect,
                                             color='black',
                                             pos=[0, 0])
                feedback_circle = visual.Polygon(win,
                                                 edges=100,
                                                 radius=0.3,
                                                 pos=target_pres.pos)
                feedback_circle.lineColor = 'black'
                feedback_circle.lineWidth = 7
                char_pres.draw()
                lure1_pres.draw()
                lure2_pres.draw()
                target_pres.draw()
                temp_instr.draw()
                feedback_circle.draw()
                win.flip()
                core.wait(time_bind)
                break
        win.update()
        win.flip()
        fix_pres = scene_pres = visual.ImageStim(win, fixation, pos=[0, 0])
        fix_pres.draw()
        win.update()
        core.wait(time_fixcr)
        win.flip()
        subject_stim.to_csv(save_subj_file_name)
        count = count + 1

        # Pattern Separation
        target_ps = encoding_pres_items[encoding_pres_items['Item'] ==
                                        target_ir][['Item', 'Color'
                                                    ]].iloc[0].str.cat(sep='_')
        lures_ps = random.sample([
            x for x in stimulus_key[stimulus_key['Item'] == target_ir]['Stim']
            if x not in target_ps
        ], 2)
        lure1_ps = lures_ps[0]
        lure2_ps = lures_ps[1]
        subject_stim.loc[count, 'Part'] = 'pattern_separation'
        subject_stim.loc[count, 'Character'] = character
        subject_stim.loc[count, 'Item'] = target_ps.rsplit('_', 1)[0]
        subject_stim.loc[count, 'Color'] = target_ps.rsplit('_', 1)[1]
        subject_stim.loc[count, 'Lure_1'] = lure1_ps
        subject_stim.loc[count, 'Lure_2'] = lure2_ps

        lure1_ps_stim = Image.open(item_dir + character + '/' + [
            i for i in os.listdir(item_dir + character + '/')
            if i.startswith(lure1_ps + '.png')
        ][0])
        lure2_ps_stim = Image.open(item_dir + character + '/' + [
            i for i in os.listdir(item_dir + character + '/')
            if i.startswith(lure2_ps + '.png')
        ][0])
        target_ps_stim = Image.open(item_dir + character + '/' + [
            i for i in os.listdir(item_dir + character + '/')
            if i.startswith(target_ps + '.png')
        ][0])
        lure1_ps_stim.thumbnail(item_size, Image.ANTIALIAS)
        lure2_ps_stim.thumbnail(item_size, Image.ANTIALIAS)
        target_ps_stim.thumbnail(item_size, Image.ANTIALIAS)
        stim_pos = [[-0.5, -0.6], [0, -0.6], [0.5, -0.6]]
        random.shuffle(stim_pos)
        char_pres = visual.ImageStim(win, char_stim, pos=[0, 0.5])
        lure1_ps_pres = visual.ImageStim(win, lure1_ps_stim, pos=stim_pos[0])
        lure2_ps_pres = visual.ImageStim(win, lure2_ps_stim, pos=stim_pos[1])
        target_ps_pres = visual.ImageStim(win, target_ps_stim, pos=stim_pos[2])
        char_pres.draw()
        lure1_ps_pres.draw()
        lure2_ps_pres.draw()
        target_ps_pres.draw()
        win.update()
        timer.reset()

        while True:
            if mouse.isPressedIn(target_ps_pres):
                subject_stim.loc[count, 'Reaction_Time'] = timer.getTime()
                subject_stim.loc[count, 'Answer'] = target_ps
                break
            elif mouse.isPressedIn(lure1_ps_pres):
                subject_stim.loc[count, 'Reaction_Time'] = timer.getTime()
                subject_stim.loc[count, 'Answer'] = lure1_ps
                break
            elif mouse.isPressedIn(lure2_ps_pres):
                subject_stim.loc[count, 'Reaction_Time'] = timer.getTime()
                subject_stim.loc[count, 'Answer'] = lure2_ps
                break
        win.update()
        win.flip()
        count = count + 1
        fix_pres = scene_pres = visual.ImageStim(win, fixation, pos=[0, 0])
        fix_pres.draw()
        win.update()
        core.wait(time_fixcr)
        win.flip()
        subject_stim.to_csv(save_subj_file_name)
    return subject_stim, count
Code Example #6
File: Ultimatum.py  Project: fareri-lab/srndna
#checkpoint
print("got to check 1")

#define fixation
fixation = visual.TextStim(win, text="+", height=2)

#waiting for trigger
ready_screen = visual.TextStim(
    win,
    text=
    "Please wait for Lets Make a Deal to begin! \n\nRemember to keep your head still!",
    height=1.5)

#decision screen
pictureStim = visual.ImageStim(win, pos=(0, 3.5), size=(6.65, 6.65))
resp_text_reject = visual.TextStim(win,
                                   text="Reject Offer",
                                   pos=(-7, -4.8),
                                   height=1,
                                   alignHoriz="center")
resp_text_accept = visual.TextStim(win,
                                   text="Accept Offer",
                                   pos=(7, -4.8),
                                   height=1,
                                   alignHoriz="center")
offer_text = visual.TextStim(win, pos=(0, -1.5), alignHoriz="center", text='')

#outcome screen
outcome_stim = visual.TextStim(win, pos=(0, -2.5), text='')
Code Example #7
        wrapWidth=None,
        color='white',
        colorSpace='rgb',
        opacity=1,
        depth=0.0)
    begExpClock = core.Clock()
    #Initialise components for Routine "fixScr"
    fixScrClock = core.Clock()

    fixation = visual.ImageStim(win=win,
                                name='fixation',
                                image=u'Stim' + os.path.sep + 'fixation.jpg',
                                mask=None,
                                units=u'pix',
                                ori=0,
                                pos=[0, 0],
                                size=[256, 256],
                                color=[1, 1, 1],
                                colorSpace=u'rgb',
                                opacity=1,
                                texRes=128,
                                interpolate=True,
                                depth=-1.0)

    #Initialise components for Routine "trial"
    RTclock = core.Clock()
    trialClock = core.Clock()
    image1 = visual.ImageStim(win=win,
                              name='image1',
                              units=u'pix',
                              image='sin',
                              mask=None,
Code Example #8
File: CueReactivity.py  Project: rkuplicki/LIBR_MOCD
def run_try():      
    schedules = [f for f in os.listdir(os.path.dirname(__file__)) if f.endswith('.schedule')]
    if not g.session_params['auto_advance']:
        myDlg = gui.Dlg(title="CR")
        myDlg.addField('Run Number', choices=schedules, initial=g.run_params['run'])
        myDlg.show()  # show dialog and wait for OK or Cancel
        if myDlg.OK:  # then the user pressed OK
            thisInfo = myDlg.data
        else:
            print('QUIT!')
            return -1  # the user hit cancel, so exit
        g.run_params['run'] = thisInfo[0]
    
    
    param_file = g.run_params['run'][0:-9] + '.params' #every .schedule file can (probably should) have a .params file associated with it to specify running parameters (including part of the output filename)
    StimToolLib.get_var_dict_from_file(os.path.join(os.path.dirname(__file__), param_file), g.run_params)
    g.prefix = StimToolLib.generate_prefix(g)


    schedule_file = os.path.join(os.path.dirname(__file__), g.run_params['run'])
    StimToolLib.general_setup(g)
    trial_types,images,durations,junk = StimToolLib.read_trial_structure(schedule_file, g.win, g.msg)
    durations = durations[0] #durations of the images/itis
    images = images[0]
    for i in images:
        i.size = (g.session_params['screen_x'], g.session_params['screen_y']) #set stimulus images to be fullscreen
    #for i in range(len(trial_types)): #convert to int for easier decoding
    #    trial_types[i] = int(trial_types[i])
    

    #g.rating_marker = visual.ImageStim(g.win, image=os.path.join(os.path.dirname(__file__),  'media/rating_mark_down.png'), pos=[285,-450], units='pix')
    #g.rating_marker_selected = visual.ImageStim(g.win, image=os.path.join(os.path.dirname(__file__),  'media/rating_mark_down_selected.png'), pos=[285,-450], units='pix')
    
    
    g.box = visual.ImageStim(g.win, image=os.path.join(os.path.dirname(__file__),  'media','YellowFrame.gif'), units='pix', mask=os.path.join(os.path.dirname(__file__),  'media/frame_mask.gif'))
    g.box.size = (g.session_params['screen_x'], g.session_params['screen_y']) #set stimulus images to be fullscreen

    #initialize question/response images
    g.question_image = visual.ImageStim(g.win, image=os.path.join(os.path.dirname(__file__), g.run_params['question_image']))
    g.response_image_1 = visual.ImageStim(g.win, image=os.path.join(os.path.dirname(__file__), g.run_params['response_image_1']))
    g.response_image_2 = visual.ImageStim(g.win, image=os.path.join(os.path.dirname(__file__), g.run_params['response_image_2']))
    g.response_image_3 = visual.ImageStim(g.win, image=os.path.join(os.path.dirname(__file__), g.run_params['response_image_3']))
    g.response_image_4 = visual.ImageStim(g.win, image=os.path.join(os.path.dirname(__file__), g.run_params['response_image_4']))
    g.frame_reminder = visual.ImageStim(g.win, image=os.path.join(os.path.dirname(__file__), 'media', 'frame_reminder.PNG'))

    start_time = data.getDateStr()
    fileName = os.path.join(g.prefix + '.csv')
    
    
    g.output = open(fileName, 'w')
    
    sorted_events = sorted(event_types.items(), key=operator.itemgetter(1))
    g.output.write('Administrator:,' + g.session_params['admin_id'] + ',Original File Name:,' + fileName + ',Time:,' + start_time + ',Parameter File:,' +  param_file + ',Event Codes:,' + str(sorted_events) + '\n')
    g.output.write('trial_number,trial_type,event_code,absolute_time,response_time,response,result\n')
    StimToolLib.task_start(StimToolLib.CUE_REACTIVITY_CODE, g)
    instruct_start_time = g.clock.getTime()
    StimToolLib.mark_event(g.output, 'NA', 'NA', event_types['INSTRUCT_ONSET'], instruct_start_time, 'NA', 'NA', 'NA', g.session_params['signal_parallel'], g.session_params['parallel_port_address'])

    StimToolLib.run_instructions_keyselect(os.path.join(os.path.dirname(__file__), 'media', 'instructions', g.run_params['instruction_schedule']), g)

    g.trial = 0
    if g.session_params['scan']:
        StimToolLib.wait_scan_start(g.win)
    else:
        StimToolLib.wait_start(g.win)
    instruct_end_time = g.clock.getTime()
    StimToolLib.mark_event(g.output, 'NA', 'NA', event_types['TASK_ONSET'], instruct_end_time, instruct_end_time - instruct_start_time, 'NA', 'NA', g.session_params['signal_parallel'], g.session_params['parallel_port_address'])
    g.ideal_trial_start = instruct_end_time
    g.win.flip()
    
    
    for t, i, d in zip(trial_types, images, durations):
        g.trial_type = t
        do_one_trial(t, i, d)
        g.trial = g.trial + 1
Code Example #9
    wrapWidth=None,
    color='white',
    colorSpace='rgb',
    opacity=1,
    depth=0.0)

# Initialize components for Routine "trial"
trialClock = core.Clock()
ISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI')
image = visual.ImageStim(
    win=win,
    name='image',
    image='bva3-315r_1024_512.jpg',
    mask=None,  #kamil
    ori=0,
    pos=[0, 0],
    size=None,
    color=[1, 1, 1],
    colorSpace=u'rgb',
    opacity=1,
    texRes=128,
    interpolate=False,
    depth=-1.0)

# Initialize components for Routine "feedback"
feedbackClock = core.Clock()
#msg variable just needs some value at start
msg = ''
text = visual.TextStim(win=win,
                       ori=0,
                       name='text',
                       text='nonsense',
Code Example #10
                          height=digit_height)
digit_c = visual.TextStim(win=mywin,
                          text='',
                          pos=[0, 0],
                          color=text_color,
                          height=digit_height)
text_center = visual.TextStim(win=mywin,
                              text='',
                              pos=[0, 0],
                              color=text_color,
                              height=text_height / 2,
                              wrapWidth=25,
                              alignHoriz='center')
face_win = visual.ImageStim(win=mywin,
                            image=path_feedback_win,
                            pos=[0, 0],
                            size=face_size,
                            units='cm')
face_los = visual.ImageStim(win=mywin,
                            image=path_feedback_los,
                            pos=[0, 0],
                            size=face_size,
                            units='cm')
instr = visual.ImageStim(win=mywin, image=path_instr_con)

### Experiment #################################################################
# Create clocks
timer = core.CountdownTimer()

# Data handlers
exp = data.ExperimentHandler(name='ot_Dyscalculia_training',
Code Example #11
    color='black',
    text=('It is very important to stay focused during the experiment. '
          'Please respond accurately.'))

Instr_1h = visual.TextStim(
    win=win,
    name='Instr_1h',
    color='black',
    text=('Performance feedback is provided. You will hear a '
          'high-frequency tone if you respond correctly and a low-frequency '
          'tone if you respond incorrectly. To give you a visualization of '
          'the task, press the spacebar to move on to the next page.'))

Mapping = visual.ImageStim(
    win=win,
    name='Mapping',  # Loads image mapping responses to keys
    image=os.path.join('stimuli', 'Mapping.png'),
    size=(1, 2))

Instr_1i = visual.TextStim(
    win=win,
    name='Instr_1i',
    color='black',
    text=('Since you will be asked to distinguish between uppercase and '
          'lowercase in some trials, some sample letters will be presented '
          'to get you familiar with the font size. Press the spacebar '
          'to continue.'))

Instr_1j = visual.TextStim(
    win=win,
    name='Instr_1j',
Code Example #12
        iloc = np.random.permutation(np.size(locd))

        for i in range(48):
            loc[0, i] = locd[0, iloc[i]]

        for i in range(48):

            if loc[0, i] == 1:

                C, whichring, cx, cy = grid_nc(i)

                # drawing a target
                if trials[2, itrial] == 0:
                    targetSide = 'r'
                    target = visual.ImageStim(win,
                                              image='stimuli/rlgraycircle.jpg',
                                              pos=[cx, cy])
                    target.draw()
                elif trials[2, itrial] == 1:
                    targetSide = 'l'
                    target = visual.ImageStim(win,
                                              image='stimuli/llgraycircle.jpg',
                                              pos=[cx, cy])
                    target.draw()

        win.flip()

        win.getMovieFrame()  # defaults to front buffer, i.e. what's on screen now
        win.saveMovieFrames(
            'exp1b_trial_' + str(itrial) + '_colour' + str(ssmc) +
Code Example #13
                             color=u'black',
                             colorSpace='rgb',
                             opacity=1,
                             depth=-1.0)
p_port = parallel.ParallelPort(address=u'0xE010')

# Initialize components for Routine "trial"
trialClock = core.Clock()
imageCard = visual.ImageStim(win=win,
                             name='imageCard',
                             image='sin',
                             mask=None,
                             ori=0,
                             pos=[0, 0],
                             size=[0.3, 0.5],
                             color=[1, 1, 1],
                             colorSpace='rgb',
                             opacity=1,
                             flipHoriz=False,
                             flipVert=False,
                             texRes=128,
                             interpolate=True,
                             depth=0.0)
textPrepResponse = visual.TextStim(win=win,
                                   ori=0,
                                   name='textPrepResponse',
                                   text=u'+',
                                   font=u'Arial',
                                   pos=[0, 0],
                                   height=0.1,
                                   wrapWidth=None,
Code Example #14
def Initialize():
    #declare global variables
    global window
    global mouse
    global kb
    global ratingScale
    global task1_list
    global log_file1
    global image_animal
    global image_award
    global image_prompt
    global judgement_sentence
    global judgement_prompt
    global log_file2

    #create visual window
    window = visual.Window([1280, 720],
                           monitor="testMonitor",
                           units="deg",
                           screen=0,
                           color="white")

    #initialize mouse
    mouse = event.Mouse()

    #initialize keyboard
    kb = keyboard.Keyboard()

    #initialize rating scale for task 1
    #ranging from 1 to 5, "1" and "5" labeled, only mouse click accepted, no extra conforming click, black scale line
    ratingScale = visual.RatingScale(window,
                                     low=1,
                                     high=5,
                                     scale="1-unacceptable, 5-perfect",
                                     mouseOnly=True,
                                     singleClick=True,
                                     lineColor="black")

    #creating task 1 sentence stimuli
    #("testing sentence", "sentence type/experimental condition")
    #5 sentence types, each having 5 sentences, 25 in total
    task1_list = [
        ("John doesn't like espresso or biscotti.", "SD"),
        ("Pat didn't enter the room or see her.", "SD"),
        ("The road wasn't very wide or easy to find.", "SD"),
        ("Andrew doesn't speak English or German.", "SD"),
        ("We did not close the door or the window.", "SD"),
        ("John doesn't like espresso and biscotti.", "SC"),
        ("Pat didn't enter the room and see her.", "SC"),
        ("The road wasn't very wide and easy to find.", "SC"),
        ("Andrew doesn't speak English and German.", "SC"),
        ("We didn't close the door and the window.", "SC"),
        ("It is not true that John likes espresso or biscotti.", "HD"),
        ("It is not true that Pat entered the room or saw her.", "HD"),
        ("It is not true that the road was very wide or easy to find.", "HD"),
        ("It is not true that Andrew speaks English or German.", "HD"),
        ("It is not true that we closed the door or the window.", "HD"),
        ("It is not true that John likes espresso and biscotti.", "HC"),
        ("It is not true that Pat entered the room and saw her.", "HC"),
        ("It is not true that the road was very wide and easy to find.", "HC"),
        ("It is not true that Andrew speaks English and German.", "HC"),
        ("It is not true that we closed the door and the window.", "HC"),
        ("John likes both espresso and biscotti.", "filler"),
        ("Pat didn't enter the room nor see her", "filler"),
        ("The road wasn't easy to find because it was not very wide.",
         "filler"), ("Andrew speaks German and English.", "filler"),
        ("We closed the door as well as the window.", "filler")
    ]

    #creating log file for task 1
    log_file1 = open("task1.csv", "a")

    #creating images for task 2, animal + award
    # 5 animal images, presented 5 units to the left of the center in the window
    image_animal = [
        visual.ImageStim(window, image="elephant.gif", pos=(-5, 0)),
        visual.ImageStim(window, image="lion.gif", pos=(-5, 0)),
        visual.ImageStim(window, image="monkey.gif", pos=(-5, 0)),
        visual.ImageStim(window, image="panda.gif", pos=(-5, 0)),
        visual.ImageStim(window, image="zebra.gif", pos=(-5, 0))
    ]
    # 3 award images, presented 5 units to the right of the center in the window
    image_award = [
        visual.ImageStim(window, image="x.gif", pos=(5, 0)),
        visual.ImageStim(window, image="trophy.gif", pos=(5, 0)),
        visual.ImageStim(window, image="star.gif", pos=(5, 0))
    ]

    #creating prompt during the image visualization in task 2--what does each award mean--on top of the images
    image_prompt = visual.TextStim(
        window,
        text="Cross: nothing.  Star: one vegetable.  Trophy: both vegetables",
        height=0.7,
        wrapWidth=30,
        color="black",
        pos=(0, 10))

    #creating 4 judgement sentences for task 2
    judgement_sentence = [
        visual.TextStim(
            window,
            text=
            "The animal ate the cake, but he didn't eat the carrot or the pepper.",
            height=1,
            wrapWidth=30,
            color="black"),
        visual.TextStim(
            window,
            text=
            "The animal ate the cake, but he didn't eat the carrot and the pepper.",
            height=1,
            wrapWidth=30,
            color="black"),
        visual.TextStim(
            window,
            text=
            "The animal ate the cake. He also ate both the carrot and the pepper.",
            height=1,
            wrapWidth=30,
            color="black"),
        visual.TextStim(
            window,
            text="The animal ate the cake and one of the vegetables.",
            height=1,
            wrapWidth=30,
            color="black")
    ]

    #creating prompt for the judgement task under the testing sentence
    judgement_prompt = visual.TextStim(
        window,
        text=
        "True or False? \n \n \n Press 'y' if true, 'n' if false, press SPACE if not sure about the answer.",
        height=0.8,
        wrapWidth=25,
        color="black",
        pos=(0, -5))

    # creating log file for task 2
    log_file2 = open("task2.csv", "a")
Code Example #15
# -*- coding: utf-8 -*-
from psychopy import visual, core
import os

# screen setup
myWin = visual.Window(fullscr=True,
                      monitor='SubMonitor',
                      allowGUI=False,
                      screen=1,
                      units='norm',
                      color=(1, 1, 1))

# get the path of the folder containing this script, then move into the stim folder one level below it (to load the image files)
curD = os.getcwd()
os.chdir(os.path.join(curD, 'stimli'))

# list of image files to use
StimList = ["card1.jpg", "card2.jpg", "card3.jpg", "card4.jpg"]
try:
    for i in range(4):
        # present each image stimulus for 2 seconds
        cardImage = visual.ImageStim(win=myWin,
                                     image=StimList[i],
                                     pos=(0, 0),
                                     units='norm')
        cardImage.draw()
        myWin.flip()
        core.wait(2)
except TypeError as e:
    print(e)
Code Example #16
from psychopy import visual, core, event  # imports this exercise template relies on

# open a white full screen window
win = visual.Window(fullscr=True, allowGUI=False, color='white', units='height')

# uncomment if you use a clock. Optional because we didn't cover timing this week, 
# but you can find examples in the tutorial code 
#trialClock = core.Clock()

#%% up to you!
# this is where you build a trial that you might actually use one day!
# just try to make one trial ordering your lines of code according to the 
# sequence of events that happen on one trial
# if you're stuck you can use the responseExercise.py answer as a starting point 

# maybe start by making stimulus objects (e.g. myPic = visual.ImageStim(...))  

LetterA = visual.ImageStim(win, image=r'letters\A.jpg', pos=(0,0), size=(2,2))

#myText = ... #note default color is white, which you can't see on a white screen!
myTextquestion = visual.TextStim(win, text="Was this letter on the study list?", pos = (0,0.5), color="black")
myTextF = visual.TextStim(win, text="f = yes", pos = (-0.25,-0.5), color="black")
myTextJ = visual.TextStim(win, text="j = no", pos = (0.25,-0.5), color="black")

# then draw all stimuli
LetterA.draw()
myTextquestion.draw()
myTextF.draw()
myTextJ.draw()

# then flip your window
win.flip()
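
# then record your responses and clean up -- a minimal sketch mirroring the
# f = yes / j = no prompts above (uses the psychopy imports at the top)
keys = event.waitKeys(keyList=['f', 'j'])   # wait for a yes/no key press
core.wait(0.5)                              # brief pause before closing
win.close()                                 # close the window properly
core.quit()                                 # shut down PsychoPy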
Code Example #17
    color=(-1, -1, -1))
txt_input = visual.TextStim(win,
                            color=(-1, -1, -1),
                            units='norm',
                            pos=(0, -0.4),
                            height=0.1)
txt_legend = visual.TextStim(
    win,
    pos=(0, 40),
    height=20,
    color=(-1, -1, -1),
    text=
    "1=>not at all\n2=>not so much\n3=> can't say\n4=>somehow motivated\n5=>really motivated"
)

grid = visual.ImageStim(win=win, image='frame.png')

square_stim = visual.ImageStim(win=win, image='box.png', units='pix')

clock = core.Clock()  #general timer

stim_pool = [{
    'area': 'a1',
    'x': -150,
    'y': 150
}, {
    'area': 'a2',
    'x': 0,
    'y': 150
}, {
    'area': 'a3',
Code Example #18
    color='black', colorSpace='rgb', opacity=1,
    depth=-4.0)

# Initialize components for Routine "prefixation"
prefixationClock = core.Clock()
textPreFixation = visual.TextStim(win=win, ori=0, name='textPreFixation',
    text='+',    font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color='black', colorSpace='rgb', opacity=1,
    depth=0.0)

# Initialize components for Routine "trial"
trialClock = core.Clock()
imageLeft = visual.ImageStim(win=win, name='imageLeft',
    image='sin', mask=None,
    ori=0, pos=[-0.3, 0], size=[0.3, 0.5],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    flipHoriz=False, flipVert=False,
    texRes=128, interpolate=True, depth=0.0)
imageRight = visual.ImageStim(win=win, name='imageRight',
    image='sin', mask=None,
    ori=0, pos=[0.3, 0], size=[0.3, 0.5],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    flipHoriz=False, flipVert=False,
    texRes=128, interpolate=True, depth=-1.0)
for n in range(10): #Cedrus connection doesn't always work first time!
    try:
        devices = pyxid.get_xid_devices()
        core.wait(0.1)
        buttonBox = devices[0]
        break #once we found the device we can break the loop
    except:
Code Example #19
File: taste_task.py  Project: niblunc/taste_task
    print('found:', k)
    if k.count(
            subdata['quit_key']) > 0:  # if subdata['quit_key'] is pressed...
        print('quit key pressed')
        return True
    else:
        return False


# MONITOR
win = visual.Window(monSize,
                    fullscr=info['fullscr'],
                    monitor='testMonitor',
                    units='deg')
visual_stim = visual.ImageStim(win,
                               image=N.zeros((300, 300)),
                               size=(0.75, 0.75),
                               units='height')
# STIMS
fixation_text = visual.TextStim(win, text='+', pos=(0, 0), height=2)

scan_trigger_text = visual.TextStim(win,
                                    text='Waiting for scan trigger...',
                                    pos=(0, 0))

#global settings
diameter = 26.59
mls_to_deliver = 0.5
delivery_time = 2.0
cue_time = 2.0
wait_time = 2.0
rinse_time = 2.0
Code Example #20
def do_run(stimset):
    #instructions
    if version in ('A', 'B'):
        instruct_screen.draw()
    else:
        instruct_screen_practice.draw()
    win.flip()
    event.waitKeys(keyList=('space','2'))
    
    if stimset == 'face':
        instruct_screen1_face.draw()
        win.flip()
    else:
        instruct_screen1_image.draw()
        win.flip()
    event.waitKeys(keyList=('space','2'))
    
    instruct_screen2.draw()
    win.flip()
    event.waitKeys(keyList=('space','2'))
    
    if stimset == 'face':
        if version in ('A', 'B'):
            instruct_screen3_face.draw()
        else:
            instruct_screen3_face_practice.draw()
    else:
        if version in ('A', 'B'):
            instruct_screen3_image.draw()
        else:
            instruct_screen3_image_practice.draw()
    win.flip()
    event.waitKeys(keyList=('space','2'))
    
    #wait for scan trigger 
    if version in ('A', 'B'):
        ready_screen.draw()
    else:
        ready_screen_practice.draw()
    win.flip()
    event.waitKeys(keyList=('equal'))
    run_start = time.time()
    
    #set Version ITI, Image orders, feedback order
    pic_path = os.path.join(os.getcwd(), 'pictureFolder', f'{version}_{stimset}')
    
    #lists to store logging
    clock = core.Clock()
    clock.reset()
    onset = []
    duration = []
    condition = []
    resp_val = []
    responsetime = []
    b_1 = []
    b_2 = []
    b_3 = []

    for trial in reference.iterrows():
        trial_start = clock.getTime()
        row_counter = trial[0]
        pic_L = visual.ImageStim(win,os.path.join(pic_path, reference.loc[reference.index[row_counter], f'{version}_{stimset}_L']), pos =(-7,0),size=(11.2,17.14))
        pic_R = visual.ImageStim(win,os.path.join(pic_path, reference.loc[reference.index[row_counter], f'{version}_{stimset}_R']), pos =(7,0),size=(11.2,17.14))
        border = visual.ShapeStim(win, vertices=pic_L.verticesPix, units='pix', fillColor = 'grey', lineColor = 'grey')
        border2 = visual.ShapeStim(win, vertices=pic_R.verticesPix, units='pix', fillColor = 'grey', lineColor = 'grey')

        trial_timer = core.CountdownTimer(5.2)   
        while trial_timer.getTime() > 0:
            #1st fixation
            if stimset == 'image':
                timer = core.CountdownTimer(fixation_time + 0.034)
            else:
                timer = core.CountdownTimer(fixation_time)
            while timer.getTime() > 0:
                fixation.draw()
                win.flip()
            fixationPre_dur = clock.getTime() - trial_start
            
            #decision_phase
            timer = core.CountdownTimer(decision_time)
            resp = event.getKeys(keyList = responseKeys)
            decision_onset = clock.getTime()
            while timer.getTime() > 0:
                pic_L.draw()
                pic_R.draw()
                win.flip()
                resp = event.getKeys(keyList = responseKeys)
                if len(resp)>0:
                    if 'z' in resp:
                        log.to_csv(os.path.join("data",subj_id, f"{subj_id}_{stimset}-{version}.tsv"), sep='\t', index = False)
                        core.quit()
                    if selected == 2 or 3:
                        selected = int(resp[0])
                        resp_onset = clock.getTime()
                        rt = resp_onset - decision_onset
                        border.autoDraw=True
                        border2.autoDraw=True
                        pic_L.draw()
                        pic_R.draw()
                        win.flip()
                        core.wait(decision_time - rt)
                        break
                else:
                    selected = '999'
                    rt = '999'
                    core.wait(.25)
            decision_dur = clock.getTime() - decision_onset
            border.autoDraw=False
            border2.autoDraw=False
            
            #2nd fixation
            timer = core.CountdownTimer(fixation_time)
            fixationPost_onset = clock.getTime()
            while timer.getTime() > 0:
                fixation.draw()
                win.flip()
            fixationPost_dur = clock.getTime() - fixationPost_onset

            #feedback
            timer = core.CountdownTimer(fb_dur)
            feedback_onset = clock.getTime()
            fb_type = reference.loc[reference.index[row_counter], f'{version}_feedback']
            if fb_type == 'loss':
                while timer.getTime() > 0:
                    down_arrow.draw()
                    win.flip()   
            elif fb_type == 'win':
                while timer.getTime() > 0:
                    up_arrow.draw()
                    win.flip() 
            else:
                print('Feedback Error')
            feedback_dur = clock.getTime() - feedback_onset
            
            #ITI
            ITI_onset = clock.getTime()
            ITI = reference.loc[reference.index[row_counter], f'{version}_ITI']
            timer = core.CountdownTimer(ITI)
            while timer.getTime() > 0:
                fixation.draw()
                win.flip()
                core.wait(ITI)
            ITI_dur = clock.getTime() - ITI_onset
            
            #logging
            condition.append('fixation_1')
            onset.append(trial_start)
            duration.append(fixationPre_dur)
            resp_val.append('999')
            responsetime.append('999')

            condition.append('face')
            onset.append(decision_onset)
            duration.append(decision_dur)
            resp_val.append(selected)
            responsetime.append(rt)
            
            condition.append('fixation_2')
            onset.append(fixationPost_onset)
            duration.append(fixationPost_dur)
            resp_val.append('999')
            responsetime.append('999')
            
            condition.append('feedback ' + fb_type)
            onset.append(feedback_onset)
            duration.append(feedback_dur)
            resp_val.append('999')
            responsetime.append('999')
           
            condition.append('ITI')
            onset.append(ITI_onset)
            duration.append(ITI_dur)
            resp_val.append('999')
            responsetime.append('999')
            
            #BIDS Log
            b_1.append(decision_onset)
            b_2.append(decision_dur)
            b_3.append(fb_type)

        #data to frame 
        log = pd.DataFrame(
                {'onset':onset, 
                'duration':duration,
                'trial_type':condition,
                'rt':responsetime,
                'resp':resp_val})

        bidsEvents = pd.DataFrame(
                {'onset':b_1, 
                'duration':b_2,
                'trial_type':b_3})
        log.to_csv(os.path.join("data",subj_id, f"sub-{subj_id}_{stimset}-{version}.tsv"), sep='\t', index = False)
        bidsEvents.to_csv(os.path.join("data",subj_id, f"sub-{subj_id}_task-socialReward-{stimset}-{version}.tsv"), sep='\t', index = False)
    run_end = time.time()
    run_length = run_end -run_start
    print(run_length)
    event.clearEvents()
    return;
Code Example #21
win = visual.Window(size=(800, 600), fullscr=True, screen=1, allowGUI=False, allowStencil=False,
    monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
    blendMode='avg', useFBO=True,
    )
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate']!=None:
    frameDur = 1.0/round(expInfo['frameRate'])
else:
    frameDur = 1.0/60.0 # couldn't get a reliable measure so guess

# Initialize components for Routine "happy_1"
happy_1Clock = core.Clock()
top = visual.ImageStim(win=win, name='top',
    image='sin', mask=None,
    ori=0, pos=[0, 0.4], size=[0.25, 0.5],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    flipHoriz=False, flipVert=False,
    texRes=128, interpolate=True, depth=0.0)
left = visual.ImageStim(win=win, name='left',
    image='sin', mask=None,
    ori=0, pos=[-0.3, -0.2], size=[0.25, 0.5],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    flipHoriz=False, flipVert=False,
    texRes=128, interpolate=True, depth=-1.0)
right = visual.ImageStim(win=win, name='right',
    image='sin', mask=None,
    ori=0, pos=[0.3, -0.2], size=[0.25, 0.5],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    flipHoriz=False, flipVert=False,
    texRes=128, interpolate=True, depth=-2.0)
Code Example #22
def loadImage(filename):
    return visual.ImageStim(win=mywin, image=filename)
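
# usage sketch (hypothetical filename; assumes mywin is the Window opened elsewhere in this script)
face_stim = loadImage('face01.png')
face_stim.draw()
mywin.flip()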
Code Example #23
def context_binding(subject_stim, context_bind_items, count):
    encoding_pres_items = subject_stim[subject_stim['Part'] == 'encoding']

    temp_instr = visual.TextStim(win, instr[16], color='black', pos=[0, 0])
    temp_instr.draw()
    win.update()
    event.waitKeys(keyList=['space'])
    win.flip()

    random.shuffle(character_list)
    for character in character_list:
        if event.getKeys(['escape']):
            win.close()
            core.quit()
        subject_stim['Scene'].iloc[n].split('_')[0]
        items_already_pres = subject_stim['Item'].tolist(
        ) + subject_stim['Lure_1'].tolist() + subject_stim['Lure_2'].tolist()

        # Get two random category lures
        cb_items = random.sample(
            encoding_pres_items[encoding_pres_items['Character'] == character]
            ['Item'].tolist(), 3)
        # Get lure items that were not presented during encoding and set random color
        scene_cb = encoding_pres_items[encoding_pres_items['Item'] ==
                                       cb_items[0]]['Scene'].iloc[0]
        target_cb = encoding_pres_items[encoding_pres_items['Item'] ==
                                        cb_items[0]][[
                                            'Item', 'Color'
                                        ]].iloc[0].str.cat(sep='_')
        lure1_cb = encoding_pres_items[encoding_pres_items['Item'] ==
                                       cb_items[1]][[
                                           'Item', 'Color'
                                       ]].iloc[0].str.cat(sep='_')
        lure2_cb = encoding_pres_items[encoding_pres_items['Item'] ==
                                       cb_items[2]][[
                                           'Item', 'Color'
                                       ]].iloc[0].str.cat(sep='_')
        context_bind_items = context_bind_items + [
            target_cb.rsplit('_', 1)[0],
            lure1_cb.rsplit('_', 1)[0],
            lure2_cb.rsplit('_', 1)[0]
        ]
        subject_stim.loc[count, 'Part'] = 'context_binding'
        subject_stim.loc[count, 'Character'] = character
        subject_stim.loc[count, 'Scene'] = scene_cb
        subject_stim.loc[count, 'Item'] = target_cb.rsplit('_', 1)[0]
        subject_stim.loc[count, 'Color'] = target_cb.rsplit('_', 1)[1]
        subject_stim.loc[count, 'Lure_1'] = lure1_cb
        subject_stim.loc[count, 'Lure_2'] = lure2_cb

        # Present stimuli
        scene_stim = Image.open(scene_dir + scene_cb + '.png')
        lure1_stim = Image.open(item_dir + character + '/' + [
            i for i in os.listdir(item_dir + character + '/')
            if i.startswith(lure1_cb + '.png')
        ][0])
        lure2_stim = Image.open(item_dir + character + '/' + [
            i for i in os.listdir(item_dir + character + '/')
            if i.startswith(lure2_cb + '.png')
        ][0])
        target_stim = Image.open(item_dir + character + '/' + [
            i for i in os.listdir(item_dir + character + '/')
            if i.startswith(target_cb + '.png')
        ][0])
        char_stim.thumbnail(item_size, Image.ANTIALIAS)
        lure1_stim.thumbnail(item_size, Image.ANTIALIAS)
        lure2_stim.thumbnail(item_size, Image.ANTIALIAS)
        target_stim.thumbnail(item_size, Image.ANTIALIAS)
        stim_pos = [[-0.5, -0.6], [0, -0.6], [0.5, -0.6]]
        random.shuffle(stim_pos)
        scene_pres = visual.ImageStim(win, scene_stim, pos=[0, 0.35])
        lure1_pres = visual.ImageStim(win, lure1_stim, pos=stim_pos[0])
        lure2_pres = visual.ImageStim(win, lure2_stim, pos=stim_pos[1])
        target_pres = visual.ImageStim(win, target_stim, pos=stim_pos[2])

        scene_pres.draw()
        lure1_pres.draw()
        lure2_pres.draw()
        target_pres.draw()
        win.update()
        timer.reset()

        # Record response and give feedback
        while True:
            if mouse.isPressedIn(target_pres):
                subject_stim.loc[count, 'Reaction_Time'] = timer.getTime()
                subject_stim.loc[count, 'Answer'] = target_cb
                break
            elif mouse.isPressedIn(lure1_pres):
                subject_stim.loc[count, 'Reaction_Time'] = timer.getTime()
                subject_stim.loc[count, 'Answer'] = lure1_cb
                break
            elif mouse.isPressedIn(lure2_pres):
                subject_stim.loc[count, 'Reaction_Time'] = timer.getTime()
                subject_stim.loc[count, 'Answer'] = lure2_cb
                break
        win.update()
        win.flip()
        fix_pres = scene_pres = visual.ImageStim(win, fixation, pos=[0, 0])
        fix_pres.draw()
        win.update()
        core.wait(time_fixcr)
        win.flip()
        subject_stim.to_csv(save_subj_file_name)
        count = count + 1

    return subject_stim, context_bind_items, count
Code Example #24
                            wrapWidth=screen_size[0] - 400,
                            height=screen_size[1] / 15,
                            alignHoriz='center',
                            colorSpace='rgb',
                            color=[1, -1, -1],
                            bold=True)

choice_emphasis = visual.Rect(win=window,
                              units='pix',
                              height=screen_size[0] / 7,
                              width=screen_size[0] / 7,
                              lineColorSpace='rgb',
                              lineColor=[1, 1, 1],
                              lineWidth=5)
purple_block = visual.ImageStim(window,
                                image='./images/purple_block.png',
                                units='pix',
                                size=[screen_size[0] / 15])
orange_block = visual.ImageStim(window,
                                image='./images/orange_block.png',
                                units='pix',
                                size=[screen_size[0] / 15])
coin = visual.ImageStim(window,
                        image='./images/coin.png',
                        units='pix',
                        size=[screen_size[0] / 20],
                        pos=[0, 200])
treasure_chest = visual.ImageStim(window,
                                  image='./images/treasure_chest.png',
                                  units='pix',
                                  size=[screen_size[0] / 18],
                                  pos=[800, screen_size[1] / 2.5])
Code Example #25
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()

# Initialize components for Routine "instruction"
instructionClock = core.Clock()
instrTxt = visual.TextStim(win=win, name='instrTxt',
    text='برای مواجهه با یک آزمون چالشی جدید آماده ای؟\n\nبخاطر داشته باش، واژه ها را در نظر نگیر و \nرنگها را انتخاب کن\n\n\nبرای ادامه شروع را انتخاب کنید\n',
    font='Arial',
    pos=[0, 0], height=0.05, wrapWidth=None, ori=0, 
    color=[1, 1, 1], colorSpace='rgb', opacity=1, 
    languageStyle='Arabic',
    depth=0.0);
imgStart = visual.ImageStim(
    win=win,
    name='imgStart', units='pix', 
    image='Start.jpg', mask=None,
    ori=0, pos=[0, -310], size=[125, 125],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    flipHoriz=False, flipVert=False,
    texRes=128, interpolate=True, depth=-1.0)
resBegin = event.Mouse(win=win)
x, y = [None, None]
resBegin.mouseClock = core.Clock()

# Initialize components for Routine "trial"
trialClock = core.Clock()
targetTxt = visual.TextStim(win=win, name='targetTxt',
    text='default text',
    font='Arial',
    pos=[10,0], height=0.1, wrapWidth=None, ori=0, 
    color='white', colorSpace='rgb', opacity=1, 
    languageStyle='Arabic',
Code Example #26
0
                              level=logging.getLevel('TRIG'))

# set-up the triggers
trigger = triggers.Trigger(
    port=io.getSessionMetaData()['user_variables']['hw']['parallel_port'])

for phase in exp_structure:
    # exp.addLoop(phase['trials'])  # register current trial in experiment structure
    io.clearEvents()
    io.sendMessageEvent('BEGIN PHASE {}'.format(phase['name']), category='EXP')
    io.createTrialHandlerRecordTable(phase)

    # show instructions
    # this is only true before the subject's learning phase
    if phase['img']:
        img = visual.ImageStim(win, image=phase['img'])
        img.draw()
        win.flip()
        percepts.waitKeyPress(io, key=' ', timeout=20)

    # show the current phase
    text = visual.TextStim(win, text=phase['text'])
    text.draw()
    win.flip()
    percepts.waitKeyPress(io, key=' ', timeout=20)

    for trial in phase['trials']:
        io.addTrialHandlerRecord(trial)
        if not trial.thisN % 28:
            # every 28 trials (at the start and halfway through the 14×4 test trials), give the subject a longer break and launch a recalibration
            pass  # TODO launch calibration
Code Example #27
0
File: Instructions.py Project: SNaGLab/BART_baseline
def Run_AllIntro(MyWin, path, skip, competitive):
    '''
    Runs all parts of the introduction of the experiment.
    '''
    Instruct = Instructions(MyWin, path)
    if skip not in ['all', 'tutorial']:
        Tutorial.run_Tutorial(MyWin, competitive)  # Run the tutorial

    if skip != 'all':
        Questions = questions('/Users/JMP/Desktop/', MyWin)
        if competitive == '1':
            File = open(os.getcwd() + '/instruction_quests.json')
        else:
            File = open(os.getcwd() + '/instruction_quests 2.json')
        j = json.load(File)  # Import json with questions for quiz

        # Ask questions
        for item in j[:-1]:
            print(item[0])
            print(item[1])
            print(item[2])
            Questions.UnderstandingQuestionWithField(item[0], item[1], item[2])
        Questions.UnderstandingQuestionWithTrueFalse(j[-1][0], j[-1][1],
                                                     j[-1][2])

        # Get maximum rating and distributions
        Max = Instruct.MaxRatings()
        vid = visual.MovieStim3(MyWin,
                                os.getcwd() + '/Resources/dist_vid.mp4',
                                size=[700, 400],
                                pos=[0, -150])
        distIm = visual.ImageStim(MyWin,
                                  os.getcwd() + '/Resources/DistImage.png',
                                  size=[0.8, 0.8],
                                  pos=[0, -.5])
        bButton = Tutorial.InstructionBox(
            MyWin, [0, 0.5],
            "The scale below represents the size of a balloon from 0 pumps (leftmost column) to your estimated maximum balloon size (rightmost column). Each column represents a slightly larger balloon. \nAt what size do you think the balloons in this task are most likely to pop? You can place a bet by tapping one of the columns with your cursor.",
            True)
        bButton.buttonwait(extras=[distIm])

        singleDist = Distributor(
            MyWin,
            Max,
            instructions=
            'Using only one bet, where do you think the balloons are most likely to pop?',
            maxTotal=1).initialize()
        print(singleDist)
        with open(path + '/singleBet.csv', mode='a') as MyFile:
            X = ','.join(str(e) for e in singleDist)
            MyFile.write('%s\n' % X)

        bButton = Tutorial.InstructionBox(
            MyWin, [0, 0.5],
            "Imagine that you would play 50 balloons. At what size do you think these balloons will pop?\n\n You can place a bet by tapping one of the columns with your cursor. The more bets you place in a column, the more you expect that the balloons will pop at that size.",
            True)
        bButton.buttonwait(extras=[vid])
        bButton = Tutorial.InstructionBox(
            MyWin, [0, 0.5],
            "We will pay you for the accuracy of your bets by comparing your bets against one randomly drawn popped balloon from the experiment today. The more bets you place on the correct column, the more you win.",
            True)
        bButton.buttonwait(extras=[vid])

        Instruct.dists(0, Max)

        bButton = Tutorial.InstructionBox(
            MyWin, [0, 0.5],
            "With the same scale as before we would like you to indicate where you think other participants in todays session will pump to before they cash in.",
            True)
        bButton.buttonwait(extras=[distIm])

        singleDist = Distributor(
            MyWin,
            Max,
            instructions=
            "Using only one bet, where do you think other participants in today's session are likely to pump to and cash in?",
            maxTotal=1).initialize()
        print(singleDist)
        with open(path + '/singleBet.csv', mode='a') as MyFile:
            X = ','.join(str(e) for e in singleDist)
            MyFile.write('%s\n' % X)

        bButton = Tutorial.InstructionBox(
            MyWin, [0, 0.5],
            "Imagine that you would see another player from today's session play with 50 balloons. How many pumps do you think that player will make before cashing in?",
            True)
        bButton.buttonwait(extras=[vid])

        bButton = Tutorial.InstructionBox(
            MyWin, [0, 0.5],
            "We will pay you for the accuracy of your rating by comparing your bets against one randomly drawn cashed in balloon from another participant in today's experiment. The more bets that you place in the correct column, the more you win..",
            True)
        bButton.buttonwait(extras=[vid])
        Instruct.dists(1, Max)
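
Both singleBet.csv writes above build each row by hand with ','.join(...) and a manual newline. A hedged Python 3 equivalent using the standard csv module (same file name and append-mode layout assumed) would be:

import csv

def append_bet_row(path, bets):
    # append one distribution of bets as a single comma-separated row
    with open(path + '/singleBet.csv', mode='a', newline='') as f:
        csv.writer(f).writerow(bets)
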
Code Example #28
0
File: grating.py Project: krg-nandu/stimuli-display
def displayCircularStimuli(directions, colors, colors_wheel, thicknesses,
                           speeds, duration):
    global mywin, plate, white
    mywin = visual.Window([1280, 1024],
                          monitor="Dell Inc. 17",
                          units="pix",
                          fullscr=False,
                          screen=1,
                          color='white')
    plate = visual.Rect(win=mywin,
                        size=(width_plate, height_plate),
                        lineColor=[0, 0, 0],
                        lineColorSpace="rgb255",
                        lineWidth=4)
    white = visual.ImageStim(win=mywin,
                             image="Solid_white.png",
                             size=(1280, 1024),
                             pos=[0, 0])

    ops = []
    for d in directions:
        if d == 'Clockwise':
            ops.append('+')
        else:
            ops.append('-')

    # the six rotating wheel stimuli are identical except for their line
    # thickness, colours and position on the plate (3 columns x 2 rows),
    # so build them in a loop rather than six copy-pasted blocks
    positions = [(-width_plate / 6.4, height_plate / 8.2),
                 (0, height_plate / 8.2),
                 (width_plate / 6.4, height_plate / 8.2),
                 (-width_plate / 6.4, -height_plate / 8.2),
                 (0, -height_plate / 8.2),
                 (width_plate / 6.4, -height_plate / 8.2)]

    wheel_stims = []   # rotating wheel outlines
    hub_stims = []     # filled circles drawn underneath each wheel
    for i, pos in enumerate(positions):
        wheel_stims.append(visual.ShapeStim(mywin,
                                            units='',
                                            lineWidth=thicknesses[i],
                                            lineColor=colors_wheel[i],
                                            lineColorSpace='rgb',
                                            fillColor='red',
                                            fillColorSpace='rgb',
                                            vertices=np.multiply(wheel, scalingFactor),
                                            windingRule=None,
                                            closeShape=True,
                                            pos=pos,
                                            size=1,
                                            ori=0.0,
                                            opacity=1.0,
                                            contrast=1.0,
                                            depth=0,
                                            interpolate=True,
                                            name=None,
                                            autoLog=None,
                                            autoDraw=False))
        hub_stims.append(visual.Circle(mywin,
                                       radius=scalingFactor,
                                       fillColor=colors[i],
                                       pos=pos))

    mywin.winHandle.maximize()
    mywin.winHandle.set_fullscreen(True)
    mywin.winHandle.activate()

    # display white
    while True:
        white.draw()
        plate.draw()
        mywin.flip()
        if event.waitKeys(0.5) == ["escape"]:
            break
    event.clearEvents()
    startCamera(duration)
    clock = core.Clock()
    for frameN in range(duration):
        white.draw()
        plate.draw()

        # rotate each wheel by its per-frame speed in its assigned direction,
        # drawing the filled hub first so the wheel outline sits on top
        for i, wheel_stim in enumerate(wheel_stims):
            wheel_stim.setOri(speeds[i], operation=ops[i])
            hub_stims[i].draw()
            wheel_stim.draw()

        mywin.logOnFlip(level=logging.CRITICAL, msg='sent on actual flip')
        mywin.flip()

    for frameN in range(300):
        white.draw()
        plate.draw()
        mywin.flip()

    mywin.close()
    core.quit()
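
Each wheel above is rotated by calling setOri(speed, operation=op) once per frame, so the values in speeds are effectively degrees per frame rather than degrees per second. A minimal hedged sketch of the same frame-based rotation applied to a single, generic stimulus:

from psychopy import visual

win = visual.Window([800, 600], units='pix', color='white')
grating = visual.GratingStim(win, tex='sin', mask='circle', size=200)

deg_per_frame = 2.0                 # orientation step added on every frame
for frameN in range(180):           # roughly 3 s at a 60 Hz refresh rate
    grating.setOri(deg_per_frame, operation='+')
    grating.draw()
    win.flip()

win.close()
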
Code Example #29
0
#Column headers for output file
outStr = "Time\tValueDisplayed\tButtonPressed(z=yes,m=no)\tResponseTime\t"
outputFile.write(outStr + "eol\n")

# Setup the Psycho variables (screen, stimuli, sounds, ect)
win = visual.Window(fullscr=True, screen=0, allowGUI=False, allowStencil=False, monitor='FenskeLabTestingComps', color=[0,0,0], colorSpace='rgb', units='deg')
mon = monitors.Monitor('FenskeLabTestingComps')
trialClock = core.Clock()
eventClock = core.Clock()
evalClock = core.Clock()
keyResp = event.BuilderKeyResponse()  # create an object of type KeyResponse

#Base trial value, will change when task starts
trialValue = 10.00

instructionsOne = visual.ImageStim(win=win,image = 'Slide16.jpg', pos=[0,0], size=(20,15))
instructionsOne.setAutoDraw(True)
win.flip()
event.waitKeys()
instructionsOne.setAutoDraw(False)

instructionsTwo = visual.ImageStim(win=win,image = 'Slide17.jpg', pos=[0,0], size=(20,15))
instructionsTwo.setAutoDraw(True)
win.flip()
event.waitKeys()
instructionsTwo.setAutoDraw(False)
win.flip()

valueArray = [0.50,1.00,1.50,2.00,2.50,3.00,3.50,4.00,4.50,5.00,5.50,6.00,6.50,7.00,7.50,8.00,8.50,9.00,9.50,10.00]
random.shuffle(valueArray)
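
The two instruction screens above follow the same pattern: turn autoDraw on, flip so the slide appears, wait for any key, then turn autoDraw off again. A small hedged helper wrapping that pattern (the function name is illustrative and assumes the visual and event imports already used in this snippet):

def show_slide(win, image_path, size=(20, 15)):
    # display one instruction slide until any key is pressed
    slide = visual.ImageStim(win=win, image=image_path, pos=[0, 0], size=size)
    slide.setAutoDraw(True)
    win.flip()
    event.waitKeys()
    slide.setAutoDraw(False)
    win.flip()

# e.g. show_slide(win, 'Slide16.jpg') followed by show_slide(win, 'Slide17.jpg')
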
Code Example #30
0
File: threads.py Project: Ismail774403783/-
    def run(self, *args):
        """
        The run method contains your experiment logic. It is equivalent to what would go in the main
        script.py file of a standard PsychoPy experiment setup. That is all there is to it, really.
        """

        self.trial_conditions = ExperimentVariableProvider(
            'trial_conditions.xls', 'BLOCK', None, False, True)
        self.hub.initializeConditionVariableTable(self.trial_conditions)

        selected_eyetracker_name = args[0]
        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        tracker = self.hub.devices.tracker
        display = self.hub.devices.display
        kb = self.hub.devices.kb
        mouse = self.hub.devices.mouse

        # Create a psychopy window, full screen resolution, full screen mode...
        #
        res = display.getPixelResolution()
        window = visual.Window(res,
                               monitor=display.getPsychopyMonitorName(),
                               units=display.getCoordinateType(),
                               fullscr=True,
                               allowGUI=False,
                               screen=display.getIndex())

        # Hide the 'system mouse cursor'
        #
        mouse.setSystemCursorVisibility(False)

        # Start by running the eye tracker default setup procedure.
        # if validation results are returned, they would be in the form of a dict,
        # so print them, otherwise just check that EYETRACKER_OK was returned.
        #
        # minimize the psychopy experiment window
        #
        window.winHandle.minimize()
        result = tracker.runSetupProcedure()
        if isinstance(result, dict):
            print "Validation Accuracy Results: ", result
        elif result != EyeTrackerConstants.EYETRACKER_OK:
            print "An error occurred during eye tracker user setup: ", EyeTrackerConstants.getName(
                result)
        # restore the psychopy experiment window
        #
        window.winHandle.maximize()
        window.winHandle.activate()
        # Create a dict of image stim for trials and a gaze blob to show gaze position.
        #
        display_coord_type = display.getCoordinateType()
        image_cache = dict()
        image_names = [
            'canal.jpg', 'fall.jpg', 'party.jpg', 'swimming.jpg', 'lake.jpg'
        ]

        for iname in image_names:
            image_cache[iname] = visual.ImageStim(window,
                                                  image=os.path.join(
                                                      './images/', iname),
                                                  name=iname,
                                                  units=display_coord_type)

        gaze_dot = visual.GratingStim(window,
                                      tex=None,
                                      mask="gauss",
                                      pos=(0, 0),
                                      size=(66, 66),
                                      color='green',
                                      units=display_coord_type)
        instructions_text_stim = visual.TextStim(window,
                                                 text='',
                                                 pos=[0, 0],
                                                 height=24,
                                                 color=[-1, -1, -1],
                                                 colorSpace='rgb',
                                                 alignHoriz='center',
                                                 alignVert='center',
                                                 wrapWidth=window.size[0] * .9)

        # Update Instruction Text and display on screen.
        # Send Message to ioHub DataStore with Exp. Start Screen display time.
        #
        instuction_text = "Press Any Key to Start Experiment."
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time = window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)

        # wait until a key event occurs after the instructions are displayed
        self.hub.clearEvents('all')
        while not kb.getEvents():
            self.hub.wait(0.2)

        # Send some information to the ioHub DataStore as experiment messages
        # including the eye tracker being used for this session.
        #
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
        self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(
            getCurrentDateTimeString()))
        self.hub.sendMessageEvent(
            text="Experiment ID: {0}, Session ID: {1}".format(
                self.hub.experimentID, self.hub.experimentSessionID))
        self.hub.sendMessageEvent(
            text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".
            format(display.getIndex(), display.getPixelResolution(),
                   display.getCoordinateType()))
        self.hub.sendMessageEvent(
            text="Calculated Pixels Per Degree: {0} x, {1} y".format(
                *display.getPixelsPerDegree()))
        self.hub.sendMessageEvent(text="Eye Tracker being Used: {0}".format(
            selected_eyetracker_name))
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")

        practice_blocks = self.trial_conditions.getPracticeBlocks()
        exp_blocks = self.trial_conditions.getExperimentBlocks()
        block_types = [practice_blocks, exp_blocks]

        for blocks in block_types:
            # for each block in the group of blocks.....
            for trial_set in blocks.getNextConditionSet():
                self.hub.clearEvents('all')
                t = 0
                for trial in trial_set.getNextConditionSet():
                    # Update the instruction screen text...
                    #
                    instuction_text = "Press Space Key To Start Trial %d" % t
                    instructions_text_stim.setText(instuction_text)
                    instructions_text_stim.draw()
                    flip_time = window.flip()
                    self.hub.sendMessageEvent(text="EXPERIMENT_START",
                                              sec_time=flip_time)

                    start_trial = False

                    # wait until a space key 'press' event occurs after the instructions are displayed
                    self.hub.clearEvents('all')
                    while not start_trial:
                        for event in kb.getEvents(
                                event_type_id=EventConstants.KEYBOARD_PRESS):
                            if event.key == ' ':
                                start_trial = True
                                break
                        self.hub.wait(0.2)

                    # So request to start trial has occurred...
                    # Clear the screen, start recording eye data, and clear all events
                    # received so far.
                    #
                    flip_time = window.flip()
                    trial['session_id'] = self.hub.getSessionID()
                    trial['trial_id'] = t + 1
                    trial['TRIAL_START'] = flip_time
                    self.hub.sendMessageEvent(text="TRIAL_START",
                                              sec_time=flip_time)
                    self.hub.clearEvents('all')
                    tracker.setRecordingState(True)

                    # Get the image name for this trial
                    #
                    imageStim = image_cache[trial['IMAGE_NAME']]

                    # Loop until we get a keyboard event
                    #
                    run_trial = True
                    while run_trial is True:
                        # Get the latest gaze position in display coordinate space.
                        #
                        gpos = tracker.getLastGazePosition()
                        if isinstance(gpos, (tuple, list)):
                            # If we have a gaze position from the tracker, draw the
                            # background image and then the gaze_cursor.
                            #
                            gaze_dot.setPos(gpos)
                            imageStim.draw()
                            gaze_dot.draw()
                        else:
                            # Otherwise just draw the background image.
                            #
                            imageStim.draw()

                        # flip video buffers, updating the display with the stim we just
                        # updated.
                        #
                        flip_time = window.flip()

                        # Send a message to the ioHub Process / DataStore indicating
                        # the time the image was drawn and current position of gaze spot.
                        #
                        if isinstance(gpos, (tuple, list)):
                            self.hub.sendMessageEvent(
                                "IMAGE_UPDATE %s %.3f %.3f" %
                                (trial['IMAGE_NAME'], gpos[0], gpos[1]),
                                sec_time=flip_time)
                        else:
                            self.hub.sendMessageEvent(
                                "IMAGE_UPDATE %s [NO GAZE]" % (iname),
                                sec_time=flip_time)

                        # Check any new keyboard char events for a space key.
                        # If one is found, set the trial end variable.
                        #
                        for event in kb.getEvents(
                                event_type_id=EventConstants.KEYBOARD_PRESS):
                            if event.key == ' ':
                                run_trial = False
                                break

                    # So the trial has ended, send a message to the DataStore
                    # with the trial end time and stop recording eye data.
                    # In this example we have no use for eye data between trials, so there is no need to save it.
                    #
                    flip_time = window.flip()
                    trial['TRIAL_END'] = flip_time
                    self.hub.sendMessageEvent(text="TRIAL_END %d" % t,
                                              sec_time=flip_time)
                    tracker.setRecordingState(False)
                    # Save the Experiment Condition Variable Data for this trial to the
                    # ioDataStore.
                    #
                    self.hub.addRowToConditionVariableTable(trial.tolist())
                    self.hub.clearEvents('all')
                    t += 1

        # Disconnect the eye tracking device.
        #
        tracker.setConnectionState(False)

        # Update the instruction screen text...
        #
        instuction_text = "Press Any Key to Exit Demo"
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time = window.flip()
        self.hub.sendMessageEvent(text="SHOW_DONE_TEXT", sec_time=flip_time)

        # wait until any key is pressed
        self.hub.clearEvents('all')
        while not kb.getEvents(event_type_id=EventConstants.KEYBOARD_PRESS):
            self.hub.wait(0.2)

        # So the experiment is done, all trials have been run.
        # Clear the screen and show an 'experiment done' message using the
        # instructionScreen state. Wait for the trigger to exit that state
        # (i.e. for the space key to be pressed).
        #
        flip_time = window.flip()
        self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE',
                                  sec_time=flip_time)