Example no. 1
 def show_message_screen(self, screen):
     self.mouse.set_visible(True)
     self.disp.fill(screen)
     self.disp.show()
     self.mouse.get_clicked()
     self.mouse.set_visible(False)
     libtime.pause(500)
Example no. 2
    def run_exp(self, test_mode=False):
        libtime.expstart()
        self.user_interface.show_experiment_start_screen()
#        self.user_interface.show_practice_start_screen()
            
        for i in range(1, N_BLOCKS+1):
            # Taking club-deck as reference, 
            # REMEMBER: rewards[0] is reward for T; rewards[1] is L         
            block_info = self.run_block(i)
            
            self.data_access.write_block_log(block_info)
                    
            # The flag is toggled on every block/iteration of the 'for' loop:
            # is_take_left starts as e.g. True, is False after the first
            # iteration, True again after the second, and so on.
            # TODO: implement different ways of counterbalancing
            # (alternating vs. random; see the sketch after this example)
#            if COUNTERBALANCE == 'alternate':
#                is_take_left = not is_take_left
#            else:
#                is_take_left = random.choice([True, False])
        self.eye_tracker.close()    
        libtime.pause(500)
        
        self.user_interface.show_experiment_end_screen()
        
        # This call closes the display and, with it, all of the screens
        self.user_interface.close()
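
The commented-out counterbalancing above toggles is_take_left in both branches, so the two schemes named in the TODO would behave identically. A minimal sketch of what the two schemes could look like, assuming a COUNTERBALANCE constant set to either 'alternate' or 'random' (names taken from the comments above):

import random

def next_is_take_left(is_take_left, counterbalance='alternate'):
    # Alternating scheme: the T/L sides swap deterministically every block.
    if counterbalance == 'alternate':
        return not is_take_left
    # Random scheme: each block draws the side anew (assumed interpretation).
    return random.choice([True, False])

run_exp would then update is_take_left once per iteration of the block loop.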
Example no. 3
    def show_trial_end_screen(self, points_earned, accumulated_points,
                              trial_info):
        self.mouse.set_visible(True)

        # On threat trials where the 'Take' option was chosen, deliver the
        # shock (tone) with probability SHOCK_PROB.
        shock_delivered = False
        rnd = random.uniform(0, 1)
        if (trial_info['is_threat'] and
                trial_info['option_chosen'] == 'T' and
                rnd < SHOCK_PROB):
            self.shocker_tone.play()
            shock_delivered = True

        outcome = 'earned'
        if points_earned < 0:
            outcome = 'lost'
        elif points_earned == 0:
            outcome = 'foregone'

        self.points_earned_stim.setText(text='You have %s %i points' %
                                        (outcome, trial_info['target_num']))
        self.accumulated_points_stim.setText(text='Accumulated points: %i out of %i' % \
                                            (accumulated_points, trial_info['threshold']))

        self.disp.fill(self.trial_end_screen)
        self.disp.show()

        self.mouse.get_clicked()

        libtime.pause(500)

        return shock_delivered
Example no. 4
    def show_block_end_screen(self, is_practice, accumulated_points):
        self.mouse.set_visible(True)
        self.block_end_screen = libscreen.Screen()
        block_end_text = ('You have completed this experimental block.\n'
                          'Your score is %i points.\n\n'
                          'Click left mouse button to proceed.' % accumulated_points)

        self.block_end_screen.draw_text(text=block_end_text, fontsize=18)
        self.disp.fill(self.block_end_screen)
        self.disp.show()
        self.mouse.get_clicked()
        libtime.pause(200)
Example no. 5
 def show_block_intro_screen(self, block_size, is_practice):
     self.mouse.set_visible(True)
     self.block_intro_screen = libscreen.Screen()
     block_type = 'practice' if is_practice else 'recorded'
     self.block_intro_screen.draw_text(
         text='You are about to start a block of %d %s trials. '
              'To start, click the left mouse button.' % (block_size, block_type),
         fontsize=18)
     self.disp.fill(self.block_intro_screen)
     self.disp.show()
     self.mouse.get_clicked()
     libtime.pause(200)
Example no. 6
    def show_feedback_screen(self, points_earned, accumulated_points):
        self.mouse.set_visible(True)
        points_earned_str = '%i points'
        if points_earned > 0:
            self.feedback_text.setText('Correct!')
            self.feedback_text.setColor((52, 201, 64))
            self.feedback_points_earned.setColor((52, 201, 64))
            points_earned_str = '+' + points_earned_str
        elif points_earned < 0:
            self.feedback_text.setText('Incorrect!')
            self.feedback_text.setColor((196, 46, 46))
            self.feedback_points_earned.setColor((196, 46, 46))

        self.feedback_points_earned.setText(points_earned_str %
                                            (points_earned))
        self.feedback_accumulated_points.setText('Accumulated points: %i' %
                                                 (accumulated_points))
        self.disp.fill(self.feedback_screen)
        self.disp.show()
        libtime.pause(500)
        self.mouse.get_clicked()
Example no. 7
 def presentAGTrial(self, curTrial, getInput, AGTime):
     
     libtime.pause(self.ISI)
     
     self.experiment.disp.show()
     
     if curTrial['AGType']=="image":
         #create picture
         curPic=self.pictureMatrix[curTrial['image']][0]
         curPic.pos=(0,0)
         
         #build screen
         agScreen=libscreen.Screen(disptype='psychopy')
         buildScreenPsychoPy(agScreen, [curPic])
         #wait agWait1 ms before presenting the screen
         libtime.pause(self.agWait1)
         #present screen
         setAndPresentScreen(self.experiment.disp, agScreen)
         #play audio
         playAndWait(self.soundMatrix[curTrial['audio']],waitFor=0)
         #display for rest of ag Time
         libtime.pause(AGTime)
     
     elif curTrial['AGType']=="movie":
         #load movie stim
         print(self.experiment.moviePath)
         print(curTrial['AGVideo'])
         mov = visual.MovieStim3(self.experiment.win, self.experiment.moviePath+curTrial['AGVideo'])
         while mov.status != visual.FINISHED:
             mov.draw()
             self.experiment.win.flip()
     
     #if getInput is True, wait for a keyboard press before advancing
     if getInput:
         self.experiment.input.get_key()
Example no. 8
 def show_fixation_screen(self, time=0):
     self.mouse.set_visible(False)
     self.disp.fill(self.fixation_screen)
     self.disp.show()
     libtime.pause(time)
Example no. 9
def acclimation(center_of_screen, tracker, disp, keyboard, AOI_left, AOI_right):
    image_set = generate_trial_images()

    #start trials
    for index in range(len(image_set)):
        # make trial screens
        fixation_cross_screen = Screen()
        fixation_cross_screen.draw_fixation(fixtype='cross', pos=center_of_screen, colour=(255,255,255), pw=5, diameter=30)
        number_screen = Screen()
        number_screen.draw_text(text=str(np.random.randint(1,10)),pos = center_of_screen, colour=(255,255,255), fontsize=40)
        face_pair_screen = Screen()
        disengagement_screen = Screen()

        # start with a blank screen for 500 ms and start recording
        disp.fill()
        disp.show()
        tracker.start_recording()
        tracker.log("start_trial %d" %index)
        trialstart = libtime.get_time()
        libtime.pause(500)

        # fixation cross screen
        disp.fill(fixation_cross_screen)
        disp.show()
        libtime.pause(500)
        fixation_cross_screen.clear()

        # number screen
        disp.fill(number_screen)
        disp.show()
        libtime.pause(1000)
        number_screen.clear()

        #draws image pair
        image_pair = image_set[index]
        face_pair_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
        face_pair_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width
        disp.fill(face_pair_screen)
        disp.show()
        
        neutral_image_index = 0
        if ("NE" in image_pair[1]):
            neutral_image_index = 1
        
        # free-viewing loop: capture fixations and looking times for 3000 ms
        start_time_taken = time.time() * 1000
        total_time_taken = 0
        time_neutral = 0
        time_emotional = 0
        last_pass_time_stamp = (time.time() * 1000) - start_time_taken
        last_pass_time_taken = 0

        first_image = 0

        count_fixation_on_emotional = 0
        last_fixation_on_emotional = False
        while total_time_taken < 3000:
            pressed_key = keyboard.get_key()[0]
            if (pressed_key == 'q'):
                break

            tracker_pos = tracker.sample()
            
            if AOI_right.contains(tracker_pos):
                #Add time
                if neutral_image_index == 0:
                    time_emotional = time_emotional + last_pass_time_taken
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True
                else:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
                    
            elif AOI_left.contains(tracker_pos):
                #Add time
                if neutral_image_index == 0:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
                else:
                    time_emotional = time_emotional + last_pass_time_taken
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True


            last_pass_time_taken = (time.time() * 1000) - last_pass_time_stamp
            last_pass_time_stamp = (time.time() * 1000)
            total_time_taken = (time.time() * 1000) - start_time_taken

        if (pressed_key == 'q'):
            break

        #libtime.pause(3000) # 3000 ms of free viewing
        #image pair index 2 tells us if we need to draw a circle/square.

        #myRect_ontheleft = (center_of_screen[0]-300-163, center_of_screen[0]-300+163, center_of_screen[1]+163, center_of_screen[1]-163)
        #myRect_ontheright = (center_of_screen[0]+300-163, center_of_screen[0]+300+163, center_of_screen[1]+163, center_of_screen[1]-163)

        if image_pair[2]:
            # new_face_pair_screen = swap(face_pair_screen, image_pair, tracker)

            #if ("Male" in image_pair[0]):
                #new_suffix = "_result.jpg"
            #else:
            new_suffix = circle_suffix
            if random.choice([True, False]):
                new_suffix = square_suffix

            image_pair[neutral_image_index] = image_pair[neutral_image_index].replace(regular_suffix, new_suffix)

            disengagement_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
            disengagement_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width

            while keyboard.get_key()[0] is None:
                start_pos = tracker.sample()
                #face_pair_screen.draw_circle(colour=(255,255,255), pos=((start_pos[0]-center_of_screen[0]+300)**2,(start_pos[1]-center_of_screen[1])**2, 326/2))
                #disp.fill(face_pair_screen)
                #disp.show()
                if neutral_image_index == 0:
                    #area = pygame.Rect(myRect_ontheleft)
                    #pygame.draw.rect(face_pair_screen, (100, 200, 70), area)
                    #pygame.display.flip()
                    #if ((start_pos[0]-center_of_screen[0]+300)**2 + (start_pos[1]-center_of_screen[1])**2)**0.5 < 100/2:
                    if AOI_right.contains(start_pos):
                        #face_pair_screen.draw_circle(color=(255,255,255), pos=(start_pos[0]-center_of_screen[0]+300)**2,start_pos[1]-center_of_screen[1])**2), 326/2)

                        #print("you fixated on the right image:))")
                        disengagement_start_time = libtime.get_time()


                        # if fixation is started here... draw new images.
                        #if (start_pos[0] >= center_of_screen[0]-300 and start_pos[0] <= center_of_screen[0]-300+image_HW and start_pos[1] >= center_of_screen[1] and start_pos[1] <= center_of_screen[1]+image_HW):
                        face_pair_screen.clear()
                        #disengagement_screen.draw_text(text="yep", pos=center_of_screen)
                        #while keyboard.get_key()[0] == None:
                        disp.fill(disengagement_screen)
                        disp.show()
                        
                        while True:
                            start_pos = tracker.sample()
                            if AOI_left.contains(start_pos):
                                print("you fixated on the left image:))")
                                disengagement_end_time = libtime.get_time()
                                break
                        break

                    # then wait for fixation on position of image_pair[1], i.e. the opposite
                if neutral_image_index == 1:
                    #area = pygame.Rect(myRect_ontheright)
                    #pygame.draw.rect(face_pair_screen, (100, 200, 70), area)
                    #pygame.display.flip()
                    #if ((start_pos[0]-center_of_screen[0]-300)**2 + (start_pos[1]-center_of_screen[1])**2)**0.5 < 326/2:
                    if AOI_left.contains(start_pos):
                        disengagement_start_time = libtime.get_time()
                        #if (start_pos[0] >= center_of_screen[0]+300 and start_pos[0] <= center_of_screen[0]+300+image_HW and start_pos[1] >= center_of_screen[1] and start_pos[1] <= center_of_screen[1]+image_HW):
                        face_pair_screen.clear()
                        #disengagement_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
                        #disengagement_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width
                        disp.fill(disengagement_screen)
                        disp.show()
                        
                        while True:
                            start_pos = tracker.sample()
                            if AOI_right.contains(start_pos):
                                disengagement_end_time = libtime.get_time()
                                print("Total time taken: " + str(disengagement_end_time - disengagement_start_time))
                                break
                        break
        else:
            continue

        if (pressed_key == 'q'):
            break

        # end trial
        trialend = libtime.get_time()
        tracker.stop_recording()
        tracker.log("stop trial %d" % index)


        # log information at the end
        # provide a way out (quit when 'q' is pressed)
        if keyboard.get_key()[0] == "q":
            break
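
The free-viewing loop above accumulates per-AOI looking times by adding the duration of the previous pass to whichever AOI contains the current gaze sample. The same bookkeeping, reduced to a minimal sketch that uses PyGaze's libtime throughout instead of mixing it with time.time() (a tracker with sample(), AOI objects with contains(), millisecond timing, and a prior libtime.expstart() call are assumed, as in the example above):

import pygaze.libtime as libtime

def measure_dwell(tracker, aoi_left, aoi_right, duration=3000):
    # Accumulate per-AOI dwell time (in ms) over a fixed viewing period.
    dwell = {'left': 0.0, 'right': 0.0}
    start = libtime.get_time()
    last_t = start
    while libtime.get_time() - start < duration:
        pos = tracker.sample()
        now = libtime.get_time()
        # Credit the duration of this pass to the AOI currently fixated.
        if aoi_left.contains(pos):
            dwell['left'] += now - last_t
        elif aoi_right.contains(pos):
            dwell['right'] += now - last_t
        last_t = now
    return dwell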
Example no. 10
# Create a BAD sound (white noise) and a GOOD sound (a 440 Hz sine).
bad_sound = Sound(osc="whitenoise", length=200)
bad_sound.set_volume(1)
good_sound = Sound(osc="sine", freq=440, length=200)
good_sound.set_volume(0.5)

# Create a new Screen instance.
scr = Screen()
scr.draw_text("Welcome!", fontsize=100, \
    colour=(255,100,100))

# Pass the screen to the display.
disp.fill(scr)
disp.show()
timer.pause(3000)

# Create a list of all trials.
trials = []
# Add all the words.
for word in WORDS:
    t = {}
    t["type"] = "word"
    t["stimulus"] = word
    trials.append(t)
# Add all the nonwords.
for word in NONWORDS:
    t = {}
    t["type"] = "nonword"
    t["stimulus"] = word
    trials.append(t)
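
With word and nonword trials collected into a single list, the trial order is typically randomised before the experiment loop starts; a one-line sketch using the trials list built above:

import random

# Shuffle so that word and nonword trials are randomly interleaved.
random.shuffle(trials)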
Example no. 11
    tracker.status_msg("trial %d/%d" % (trialnr + 1, ntrials))

    # perform a drift check
    tracker.drift_correction()

    # wait for the other player

    # RUN TRIAL

    # present image
    disp.fill(scr)
    t0 = disp.show()
    tracker.log("image online at %d" % t0)

    # wait for a bit
    timer.pause(TRIALTIME)

    # reset screen
    disp.fill()
    t1 = disp.show()
    tracker.log("image offline at %d" % t1)

    # stop recording
    tracker.log("TRIALEND %d" % trialnr)
    tracker.stop_recording()

    # TRIAL AFTERMATH
    # bookkeeping
    log.write([trialnr, images[trialnr], t1 - t0])

    # inter trial interval
Example no. 12
    def presentTrial(self,curTrial,curTrialIndex):
        #self.checkExit()
        #self.experiment.disp.show()
        
        #random jitter prior to trial start
        libtime.pause(self.ISI+random.choice([0,100,200])) 
        
        
        
        #######start eye tracking##########
        if self.experiment.subjVariables['eyetracker']=="yes":
            self.experiment.tracker.start_recording()
            #log data on trial
            self.experiment.tracker.log("Experiment %s subjCode %s seed %s TrialNumber %s TrialType %s audio %s image %s" % (self.experiment.expName, self.experiment.subjVariables['subjCode'],str(self.experiment.subjVariables['seed']),str(curTrialIndex),curTrial['audioType'], curTrial['audio'],curTrial['image']))
        #start trial timer
        trialTimerStart=libtime.get_time()
        
        

        #create ag screen
        #agScreen=libscreen.Screen(disptype='psychopy')
        agScreenTime=libtime.get_time()  
        if self.experiment.subjVariables['eyetracker']=="yes":
            #log event
            self.experiment.tracker.log("agStart")
        
        agCount = 0
        keyBreak = False
        movPlaying = True
        self.mov.play()
        while self.mov.status != visual.FINISHED and libtime.get_time() - agScreenTime < self.AGTimeOut:
            self.mov.draw()
            self.experiment.win.flip()
            
            libtime.pause(10)
            
            if self.experiment.subjVariables['activeMode']=="input" and self.experiment.subjVariables['inputDevice']=="keyboard":
                for key in event.getKeys():
                    if key == 'space':
                        if self.mov.status == visual.PLAYING:
                            self.mov.pause()
                            self.experiment.win.flip()
                            movPlaying = False
                        keyBreak = True
                        
                if keyBreak:
                    break
            else:  
                if self.experiment.subjVariables['activeMode']=="gaze":
                    inputpos = self.experiment.tracker.sample()
                elif self.experiment.subjVariables['activeMode']=="input" and self.experiment.subjVariables['inputDevice']=="mouse":
                    inputpos = self.experiment.input.get_pos()
            
            
                if self.aoiCenterMovie.contains(inputpos):
                    agCount += 1
                if agCount > self.AGFixCount:
                    #if self.mov.status == visual.PLAYING:
                    break
                    
        if movPlaying:       
            if self.mov.status == visual.PLAYING:
                self.mov.pause()
                self.experiment.win.flip()
                    
        #print libtime.get_time() - agScreenTime
                
        if self.experiment.subjVariables['eyetracker']=="yes":
            #log event
            self.experiment.tracker.log("agEnd")
            
        #create starting screen
        startScreen=libscreen.Screen(disptype='psychopy')
        curPic=self.pictureMatrix[str(curTrial['image'])][0]
        curPicCoord=self.pos['center']
        curPic.setPos(curPicCoord)
        curPic.size = (300,300)
        curBox = self.pictureMatrix[str(curTrial['box'])][0]
        curBox.size = self.posDims
        curBox.pos = self.pos['center']
        buildScreenPsychoPy(startScreen,[self.rect,curPic,curBox,self.grayRect])
        
        #present starting screen
        setAndPresentScreen(self.experiment.disp, startScreen)
        startScreenTime=libtime.get_time()  
        if self.experiment.subjVariables['eyetracker']=="yes":
            #log event
            self.experiment.tracker.log("startScreen")
        
        libtime.pause(self.screenPause)
        
        
        #slide the screen up
        for i in range(0, self.BoxStepCount+1, self.BoxStep):
            #set up box
            curBox.pos=(self.pos['center'][0],i)
            #add new screen
            curScreen=libscreen.Screen(disptype='psychopy')
            #add stimuli to the screen
            buildScreenPsychoPy(curScreen,[self.rect,curPic,curBox,self.grayRect])
            setAndPresentScreen(self.experiment.disp,curScreen)
            if i==self.BoxStepCount:
                screenUpTime=libtime.get_time()
                if self.experiment.subjVariables['eyetracker']=="yes":
                    #log screen slide event
                    self.experiment.tracker.log("screenUp")
        
        #play audio
        self.soundMatrix[curTrial['audio']].play()
        audioStartTime=libtime.get_time()
        if self.experiment.subjVariables['eyetracker']=="yes":
            #log audio event
            self.experiment.tracker.log("audioStart")
        
    
        
        ######Contingent Procedure######
        lookProcedureTimes = self.watchProcedure(curTrial,audioStartTime,curTrial['audioDur'],1000,self.aoiScreen,looming=True,curPic=curPic,stim1=self.rect,stim2=curBox,stim3=self.grayRect)
        #print lookProcedureTimes
        self.soundMatrix[curTrial['audio']].stop()
        self.experiment.disp.show()
        audioEndTime=libtime.get_time()-audioStartTime
        if self.experiment.subjVariables['eyetracker']=="yes":
            #log audio end event
            self.experiment.tracker.log("audioEnd")
        
        ######Stop Eyetracking######
        
        #trialEndTime
        trialTimerEnd=libtime.get_time()
        #trial time
        trialTime=trialTimerEnd-trialTimerStart
        if self.experiment.subjVariables['eyetracker']=="yes":
            #stop eye tracking
            self.experiment.tracker.stop_recording()
        
        #######Save data#########
        
        fieldVars=[]
        for curField in self.trialFieldNames:
            fieldVars.append(curTrial[curField])
   
        [header, curLine] = createRespNew(self.experiment.subjInfo, self.experiment.subjVariables, self.trialFieldNames, fieldVars,
                                        a_curTrialIndex=curTrialIndex,
                                        b_expTimer=trialTimerEnd,
                                        c_trialStart=trialTimerStart,
                                        d_trialTime=trialTime,
                                        e_totalTime = lookProcedureTimes[0],
                                        f_lookAways = lookProcedureTimes[1],
                                        g_totalLookingTime = lookProcedureTimes[2],
                                        h_totalLookingTimeNS = lookProcedureTimes[3],
                                        i_agScreenTime = agScreenTime,
                                        j_startScreenTime = startScreenTime,
                                        k_audioStartTime=audioStartTime,
                                        l_audioEndTime = audioEndTime
                                        )
        
        writeToFile(self.experiment.outputFile,curLine)
Esempio n. 13
Example no. 13
 def show_intro_screen(self):
     self.mouse.set_visible(True)
     self.disp.fill(self.intro_screen)
     self.disp.show()
     self.mouse.get_clicked()
     libtime.pause(300)
Example no. 14
    checked = False
    while not checked:
        disp.fill(fixscreen)
        disp.show()
        checked = tracker.drift_correction()

    # start eye tracking
    tracker.start_recording()
    tracker.status_msg("trial %d" % trialnr)
    tracker.log("start_trial %d trialtype %s" % (trialnr, trialtype))

    # present fixation
    disp.fill(screen=fixscreen)
    disp.show()
    tracker.log("fixation")
    libtime.pause(random.randint(750, 1250))

    # present target
    disp.fill(targetscreens[trialtype])
    t0 = disp.show()
    tracker.log("target %s" % trialtype)

    # wait for eye movement
    t1, startpos = tracker.wait_for_saccade_start()
    endtime, startpos, endpos = tracker.wait_for_saccade_end()

    # stop eye tracking
    tracker.stop_recording()

    # process input:
    # correct if the saccade landed on the cued side
    # (the right-trial check is an assumed completion of this truncated line)
    if (trialtype == 'left' and endpos[0] < constants.DISPSIZE[0] / 2) or \
       (trialtype == 'right' and endpos[0] > constants.DISPSIZE[0] / 2):
        correct = 1
Example no. 15
# Initialise a Screen instance for arbitrary drawing.
scr = Screen()
scr.draw_text(text="Initialising the experiment...", fontsize=FONTSIZE)
disp.fill(scr)
disp.show()

# Initialise the ka-ching sound.
kaching = Sound(soundfile=KACHING)

# Initialise a Keyboard and a Mouse instance for participant interaction.
kb = Keyboard()
mouse = Mouse()

# COMMUNICATIONS
timer.pause(5000)
_print("Initialising Client.")
# Initialise a new Client instance.
client = Client(multicast_ip)

# Establish a connection with the server.
scr.clear()
scr.draw_text(text="Connecting to the server...", fontsize=FONTSIZE)
disp.fill(scr)
disp.show()
_print("Connecting to the Server.")
success = client.contact_server(timeout=CONTIMEOUT)

# Get the experiment parameters.
scr.clear()
scr.draw_text(text="Getting experiment parameters...", fontsize=FONTSIZE)
Example no. 16
        # After disp.fill(stimscr) and then disp.show(), all stimuli in stimscr
        # (including the ImageStim) will be drawn.
        stimscr.screen.append(stim)

    # Wait until the participant presses any key to start.
    disp.fill(textscr)
    disp.show()
    kb.get_key(keylist=None, timeout=None, flush=True)

    # Log the start of the trial.
    log.write([time.strftime("%y-%m-%d"), time.strftime("%H-%M-%S"), \
        trialnr, vidname, timer.get_time()])

    # Start eye tracking.
    tracker.start_recording()
    timer.pause(5)
    tracker.log("TRIALSTART")

    # Show a status message on the EyeLink.
    if TRACKERTYPE == 'eyelink':
        tracker.status_msg("Trial %d/%d (%s)" %
                           (trialnr, len(VIDEOS), vidname))

    # Log trial specifics to gaze data file.
    timer.pause(5)
    tracker.log("TRIALNR %d; VIDNAME %s; EXPTIME %d; PCTIME %s" % \
        (trialnr, vidname, timer.get_time(), \
        datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S.%f')))

    # Loop until the video ends.
    for framenr in range(nframes):
Example no. 17
feedbackscreens[1].draw_text(text='correct', colour=(0,255,0))
feedbackscreens[0] = libscreen.Screen()
feedbackscreens[0].draw_text(text='incorrect', colour=(255,0,0))

# # # # #
# run the experiment

# run 20 trials
for trialnr in range(1,21):
	# prepare trial
	trialtype = random.choice(['left','right'])
	
	# present fixation
	disp.fill(screen=fixscreen)
	disp.show()
	libtime.pause(random.randint(750, 1250))
	
	# present target
	disp.fill(targetscreens[trialtype])
	t0 = disp.show()
	
	# wait for input
	response, t1 = kb.get_key()

	# end the experiment when 'escape' is pressed
	if response == 'escape':
		break
	
	# process input
	if response == trialtype:
		correct = 1
Example no. 18
    fontsize=24)
disp.fill(scr)
disp.show()
kb_space.get_key()

ITD_array = np.linspace(-1, 1, num=80)
ITD_size = 400
np.random.shuffle(ITD_array)
response_array = []

sound = wf.waveform(wavetype='wn', duration=0.1)
box = visual.Circle(pygaze.expdisplay, radius=200)

for itd in ITD_array:
    disp.show()
    libtime.pause(100 + np.random.uniform(0, 200))

    box_time = 800 + libtime.get_time()
    sound_time = 800 + (itd * ITD_size) + libtime.get_time()

    box_shown = 0
    sound_played = 0

    scr = libscreen.Screen()
    scr.clear()

    starttime = libtime.get_time()
    maxtrialtime = 1350

    while libtime.get_time() < starttime + maxtrialtime:
        time = libtime.get_time()
Example no. 19
""" more trials.

PRESS ANY BUTTON TO END THE BREAK
"""
breakscr.draw_text(breaktxt, fontsize=MAIN_FONTSIZE)





# # # # #
# DISPLAY INSTRUCTIONS 

#loop through instruction screens 
for scrn in instruction_screens: 
    timer.pause(100)
    disp.fill(scrn)
    disp.show()
    if MEG: # with MEG, wait until the button-box state changes
        trigbox.wait_for_button_press()
    else:
        mouse.get_clicked()


#now show the first practice screen 
disp.fill(prac_scr)
timer.pause(500) # pause so snake does not appear to be instantly moving
disp.show()
Example no. 20
disp.fill(instscr)
disp.show()
kb.get_key(keylist='space', timeout=None)
#timer.pause(20000)
'''
Loop to run the task for N trials
'''

for trial in alltrials:
    '''
    Present the fixation screen
    '''
    disp.fill(fixscr)
    fixonset = disp.show()

    timer.pause(FIXTIME)
    '''
    Present the cue screen
    '''

    disp.fill(
        cuescr[trial['cueside']]
    )  # References the cue-side dict of the current trial (from alltrials)
    cueonset = disp.show()

    timer.pause(CUETIME)
    '''
    Present the fixation screen again,
    creating the instance that marks the end of the cue
    '''
Example no. 21
pressed = [0, 0, 0]
wait = True
# Draw the prompt once, outside the loop, so the text stim is not
# re-added to the screen on every pass.
tryscreen = exscreen
tryscreen.draw_text('Press the \n middle button',
                    pos=(((1 - margin) / 4) * xax,
                         ((1 - margin) / 4) * yax),
                    fontsize=20)
while wait:
    disp.fill(tryscreen)
    disp.show()
    resp = mse.get_pressed()
    # get_pressed() returns (left, middle, right); index 1 is the middle
    # button, matching the on-screen prompt
    if resp[1] == 1:
        event.clearEvents(eventType='mouse')
        wait = False

timer.pause(1000)

pressed = ()
wait = True
print(wait)
disp.fill(introscreen)
disp.show()
while wait:
    print(pressed)
    pressed = mse.get_pressed()
    introscreen.draw_text(str(pressed),
                          pos=(((1 - margin) / 4) * xax,
                               ((1 - margin) / 4) * yax),
                          fontsize=20)
    disp.fill(introscreen)
    disp.show()
Example no. 22
    def show_response_screen(self, trial_info, tracker):
        #        self.mouse.set_pos((DISPSIZE[0]/2, DISPSIZE[1]-(self.start_button_size[1]/2 + self.start_button_offset)))
        self.mouse.set_visible(visible=True)

        # (Re)set the deck images at the start of every trial, so the flipped
        # images set by the selection code below do not carry over.
        # Note that what changes is the image drawn (not the variable).
        if trial_info['is_take_left']:
            self.left_resp_img.setImage('resources/T_button.png')
            self.right_resp_img.setImage('resources/L_button.png')
        else:
            self.left_resp_img.setImage('resources/L_button.png')
            self.right_resp_img.setImage('resources/T_button.png')

#        self.disp.show()
        trial_start_time = libtime.get_time()
        while self.deadzone_rect.contains(self.mouse.mouse):
            self.disp.fill(self.response_screen)
            self.disp.show()
        idle_time = libtime.get_time() - trial_start_time

        self.target.setOpacity(1.0)
        self.target.setText(text=trial_info['target_num'])
        if trial_info['is_threat']:
            self.target.setColor(THREAT_TARGET_COLOR)
        else:
            self.target.setColor(TARGET_COLOR)

        self.disp.fill(self.response_screen)
        self.disp.show()

        option_chosen = None
        response_dynamics_log = []

        response_start_time = libtime.get_time()

        while option_chosen is None:
            mouse_position = self.mouse.get_pos()
            t = libtime.get_time() - response_start_time

            eye_position = tracker.sample()
            pupil_size = tracker.pupil_size()

            response_dynamics_log.append([
                trial_info['subj_id'], trial_info['block_no'],
                trial_info['trial_no'],
                str(t), mouse_position[0], mouse_position[1], pupil_size,
                eye_position[0], eye_position[1]
            ])

            if self.mouse.mouse.isPressedIn(self.left_resp_rect):
                if trial_info['is_take_left']:
                    option_chosen = 'T'
                    self.left_resp_img.setImage(
                        'resources/selectedT.png')  # CREATE THE STIM!!!
                else:
                    option_chosen = 'L'
                    self.left_resp_img.setImage('resources/selectedL.png')
            elif self.mouse.mouse.isPressedIn(self.right_resp_rect):
                if trial_info['is_take_left']:
                    option_chosen = 'L'
                    self.right_resp_img.setImage('resources/selectedL.png')
                else:
                    option_chosen = 'T'
                    self.right_resp_img.setImage('resources/selectedT.png')

            libtime.pause(TIMESTEP)
        response_time = libtime.get_time() - response_start_time

        self.target.setOpacity(0.0)
        self.target.setText(text='')

        self.disp.fill(self.response_screen)
        self.disp.show()
        libtime.pause(300)

        return response_dynamics_log, option_chosen, response_time, idle_time
Example no. 23
		# Countdown before the pause screen, to make sure that BOLD is down to
		# baseline.
		scr.clear()
		scr.draw_text("Please wait for %d seconds..." % \
			(numpy.ceil(INTERBLOCK_MIN_PAUSE/1000.0)), fontsize=24)
		disp.fill(scr)
		t0 = disp.show()
		event_log.write([t0, "block %d onset " % (ii) + currBlock[0][5]])
		t1 = copy.deepcopy(t0)
		while t1 - t0 < INTERBLOCK_MIN_PAUSE:
			scr.clear()
			scr.draw_text("Please wait for %d seconds..." % \
				(numpy.ceil((INTERBLOCK_MIN_PAUSE-(t1-t0))/1000.0)), fontsize=24)
			disp.fill(scr)
			t1 = disp.show()
			timer.pause(100)
		disp.fill(inter_block)
		disp.show()
		### CONTINUE WHEN BUTTON PRESSED ###
		if MRI: # with MRI, wait until a button on the response box is pressed
			button, t1 = trigbox.wait_for_button_press(allowed=[MAIN_BUT], timeout=None)
			t1 = timer.get_time()
#			btn_pressed = False # set flag to false
#			while not btn_pressed:
#				btn_list, state = trigbox.get_button_state(button_list=[MAIN_BUT])
#				if not state[0]:
#					btn_pressed = True
		else: 
			mousebutton, clickpos, t1 = mouse.get_clicked()
Example no. 24
    tracker.drift_correction()

    # RUN TRIAL
    # start tracking
    tracker.start_recording()
    tracker.log("TRIALSTART %d" % trialnr)
    tracker.log("IMAGENAME %s" % images[trialnr])
    tracker.status_msg("trial %d/%d" % (trialnr + 1, ntrials))

    # present image
    disp.fill(scr)
    t0 = disp.show()
    tracker.log("image online at %d" % t0)

    # wait for a bit
    timer.pause(TRIALTIME)

    # reset screen
    disp.fill()
    t1 = disp.show()
    tracker.log("image offline at %d" % t1)

    # stop recording
    tracker.log("TRIALEND %d" % trialnr)
    tracker.stop_recording()

    # TRIAL AFTERMATH
    # bookkeeping
    log.write([trialnr, images[trialnr], t1 - t0])

    # inter trial interval
Example no. 25
# eye tracking
tracker = EyeTracker(disp, trackertype='dummy')
frl = FRL(pos='center', dist=125, size=200)

# input collection and storage
kb = Keyboard(keylist=['escape', 'space'], timeout=None)
log = Logfile()
log.write(["trialnr", "trialstart", "trialend", "duration", "image"])

# run trials
tracker.calibrate()
for trialnr in range(len(IMAGES)):
    # blank display
    disp.fill()
    disp.show()
    libtime.pause(1000)
    # prepare stimulus
    scr.clear()
    scr.draw_image(IMAGES[trialnr])
    # start recording eye movements
    tracker.drift_correction()
    tracker.start_recording()
    tracker.status_msg("trial %d" % trialnr)
    tracker.log("start trial %d" % trialnr)
    # present stimulus
    response = None
    trialstart = libtime.get_time()
    while not response:
        gazepos = tracker.sample()
        frl.update(disp, scr, gazepos)
        response, presstime = kb.get_key(timeout=1)
Example no. 26
            ])
        if timer.get_time() - t0 >= TRIALTIME_L:
            is_found = 0
            break
    log_sub.write([trialnr, images[trialnr], is_found, timer.get_time() - t0])
    # reset screen
    disp.fill()
    t1 = disp.show()
    tracker.log("image offline at %d" % t1)

    # stop recording
    tracker.log("TRIALEND %d" % trialnr)
    tracker.stop_recording()

    # inter trial interval
    timer.pause(ITI)

# # # # #
# CLOSE

# loading message
scr.clear()
scr.draw_text(text="Transferring the data file, please wait...",
              fontsize=TEXTSIZE_M)
disp.fill(scr)
disp.show()

# neatly close connection to the tracker
# (this will close the data file, and copy it to the stimulus PC)
tracker.close()
Example no. 27
from pygaze.display import Display
import pygaze.libtime as timer
from pygaze.screen import Screen
from constants import *

# Open the display first; a Screen can only draw once the display exists.
disp = Display()

fixscreen = Screen()
fixscreen.draw_fixation(fixtype='dot')

# Show the fixation dot for two seconds, then close the display.
disp.fill(fixscreen)
disp.show()
timer.pause(2000)
disp.close()
Example no. 28
    def watchProcedure(self,curTrial,startTime,maxTime,maxLookAwayTime, aoi,looming=False,curPic=None,stim1=None,stim2=None,stim3=None):
        totalLookingTime = 0
        nonLookingTimes = []
        transitionNonLookingTimes = []
        lookAways = 0
        curNonLookingTime = 0
        looking = True
        nonLook = False
        curLookAwayTime = 0
        responded = True
        counter=0.0
        direction=1
        startValues=(300,300)
        loomCounter = 0
        transition=False
        #list to store last 150 ms of looking
        last150ms=[]
        #store current location to initiate checking of when looks go off screen
        if self.experiment.subjVariables['activeMode']=="gaze":
            lastInputpos = self.experiment.tracker.sample()
            if lastInputpos == self.lookAwayPos:
                transitionNonLookingTimeOnset = libtime.get_time()
        elif self.experiment.subjVariables['activeMode']=="input" and self.experiment.subjVariables['inputDevice']=="mouse":
            lastInputpos = self.experiment.input.get_pos()
            if lastInputpos == self.lookAwayPos:
                transitionNonLookingTimeOnset = libtime.get_time()
            
        while libtime.get_time() - startTime < maxTime and curLookAwayTime < maxLookAwayTime:
            if self.experiment.subjVariables['activeMode']=="input" and self.experiment.subjVariables['inputDevice']=="keyboard":
                for key in event.getKeys():
                    if key == 'space' and looking:
                        responded = False
                        event.clearEvents()
                        transitionNonLookingTimeOnset = libtime.get_time()
                    elif key == 'space' and not looking:
                        responded = True
                        event.clearEvents()
                    
            else:
                libtime.pause(10)
                #get gaze/ mouse position
                if self.experiment.subjVariables['activeMode']=="gaze":
                    curInputpos = self.experiment.tracker.sample()
                elif self.experiment.subjVariables['activeMode']=="input":
                    curInputpos = self.experiment.input.get_pos()
                #mark transition time
                if curInputpos == self.lookAwayPos and lastInputpos != self.lookAwayPos:
                    transition=True
                    transitionNonLookingTimeOnset = libtime.get_time()
                else:
                    transition = False
                
                ####smoothing eyetracking/mousetracking sample###
                
                ##add cur gaze position to the list
                last150ms.append(curInputpos)
                #
                ##if the length of the list exceeds 150 ms / 25 ms == 6 samples, delete the earliest item in the list
                ## 25 ms because an average pass through the while loop takes between 20-30 ms
                if len(last150ms)>6:
                    del last150ms[0]
                
                ##Now, remove the (no looking data) tuples
                last150msClean=[e for e in last150ms if e != self.lookAwayPos]
                ##Now calculate the mean
                if len(last150msClean)>0:
                    #calculate mean
                    #looks a bit tricky, but only because the gaze positions are stored as tuples, which is a slightly odd data structure
                    inputpos=tuple(map(lambda y: sum(y) / float(len(y)), zip(*last150msClean)))
                else:
                    inputpos=self.lookAwayPos
 
                ####smoothing procedure end###
                
                responded = aoi.contains(inputpos)
                                
                #update last gaze position
                lastInputpos = curInputpos
              
            if not responded and looking:
                nonLookingTimeOnset = libtime.get_time()
                looking = False
                lookAways +=1
                nonLook = True
                
            
            
            if responded:
                if not looking:
                    looking = True
                    nonLookOnset = False
                    curNonLookingTime=libtime.get_time()-nonLookingTimeOnset
                    curTransitionNonLookingTime=libtime.get_time()- transitionNonLookingTimeOnset
                    nonLookingTimes.append(curNonLookingTime)
                    transitionNonLookingTimes.append(curTransitionNonLookingTime)
            
            if looking:
                curLookAwayTime = 0
            else:
                curLookAwayTime = libtime.get_time() - nonLookingTimeOnset
                if curLookAwayTime > maxLookAwayTime:
                    nonLookingTimes.append(curLookAwayTime)
                    curTransitionNonLookingTime=libtime.get_time()- transitionNonLookingTimeOnset
                    transitionNonLookingTimes.append(curTransitionNonLookingTime)
            
            if looming:
                  
                #update screen
                newScreen=libscreen.Screen(disptype='psychopy')
                counter +=1
                if counter > 100:
                    direction=(-1)*direction
                    counter = 0.0
                    startValues=(xSize,ySize)
                xSize = self.easeInOut(startValues[0],200.0,counter,100,direction)
                ySize = self.easeInOut(startValues[1],200.0,counter,100,direction)
                curPic.size = (xSize,ySize)
                buildScreenPsychoPy(newScreen,[stim1,curPic,stim2,stim3])
                setAndPresentScreen(self.experiment.disp, newScreen)
            
                
                    
        totalTime=libtime.get_time()-startTime
        totalLookingTime = totalTime - sum(nonLookingTimes)
        totalLookingTimeNonSmoothed = totalTime - sum(transitionNonLookingTimes)
        return [totalTime, lookAways, totalLookingTime,totalLookingTimeNonSmoothed]
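
The smoothing block inside watchProcedure keeps roughly the last 150 ms of gaze samples, drops the look-away placeholders, and averages what remains. The same idea as a standalone helper (a sketch; the ~6-sample window and the lookaway_pos placeholder mirror the assumptions made in the method above):

def smooth_sample(history, new_sample, lookaway_pos, window=6):
    # Keep only the last `window` samples (~150 ms at ~25 ms per pass);
    # history is mutated in place.
    history.append(new_sample)
    if len(history) > window:
        del history[0]
    # Ignore the look-away placeholder samples.
    valid = [s for s in history if s != lookaway_pos]
    if not valid:
        return lookaway_pos
    # Mean of the remaining (x, y) tuples.
    mean_x = sum(s[0] for s in valid) / float(len(valid))
    mean_y = sum(s[1] for s in valid) / float(len(valid))
    return (mean_x, mean_y)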
    
Example no. 29
    # Update the probe Screen.
    probelw = trial['nstim'] * [STIMLINEWIDTH]
    probelw[probed] = PROBELINEWIDTH
    probeoris = trial['nstim'] * [0]
    probestimtypes = trial['nstim'] * ['noise']
    probescr[trial['nstim']].update(stimlocs, probeoris, \
        linewidth=probelw, stimtypes=probestimtypes)
    

    # RUN
    
    # Show the fixation Screen.
    disp.fill(fixscr)
    fixonset = disp.show()
    timer.pause(random.randint(FIXTIME[0], FIXTIME[1]))
    
    # Show the stimulus Screen.
    disp.fill(stimscr[trial['nstim']])
    stimonset = disp.show()
    timer.pause(STIMTIME)
    
    # Show a blank Screen.
    disp.fill(blankscr)
    maintenanceonset = disp.show()
    timer.pause(MAINTENANCETIME)
    
    # Show the probe Screen.
    disp.fill(probescr[trial['nstim']])
    probeonset = disp.show()
    
Example no. 30
    # why is it pausing on the wrong screen? Solution: use psychopy instead of pygame for disptype
    #libtime.pause(10000)
    #tracker.stop_recording()
    #tracker.close()
    #disp.close()

    # wait for eye movement
    t1, startpos = tracker.wait_for_fixation_start()
    #endtime, startpos, endpos = tracker.wait_for_fixation_end()
    #if startpos == center_of_screen:
    if ((startpos[0] - center_of_screen[0])**2 +
        (startpos[1] - center_of_screen[1])**2)**0.5 < 100 / 2:
        screen2.clear()
        disp.fill(screen3)
        disp.show()
        libtime.pause(2000)
        break
        # to make it not flicker?
    if keyboard.get_key()[0] == "space":
        break
# stop eye tracking
tracker.stop_recording()
tracker.close()
disp.close()
# remember to do some drift correction in each trial
# and to log data and so forth (check examples and guidelines)
