if ((gazepos[0]-stimpos[0])**2 + (gazepos[1]-stimpos[1])**2)**0.5 < STIMSIZE/2: screen.copy(hitscreen) points += PPH else: screen.copy(misscreen) points += PPM else: screen.copy(blankscreen) # draw stimulus screen.draw_circle(colour=STIMCOL, pos=stimpos, r=STIMSIZE/2, fill=True) # draw crosshair screen.draw_circle(colour=FGC, pos=gazepos, r=13, pw=2, fill=False) screen.draw_line(colour=FGC, spos=(gazepos[0]-15, gazepos[1]), epos=(gazepos[0]+15, gazepos[1]), pw=2) screen.draw_line(colour=FGC, spos=(gazepos[0], gazepos[1]-15), epos=(gazepos[0], gazepos[1]+15), pw=2) # draw point total screen.draw_text(text=str(points), colour=FGC, pos=(DISPSIZE[0]*0.9, DISPSIZE[1]*0.1), fontsize=FONTSIZE) # update display disp.fill(screen=screen) disp.show() # calculate new stimulus position if libtime.get_time() - tstim > STIMREFRESH: stimpos = (random.randint(int(DISPSIZE[0]*0.1),int(DISPSIZE[0]*0.9)), random.randint(int(DISPSIZE[1]*0.1),int(DISPSIZE[1]*0.9))) tstim = libtime.get_time() # stop eye tracking trialend = libtime.get_time() eyetracker.log("stop_trial %d" % trialnr) eyetracker.stop_recording() # # # # #
# Eye-tracker log file lives in the current working directory,
# tagged with the session basename.
LOGFILENAME = basename + '_eyetracker'
director = os.getcwd()
LOGFILE = os.path.join(director, LOGFILENAME)

# Display/tracker settings pulled from the constants module.
FGC = cst.FGC
BGC = cst.BGC
SACCVELTHRESH = cst.SACCVELTHRESH
SACCACCTHRESH = cst.SACCACCTHRESH
TRACKERTYPE = cst.TRACKERTYPE

# In[Tracker - Calibrate]:
if withTracker:
    if run == 0:
        # First run only: show the instruction screen, wait for any
        # keypress, then calibrate (unless this is a dummy tracker).
        scr.draw_text(text='tracker instructions')
        disp.fill(scr)
        disp.show()
        kb.get_key(keylist=None, timeout=None, flush=True)
        if TRACKERTYPE != 'dummy':
            tracker.calibrate()
    # In both branches the screen ends up cleared.
    scr.clear()

# In[Initiate PsychoPy Objects]:
# # # # #
# PREPARE

# Read the task and drift-check instruction texts from their files.
with open(INSTFILE) as instfile:
    instructions = instfile.read()
with open(INSTFILE_DC) as instfile:
    instruction_dc = instfile.read()

# Collect every stimulus image name from the image directory.
images = os.listdir(IMGDIR)
image_size = (555, 987)

# Prompt the participant, wait for any keypress, then calibrate.
scr.draw_text(text="Press any key to start the calibration.", fontsize=TEXTSIZE_L)
disp.fill(scr)
disp.show()
kb.get_key(keylist=None, timeout=None, flush=True)
tracker.calibrate()

# # # # # #
# RUN

# Show the task instructions.
scr.clear()
scr.draw_text(text=instructions, fontsize=TEXTSIZE_M)
# Fix: this script calls random.randint but never imported random — it only
# ran if pygaze.defaults happened to re-export the name via its star import.
import random

from pygaze.defaults import *
from pygaze import libtime
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard

# NOTE(review): MESSAGE is referenced below but not defined in this chunk;
# it is presumably set earlier in the file — confirm.

# start timing
libtime.expstart()

# objects: display, drawing canvas, and a keyboard listening for space
# with a short poll timeout so the animation loop keeps running
disp = Display()
scr = Screen()
kb = Keyboard(keylist=['space'], timeout=1)

# Flash the message in a random colour at a random position each frame
# until the space bar is pressed.  (PEP 8: compare to None with `is`.)
while kb.get_key()[0] is None:
    # random colour
    col = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
    # random position anywhere on the display
    pos = (random.randint(0, DISPSIZE[0]), random.randint(0, DISPSIZE[1]))
    # draw the text
    scr.draw_text(text=MESSAGE, colour=col, pos=pos, fontsize=84)
    # push it to the monitor
    disp.fill(scr)
    disp.show()
    # reset screen for the next frame
    scr.clear()

# stop the madness
disp.close()
# Map the first sample channel onto an 8-bit red value, clamped to 0-255.
red = int(sample[0] * 255)
red = max(0, min(red, 255))

# Only the red channel tracks the sample; green and blue stay off.
green = 0
blue = 0
bgc = (red, green, blue)

# Repaint the Screen in the new background colour and overlay the
# sample text in white letters.
scr.clear(colour=bgc)
scr.draw_text(text=sampletext, colour=(255,255,255), fontsize=100)

# Push the updated Screen to the monitor.
disp.fill(scr)
disp.show()

# Poll the keyboard once.
key, presstime = kb.get_key()

# Close the connection with the MP150, then end the experiment.
mp.close()
disp.close()
# Eye-tracker output file, named after the session basename.
LOGFILENAME = basename + '_eyetracker'
LOGFILE = os.path.join(cwd, LOGFILENAME)

# Tracker/display settings taken from the constants module.
FGC = cst.FGC
BGC = cst.BGC
SACCVELTHRESH = cst.SACCVELTHRESH
SACCACCTHRESH = cst.SACCACCTHRESH
TRACKERTYPE = cst.TRACKERTYPE
SCREENSIZE = cst.SCREENSIZE

# In[Tracker - Calibrate]:
if withTracker:
    if run == 0:
        # First run only: explain the procedure, wait for a key, calibrate.
        scr.draw_text(
            text=
            """We will now calibrate the eyetracker. Dots will appear one at a time. Focus on them until they disappear.""")
        disp.fill(scr)
        disp.show()
        kb.get_key(keylist=None, timeout=None, flush=True)
        if TRACKERTYPE != 'dummy':
            tracker.calibrate()
    # Both original branches ended by clearing the screen.
    scr.clear()

# In[Initiate PsychoPy Objects]:
["trialnr", "trialstart", "trialend", "disengagementtime", "imagepair"]) # fill in with the neccecary headlines # calibrate the eye-tracker tracker.calibrate() # make the sets of images image_set = generate() #shuffle our image sets shuffle(image_set) # give instuctions first instruction_screen = Screen() instruction_screen.draw_text( text= "You will watch a short clip. After, the trials will begin. \n Press space to continue", pos=center_of_screen, colour=(255, 255, 255), fontsize=22) while keyboard.get_key()[0] != "space": disp.fill(instruction_screen) disp.show() instruction_screen.clear() #call movie function - will need to switch betwwen neutral and sad #INSERT CODE HERE # start trials for trialnr in range(len(image_set)): # make trial screens fixation_cross_screen = Screen()
# your message
MESSAGE = "AFK; BRB"

# import stuff
import random
from pygaze.defaults import *
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard

# objects: display, drawing canvas, and a keyboard that only listens for
# the space bar, with a short poll timeout so the loop keeps animating
disp = Display()
scr = Screen()
kb = Keyboard(keylist=['space'], timeout=1)

# Flash MESSAGE in a random colour at a random position every frame until
# space is pressed.  Fix: compare to None with `is`, not `==` (PEP 8).
while kb.get_key()[0] is None:
    # random colour
    col = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
    # random position anywhere on the display
    pos = (random.randint(0, DISPSIZE[0]), random.randint(0, DISPSIZE[1]))
    # draw the text
    scr.draw_text(text=MESSAGE, colour=col, pos=pos, fontsize=84)
    # push it to the monitor
    disp.fill(scr)
    disp.show()
    # reset screen for the next frame
    scr.clear()

# stop the madness
disp.close()
# Two square (326 px) AOIs centred on the left/right image positions,
# 300 px either side of screen centre; pos is the AOI's top-left corner.
AOI_left = AOI(aoitype="rectangle",
               pos=(center_of_screen[0] - 300 - 163, center_of_screen[1] - 163),
               size=[326, 326])
AOI_right = AOI(aoitype="rectangle",
                pos=(center_of_screen[0] + 300 - 163, center_of_screen[1] - 163),
                size=[326, 326])
pressed_key = None

# give trial instructions first
# (fixed typos in the displayed text: "square or circle", "square")
instruction_screen = Screen()
instruction_screen.draw_text(
    text=
    "The practice trials will now begin. \n You will see a white cross followed by a white number. Please say the number out loud. \n You will see a pair of faces. Please watch them naturally. \n You may see a square or circle appear around an image. If you see either, please look at the image with the shape. \n Left click if it is a square. Right click if it is a circle",
    pos=center_of_screen,
    colour=(255, 255, 255),
    fontsize=22)

# hold the instruction screen until space is pressed
while keyboard.get_key()[0] != "space":
    disp.fill(instruction_screen)
    disp.show()
instruction_screen.clear()
pygame.display.update()

# run the practice (acclimation) block
from accl import acclimation
acclimation(center_of_screen, tracker, disp, keyboard, AOI_left, AOI_right)

#from vidtest import video
#vid_screen = Screen()
#vid_screen.draw_image(video('neutral.mp4'), pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None)
def acclimation(center_of_screen, tracker, disp, keyboard, AOI_left, AOI_right):
    """Run the acclimation (practice) trials.

    Per generated image pair: blank screen (500 ms), fixation cross
    (500 ms), a digit to read aloud (1000 ms), then the face pair while
    gaze is sampled for up to 3000 ms; if flagged, a disengagement phase
    swaps the neutral image for a circle/square variant and times the
    refixation onto the opposite image.  Pressing 'q' aborts.

    arguments
    center_of_screen -- (x, y) pixel centre of the display
    tracker          -- eye tracker (start_recording/sample/log/stop_recording)
    disp             -- pygaze Display
    keyboard         -- pygaze Keyboard
    AOI_left, AOI_right -- AOI instances covering the two image positions

    NOTE(review): relies on module-level names (generate_trial_images, np,
    time, libtime, random, Screen, circle_suffix, square_suffix,
    regular_suffix) defined elsewhere in this module -- confirm.
    """
    image_set = generate_trial_images()
    #start trials
    for index in range(0,len(image_set)):
        # make trial screens
        fixation_cross_screen = Screen()
        fixation_cross_screen.draw_fixation(fixtype='cross', pos=center_of_screen, colour=(255,255,255), pw=5, diameter=30)
        number_screen = Screen()
        # random digit 1-9 that the participant says out loud
        number_screen.draw_text(text=str(np.random.randint(1,10)),pos = center_of_screen, colour=(255,255,255), fontsize=40)
        face_pair_screen = Screen()
        disengagement_screen = Screen()
        # start with blank screen for 500 ms and start recording
        disp.fill()
        disp.show()
        tracker.start_recording()
        tracker.log("start_trial %d" %index)
        trialstart = libtime.get_time()
        libtime.pause(500)
        # fixation cross screen (500 ms)
        disp.fill(fixation_cross_screen)
        disp.show()
        libtime.pause(500)
        fixation_cross_screen.clear()
        # number screen (1000 ms)
        disp.fill(number_screen)
        disp.show()
        libtime.pause(1000)
        number_screen.clear()
        #draws image pair: one image 300 px left of centre, one 300 px right
        image_pair = image_set[index]
        face_pair_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
        face_pair_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width
        disp.fill(face_pair_screen)
        disp.show()
        # which side holds the neutral ("NE") image; the other is emotional
        neutral_image_index = 0
        if ("NE" in image_pair[1]):
            neutral_image_index = 1
        #NEED WHILE LOOP TO CAPTURE FIXATIONS AND TIME
        # free-viewing phase: accumulate per-AOI dwell times (ms) for 3000 ms
        start_time_taken = time.time() * 1000
        total_time_taken = 0
        time_neutral = 0
        time_emotional = 0
        last_pass_time_stamp = (time.time() * 1000) - start_time_taken
        last_pass_time_taken = 0
        first_image = 0
        count_fixation_on_emotional = 0
        last_fixation_on_emotional = False
        while total_time_taken < 3000:
            pressed_key = keyboard.get_key()[0]
            if (pressed_key == 'q'):
                break
            tracker_pos = tracker.sample()
            if AOI_right.contains(tracker_pos):
                #Add time to whichever category the right-hand image belongs to
                if neutral_image_index == 0:
                    # neutral is on the left, so the right image is emotional
                    time_emotional = time_emotional + last_pass_time_taken
                    # count a new fixation only on entry into the emotional AOI
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True
                else:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
            elif AOI_left.contains(tracker_pos):
                #Add time
                if neutral_image_index == 0:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
                else:
                    time_emotional = time_emotional + last_pass_time_taken
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True
            # duration of the pass that just finished, then advance the clock
            last_pass_time_taken = (time.time() * 1000) - last_pass_time_stamp
            last_pass_time_stamp = (time.time() * 1000)
            total_time_taken = (time.time() * 1000) - start_time_taken
        if (pressed_key == 'q'):
            break
        #libtime.pause(3000) # 3000 ms of free viewing
        #image pair index 2 tells us if we need to draw a circle/square.
        #myRect_ontheleft = (center_of_screen[0]-300-163, center_of_screen[0]-300+163, center_of_screen[1]+163, center_of_screen[1]-163)
        #myRect_ontheright = (center_of_screen[0]+300-163, center_of_screen[0]+300+163, center_of_screen[1]+163, center_of_screen[1]-163)
        if (image_pair[2] == True):
            # disengagement phase: swap the neutral image's file name for a
            # variant with a circle or (randomly) a square drawn on it
            # new_face_pair_screen = swap(face_pair_screen, image_pair, tracker)
            #if ("Male" in image_pair[0]):
            #new_suffix = "_result.jpg"
            #else:
            new_suffix = circle_suffix
            if (random.choice([True, False]) == True):
                new_suffix = square_suffix
            image_pair[neutral_image_index] = image_pair[neutral_image_index].replace(regular_suffix, new_suffix)
            disengagement_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
            disengagement_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width
            # wait until a fixation lands on the shape-marked image, then
            # time how long gaze takes to reach the opposite image
            while keyboard.get_key()[0] == None:
                start_pos = tracker.sample()
                #face_pair_screen.draw_circle(colour=(255,255,255), pos=((start_pos[0]-center_of_screen[0]+300)**2,(start_pos[1]-center_of_screen[1])**2, 326/2))
                #disp.fill(face_pair_screen)
                #disp.show()
                if neutral_image_index == 0:
                    #area = pygame.Rect(myRect_ontheleft)
                    #pygame.draw.rect(face_pair_screen, (100, 200, 70), area)
                    #pygame.display.flip()
                    #if ((start_pos[0]-center_of_screen[0]+300)**2 + (start_pos[1]-center_of_screen[1])**2)**0.5 < 100/2:
                    if AOI_right.contains(start_pos):
                        #face_pair_screen.draw_circle(color=(255,255,255), pos=(start_pos[0]-center_of_screen[0]+300)**2,start_pos[1]-center_of_screen[1])**2), 326/2)
                        #print("you fixated on the right image:))")
                        disengagement_start_time = libtime.get_time()
                        # if fixation is started here... draw new images.
                        #if (start_pos[0] >= center_of_screen[0]-300 and start_pos[0] <= center_of_screen[0]-300+image_HW and start_pos[1] >= center_of_screen[1] and start_pos[1] <= center_of_screen[1]+image_HW):
                        face_pair_screen.clear()
                        #disengagement_screen.draw_text(text="yep", pos=center_of_screen)
                        #while keyboard.get_key()[0] == None:
                        disp.fill(disengagement_screen)
                        disp.show()
                        # block until gaze reaches the opposite (left) AOI
                        while True:
                            start_pos = tracker.sample()
                            if AOI_left.contains(start_pos):
                                print("you fixated on the right image:))")
                                disengagement_end_time = libtime.get_time()
                                break
                        break
                # then wait for fixation on position of image_pair[1], i.e. the opposite
                if neutral_image_index == 1:
                    #area = pygame.Rect(myRect_ontheright)
                    #pygame.draw.rect(face_pair_screen, (100, 200, 70), area)
                    #pygame.display.flip()
                    #if ((start_pos[0]-center_of_screen[0]-300)**2 + (start_pos[1]-center_of_screen[1])**2)**0.5 < 326/2:
                    if AOI_left.contains(start_pos):
                        disengagement_start_time = libtime.get_time()
                        #if (start_pos[0] >= center_of_screen[0]+300 and start_pos[0] <= center_of_screen[0]+300+image_HW and start_pos[1] >= center_of_screen[1] and start_pos[1] <= center_of_screen[1]+image_HW):
                        face_pair_screen.clear()
                        #disengagement_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
                        #disengagement_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width
                        disp.fill(disengagement_screen)
                        disp.show()
                        # block until gaze reaches the opposite (right) AOI
                        while True:
                            start_pos = tracker.sample()
                            if AOI_right.contains(start_pos):
                                disengagement_end_time = libtime.get_time()
                                print("Total time taken" + str(disengagement_end_time - disengagement_start_time))
                                break
                        break
                else:
                    continue
        if (pressed_key == 'q'):
            break
        # end trial
        trialend = libtime.get_time()
        tracker.stop_recording()
        # NOTE(review): logged as "stop trial" (no underscore) while the start
        # marker is "start_trial" -- confirm this mismatch is intended.
        tracker.log("stop trial %d" % index)
        # log information in the end
        # add a way out (quit if pressing q)
        if keyboard.get_key()[0] == "q":
            break
class InfantTobiiTracker(TobiiProTracker):
    """A customised version of the pygaze TobiiProTracker class for Tobii Pro
    EyeTracker objects.

    display -- a pygaze.display.Display instance

    -------------------
    TobiiProTracker docs:
    https://github.com/esdalmaijer/PyGaze/blob/5fd62ef10b04015552c61297306b6db251235e02/pygaze/_eyetracker/libtobii.py#L18
    """

    def __init__(self, display, logfile, eventdetection=c.EVENTDETECTION,
                 saccade_velocity_threshold=35,
                 saccade_acceleration_threshold=9500,
                 blink_threshold=c.BLINKTHRESH, **args):
        # redefining __init__ above, so we must explicitly call the superclass' init
        # NOTE(review): the caller-supplied saccade_velocity_threshold /
        # saccade_acceleration_threshold / blink_threshold / eventdetection
        # values are ignored here -- the superclass always receives the
        # hard-coded defaults below.  Confirm whether this is intended.
        TobiiProTracker.__init__(self, display, logfile,
                                 eventdetection=c.EVENTDETECTION,
                                 saccade_velocity_threshold=35,
                                 saccade_acceleration_threshold=9500,
                                 blink_threshold=c.BLINKTHRESH, **args)
        # initialize screens: self.screen for live drawing, self.c_screen
        # for the calibration animation
        self.screen = Screen(dispsize=self.disp.dispsize)
        self.c_screen = Screen(dispsize=self.disp.dispsize)
        self.screen.set_background_colour(colour=(0, 0, 0))
        # five calibration targets (normalised coords converted to pixels)
        self.points_to_calibrate = [
            self._norm_2_px(p)
            for p in [(0.5, 0.5), (0.1, 0.9), (0.1, 0.1), (0.9, 0.9), (0.9, 0.1)]
        ]
        # raw tracker output file (tab-separated)
        self.datafilepath = "{0}_TOBII_output.tsv".format(logfile)
        self.datafile = open(self.datafilepath, 'w')
        # create handle for psychopy window for pre-calibration video
        self.video_win = pygaze.expdisplay
        self.video_win.mouseVisible = False
        self.video_win.size = self.disp.dispsize

    # new method
    def preCalibrate(self):
        """Helps position the infant while playing a video.

        Draws the track box, both eye positions (green when both eyes are
        well inside the track box, red otherwise) and the current distance
        to the tracker, on top of an attention-getter video.

        returns
        Boolean indicating whether the positioning is done
        (True: 'space' has been pressed; False: the video finished first)
        """
        self._write_enabled = False
        self.start_recording()
        # origin: top-left corner of precalibration box; size: tuple of lengths for box sides
        origin = (int(self.disp.dispsize[0] / 4), int(self.disp.dispsize[1] / 4))
        size = (int(2 * self.disp.dispsize[0] / 4), int(2 * self.disp.dispsize[1] / 4))
        # Initialise a PsychoPy MovieStim
        mov = visual.MovieStim3(self.video_win, c.CALIBVIDEO, flipVert=False)
        # print("------------> Pre-calibration process started.")
        print(
            "\t-> When correctly positioned, press \'space\' to start the calibration."
        )
        while mov.status != visual.FINISHED:
            # skip drawing until the first gaze sample has arrived
            if not self.gaze:
                continue
            self.screen.clear()
            # Add the MovieStim to a PyGaze Screen instance.
            self.screen.screen.append(mov)
            # self.gaze.append(gaze_data), gaze_data is the data structure provided by Tobii
            gaze_sample = copy.copy(self.gaze[-1])  # latest gazepoint
            # red until both eyes are validly tracked inside the box
            validity_colour = (255, 0, 0)
            if gaze_sample['right_gaze_origin_validity'] and gaze_sample[
                    'left_gaze_origin_validity']:
                left_validity = 0.15 < gaze_sample[
                    'left_gaze_origin_in_trackbox_coordinate_system'][2] < 0.85
                right_validity = 0.15 < gaze_sample[
                    'right_gaze_origin_in_trackbox_coordinate_system'][2] < 0.85
                if left_validity and right_validity:
                    validity_colour = (0, 255, 0)
            # draw the four sides of the track-box rectangle
            self.screen.draw_line(colour=validity_colour,
                                  spos=origin,
                                  epos=(origin[0] + size[0], origin[1]),
                                  pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=origin,
                                  epos=(origin[0], origin[1] + size[1]),
                                  pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=(origin[0], origin[1] + size[1]),
                                  epos=(origin[0] + size[0], origin[1] + size[1]),
                                  pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=(origin[0] + size[0], origin[1] + size[1]),
                                  epos=(origin[0] + size[0], origin[1]),
                                  pw=1)
            right_eye, left_eye, distance = None, None, []
            # draw each validly tracked eye inside the box (x mirrored)
            if gaze_sample['right_gaze_origin_validity']:
                distance.append(
                    round(
                        gaze_sample[
                            'right_gaze_origin_in_user_coordinate_system'][2] / 10,
                        1))
                right_pos = gaze_sample[
                    'right_gaze_origin_in_trackbox_coordinate_system']
                right_eye = ((1 - right_pos[0]) * size[0] + origin[0],
                             right_pos[1] * size[1] + origin[1])
                self.screen.draw_circle(colour=validity_colour,
                                        pos=right_eye,
                                        r=int(self.disp.dispsize[0] / 100),
                                        pw=5,
                                        fill=True)
            if gaze_sample['left_gaze_origin_validity']:
                distance.append(
                    round(
                        gaze_sample[
                            'left_gaze_origin_in_user_coordinate_system'][2] / 10,
                        1))
                left_pos = gaze_sample[
                    'left_gaze_origin_in_trackbox_coordinate_system']
                left_eye = ((1 - left_pos[0]) * size[0] + origin[0],
                            left_pos[1] * size[1] + origin[1])
                self.screen.draw_circle(colour=validity_colour,
                                        pos=left_eye,
                                        r=int(self.disp.dispsize[0] / 100),
                                        pw=5,
                                        fill=True)
            # mean eye-to-tracker distance in cm (from mm / 10 above)
            self.screen.draw_text(
                text="Current distance to the eye tracker: {0} cm.".format(
                    self._mean(distance)),
                pos=(int(self.disp.dispsize[0] / 2),
                     int(self.disp.dispsize[1] * 0.9)),
                colour=(255, 255, 255),
                fontsize=20)
            self.disp.fill(self.screen)
            self.disp.show()
            key = self._getKeyPress()
            if key == "space":
                break
        # because looping doesn't seem to work
        if mov.status != visual.FINISHED:
            # pause and discard video for the audio to stop as well
            mov.pause()
            self.screen.screen.remove(mov)
            #video_win.close()
            del mov
            self.screen.clear()
            clock.pause(1000)
            return True
        else:
            return False

    # overriden method
    def calibrate(self, eventlog, calibrate=True):
        """Calibrates the eye tracker with custom child-friendly screens.

        arguments
        eventlog -- logfile instance

        keyword arguments
        calibrate -- Boolean indicating if calibration should be performed
                     (default = True)

        returns
        success -- nowt, but a calibration log is added to the log file and
                   some properties are updated (i.e. the thresholds for
                   detection algorithms)
        """
        # # # #calculate thresholds (degrees to pixels) # NOT USED
        self.pxfixtresh = self._deg2pix(self.screendist, self.fixtresh,
                                        self.pixpercm)
        # in pixels per millisecons
        self.pxspdtresh = self._deg2pix(self.screendist, self.spdtresh / 1000.0,
                                        self.pixpercm)
        # in pixels per millisecond**2
        self.pxacctresh = self._deg2pix(self.screendist, self.accthresh / 1000.0,
                                        self.pixpercm)
        # calibration image file
        calibImg = c.CALIBIMG
        # initialize a sound
        snd = sound.Sound(value=c.CALIBSOUNDFILE)
        snd.setVolume(0.5)
        # image scaling range: per-frame shrink step, then the down-and-up cycle
        bit = 0.02
        scale_range = ([x / 100.0 for x in range(60, 30, -2)] +
                       [x / 100.0 for x in range(30, 60, 2)])
        if calibrate:
            if not self.eyetracker:
                print(
                    "WARNING! libtobii.TobiiProTracker.calibrate: no eye trackers found for the calibration!"
                )
                self.stop_recording()
                return False
            # Tobii calibration object
            calibration = tr.ScreenBasedCalibration(self.eyetracker)
            calibrating = True
            calibration.enter_calibration_mode()
            while calibrating:
                eventlog.write(["Calibration started at ", clock.get_time()])
                # original (normalised) points_to_calibrate = [(0.5, 0.5), (0.9, 0.1), (0.1, 0.1), (0.9, 0.9), (0.1, 0.9)]
                # pixel values are calculated ( based on the normalised points, with (1920,1200) (see __init__).
                # self.points_to_calibrate calculated values: [(960, 600), (192, 1080), (192, 120), (1728, 1080), (1728, 120)]
                # calibration for all calibration points
                for i in range(0, len(self.points_to_calibrate)):
                    point = self.points_to_calibrate[i]
                    eventlog.write([
                        "\nCalibrating point {0} at: ".format(point),
                        clock.get_time()
                    ])
                    # print "----------> Calibrating at point ", point
                    # play the soundfile
                    snd.play()
                    # shrink
                    scale = 1
                    for frameN in range(
                            20):  # 20 frames -> 1/3 sec shrinking (180 to 108)
                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg, pos=point, scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()
                        scale = scale - bit
                    # grow and shrink until 'space' is pressed
                    s = 0
                    for frameN in range(
                            12000
                    ):  # scale down from 108 to 54, (15 frames) and back up, according to scale_range list
                        s = frameN % 30
                        scale = scale_range[s]
                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg, pos=point, scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()
                        if self.kb.get_key(keylist=['space'],
                                           timeout=10,
                                           flush=False)[0] == 'space':
                            break
                    # collect results for point (Tobii)
                    normalized_point = self._px_2_norm(point)
                    collect_result = calibration.collect_data(
                        normalized_point[0], normalized_point[1])
                    eventlog.write([
                        "Collecting result for point {0} at: ".format(point),
                        clock.get_time()
                    ])
                    if collect_result != tr.CALIBRATION_STATUS_SUCCESS:
                        eventlog.write([
                            "Recollecting result for point {0} at: ".format(
                                point),
                            clock.get_time()
                        ])
                        # Try again if it didn't go well the first time.
                        # Not all eye tracker models will fail at this point, but instead fail on ComputeAndApply.
                        calibration.collect_data(normalized_point[0],
                                                 normalized_point[1])
                    # grow back to original size
                    up_scale = [
                        x / 100.0 for x in range(int(scale * 100), 100, 2)
                    ]
                    for scale in up_scale:
                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg, pos=point, scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()
                    # image rolling to next point
                    # pixelised
                    # NOTE(review): this hard-codes the point list for a
                    # 1920x1200 display, overriding the values computed in
                    # __init__ -- confirm against the actual display size.
                    self.points_to_calibrate = [(960, 600), (192, 1080),
                                                (192, 120), (1728, 1080),
                                                (1728, 120)]
                    if (i < len(self.points_to_calibrate) - 1):
                        """ screen ratio: 16/10 -> the steps for moving the images should be (16, 10) or (8, 5) """
                        # center -> bottom left / (960, 600) -> (192, 1080) - 48 frames
                        while point[0] >= self.points_to_calibrate[i + 1][0]:
                            self.c_screen.clear()
                            point = (point[0] - 16, point[1] + 10)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()
                        # bottom-left -> top-left / (192, 1080) -> (192, 120)
                        # AND
                        # bottom-right -> top-right / (1728, 1080) -> (1728, 120) - 80 frames
                        while point[1] > self.points_to_calibrate[i + 1][1]:
                            self.c_screen.clear()
                            point = (point[0], point[1] - 12)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()
                        # top-left -> bottom-right / (192, 120) -> (1728, 1080) - 96 frames
                        while point[0] < self.points_to_calibrate[
                                i + 1][0] and not point[
                                    1] == self.points_to_calibrate[i + 1][1]:
                            self.c_screen.clear()
                            point = (point[0] + 16, point[1] + 10)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()
                # Tobii
                calibration_result = calibration.compute_and_apply()
                eventlog.write([
                    "\nCompute and apply returned {0} and collected at {1} points.\n"
                    .format(calibration_result.status,
                            len(calibration_result.calibration_points))
                ])
                print("\tCalibration: {0} - collected at {1} points.".format(
                    calibration_result.status,
                    len(calibration_result.calibration_points)))
                # Post-calibration image (while control monitor shows calibration results)
                self.c_screen.clear()
                self.c_screen.draw_image(c.ATT_IMG)
                self.disp.fill(self.c_screen)
                self.disp.show()
                if calibration_result.status != tr.CALIBRATION_STATUS_SUCCESS:
                    eventlog.write([
                        "\n\nWARNING! libtobii.TobiiProTracker.calibrate: Calibration was unsuccessful!\n\n"
                    ])
                    print("""\tCalibration was unsuccessful.\n ->Press 'R' to recalibrate all points ->or 'SPACE' to continue without calibration\n""")
                    key = self.kb.get_key(keylist=['space', 'r'],
                                          timeout=None)[0]
                    if key == 'r':
                        recalibration_points = [0]
                    elif key == 'space':
                        recalibration_points = []
                else:
                    # call showCalibrationResults function to present the results on screen 0. The function returns a list of recalibration points
                    logfile_dir = os.path.dirname(
                        os.path.abspath(self.datafilepath))
                    recalibration_points = showCalibrationResults(
                        logfile_dir, calibration_result)
                # if the list is empty, calibration is finished
                if len(recalibration_points) == 0:
                    eventlog.write(
                        ["\nCalibration finished at ", clock.get_time()])
                    calibrating = False
                # if the list contains only '0', the calibration was unsuccessful, relalibrate all points
                elif (recalibration_points[0] == 0):
                    eventlog.write(["\nRecalibrating all points..."])
                    calibrating = True
                # if the list contains only '1', recalibrate all points despite successful calibration
                elif (recalibration_points[0] == 1):
                    eventlog.write(["\nRecalibrating all points..."])
                    for point in self.points_to_calibrate:
                        calibration.discard_data(point[0], point[1])
                    calibrating = True
                # relalibrate the returned points
                else:
                    eventlog.write([
                        "\nRecalibrating {0} points...".format(
                            len(recalibration_points))
                    ])
                    self.points_to_calibrate = [
                        self._norm_2_px(p) for p in recalibration_points
                    ]
                    for point in self.points_to_calibrate:
                        calibration.discard_data(point[0], point[1])
                    calibrating = True
            calibration.leave_calibration_mode()
        eventlog.write([" Leaving calibration mode...", clock.get_time()])
        self.stop_recording()
        self._write_enabled = True
        self.disp.close()  # leaving pygaze display

    def _getKeyPress(self):
        # Poll for 'space' or 'escape'; 'escape' aborts the whole session
        # (closes the display and tracker, then exits the process).
        key = self.kb.get_key(keylist=['space', 'escape'], flush=False)[0]
        if key and key == 'escape':
            self.disp.close()
            self.close()
            sys.exit()
        elif key:
            return key
        else:
            return None
# one log line per trial: number, image shown, presentation time
log = Logfile()
log.write(["trialnr", "image", "imgtime"])

# # # # # #
# PREPARE

# read the instruction text from its file
with open(INSTFILE) as instfile:
    instructions = instfile.read()

# read all image names
images = os.listdir(IMGDIR)

# show the calibration prompt and wait for any keypress
scr.draw_text(text="Press any key to start the calibration.", fontsize=TEXTSIZE)
disp.fill(scr)
disp.show()
kb.get_key(keylist=None, timeout=None, flush=True)

# calibrate the eye tracker
tracker.calibrate()

# # # # # #
# RUN

# display task instructions
scr.clear()
# log header: trial number, image shown, presentation time
log.write(["trialnr", "image", "imgtime"])

# # # # # #
# PREPARE

# read the instruction text from its file
with open(INSTFILE) as instfile:
    instructions = instfile.read()

# read all image names
images = os.listdir(IMGDIR)

# show the calibration prompt and wait for any keypress
scr.draw_text(text="Press any key to start the calibration.", fontsize=TEXTSIZE)
disp.fill(scr)
disp.show()
kb.get_key(keylist=None, timeout=None, flush=True)

# calibrate the eye tracker
tracker.calibrate()

# # # # # #
# RUN

# display task instructions
scr.clear()