class Task(object):
    '''Display image reversals that switch every ``delta_t`` milliseconds.

    Image switching pauses while the eye-tracker signal indicates the
    participant is not looking at the screen.

    Parameters
    ----------
    screen_params : dict
        Keyword arguments forwarded to the pygaze ``Screen`` constructor.
    display : pygaze Display
        The display the screens are shown on.
    images : list
        List of image files. Should be in working directory or full paths.
        Only the first two entries are drawn (one per reversal state).
    n_iters : int
        Number of image flashes to perform before quitting.
    delta_t : int or float
        Time, in milliseconds, between image switches.
    eyetracker : eye tracker object
        Required. The process exits with an error message if it is None.
    '''

    def __init__(self, screen_params, display, images, n_iters=10,
                 delta_t=100, eyetracker=None):
        self.display = display
        self.images = images
        self.delta_t = float(delta_t)
        self.n_iters = n_iters
        self.eyetracker = eyetracker
        # Fail fast: validate the tracker before doing any heavier setup
        # (the original built both screens before noticing it was missing).
        if self.eyetracker is None:
            sys.exit('ERROR: must attach eyetracker object!')
        # Create screens for both images now; doing it up front keeps the
        # per-flash work minimal so switch timing stays accurate.
        self.scrn1 = Screen(**screen_params)
        self.scrn1.draw_image(images[0])
        self.scrn2 = Screen(**screen_params)
        self.scrn2.draw_image(images[1])
        self.eyetracker.start_recording()

    def _flash(self, t0):
        '''Flash to the next image once ``delta_t`` ms have elapsed since t0.

        Samples the tracker, and only advances the image (and ``iter_``)
        when the participant is judged to be focused; otherwise the
        current image stays up.
        '''
        position = self.eyetracker.sample()
        self.is_focused = check_focus(position)
        # Busy-wait until delta_t milliseconds have passed since t0.
        # NOTE(review): this spins the CPU; `elapsed` is defined elsewhere
        # and presumably returns milliseconds — confirm its unit.
        while elapsed(t0) < self.delta_t:
            pass
        # If the eye tracker detects the participant is focused, switch
        # images (alternating on iteration parity); otherwise do nothing,
        # which also freezes the iteration counter.
        if self.is_focused:
            if self.iter_ % 2 == 0:
                self.display.fill(screen=self.scrn1)
            else:
                self.display.fill(screen=self.scrn2)
            self.display.show()
            self.iter_ += 1

    def start(self):
        '''Run the flashing loop for ``n_iters`` switches, then stop recording.'''
        self.iter_ = 0
        while self.iter_ < self.n_iters:
            t0 = datetime.now()
            self._flash(t0)
        self.eyetracker.stop_recording()
# input collection and storage kb = Keyboard(keylist=['escape', 'space'], timeout=None) log = Logfile() log.write(["trialnr", "trialstart", "trialend", "duration", "image"]) # run trials tracker.calibrate() for trialnr in range(0, len(IMAGES)): # blank display disp.fill() disp.show() libtime.pause(1000) # prepare stimulus scr.clear() scr.draw_image(IMAGES[trialnr]) # start recording eye movements tracker.drift_correction() tracker.start_recording() tracker.status_msg("trial %d" % trialnr) tracker.log("start trial %d" % trialnr) # present stimulus response = None trialstart = libtime.get_time() while not response: gazepos = tracker.sample() frl.update(disp, scr, gazepos) response, presstime = kb.get_key(timeout=1) # stop tracking and process input tracker.stop_recording() tracker.log("stop trial %d" % trialnr)
# fixation cross screen disp.fill(fixation_cross_screen) disp.show() libtime.pause(500) fixation_cross_screen.clear() # number screen disp.fill(number_screen) disp.show() libtime.pause(1000) number_screen.clear() #draws image pair image_pair = image_set[trialnr] face_pair_screen.draw_image(image_pair[0], pos=(center_of_screen[0] - 300, center_of_screen[1]), scale=None) #need screen width face_pair_screen.draw_image(image_pair[1], pos=(center_of_screen[0] + 300, center_of_screen[1]), scale=None) #need screen width AOI_left = AOI(aoitype="rectangle", pos=(center_of_screen[0] - 300, center_of_screen[1]), size=[326, 326]) AOI_right = AOI(aoitype="rectangle", pos=(center_of_screen[0] + 300, center_of_screen[1]), size=[326, 326]) disp.fill(face_pair_screen) disp.show() neutral_image_index = 0
# display drift check instructions scr.clear() scr.draw_text(text=instruction_dc, fontsize=TEXTSIZE_M) disp.fill(scr) disp.show() # wait for a keypress kb.get_key(keylist=None, timeout=None, flush=True) # loop through all trials ntrials = len(images) for trialnr in range(ntrials): # PREPARE TRIAL # draw the image scr.clear() scr.draw_image(os.path.join(IMGDIR, images[trialnr])) x = (DISPSIZE[0] - image_size[0]) / 2 # centre minus half of the image width y = (DISPSIZE[1] - image_size[1]) / 2 # centre minus half of the image height aoi = AOI('rectangle', (x, y), image_size) disp.fill(scr) # perform a drift check tracker.drift_correction() # RUN TRIAL # start tracking key = None tracker.start_recording() tracker.log("TRIALSTART %d" % trialnr)
class InfantTobiiTracker(TobiiProTracker):
    """A customised version of the pygaze TobiiProTracker class for Tobii Pro
    EyeTracker objects, adding an infant-friendly video pre-calibration and an
    animated, child-friendly calibration routine.

    display -- a pygaze.display.Display instance

    -------------------
    TobiiProTracker docs:
    https://github.com/esdalmaijer/PyGaze/blob/5fd62ef10b04015552c61297306b6db251235e02/pygaze/_eyetracker/libtobii.py#L18
    """

    def __init__(self, display, logfile, eventdetection=c.EVENTDETECTION,
                 saccade_velocity_threshold=35,
                 saccade_acceleration_threshold=9500,
                 blink_threshold=c.BLINKTHRESH, **args):
        # Redefining __init__, so we must explicitly call the superclass' init.
        # FIX: forward the actual parameter values instead of re-passing the
        # module constants/literals — previously any caller-supplied threshold
        # was silently ignored. Defaults are unchanged, so existing callers
        # behave identically.
        TobiiProTracker.__init__(
            self, display, logfile,
            eventdetection=eventdetection,
            saccade_velocity_threshold=saccade_velocity_threshold,
            saccade_acceleration_threshold=saccade_acceleration_threshold,
            blink_threshold=blink_threshold, **args)
        # initialize screens: self.screen for pre-calibration feedback,
        # self.c_screen for the calibration animation
        self.screen = Screen(dispsize=self.disp.dispsize)
        self.c_screen = Screen(dispsize=self.disp.dispsize)
        self.screen.set_background_colour(colour=(0, 0, 0))
        # calibration points in pixels: centre plus the four corners
        # (normalised coordinates converted via the superclass helper)
        self.points_to_calibrate = [
            self._norm_2_px(p)
            for p in [(0.5, 0.5), (0.1, 0.9), (0.1, 0.1), (0.9, 0.9), (0.9, 0.1)]
        ]
        # NOTE(review): this file handle is opened here and never closed in
        # this class — presumably closed elsewhere; confirm.
        self.datafilepath = "{0}_TOBII_output.tsv".format(logfile)
        self.datafile = open(self.datafilepath, 'w')
        # create handle for psychopy window for pre-calibration video
        self.video_win = pygaze.expdisplay
        self.video_win.mouseVisible = False
        self.video_win.size = self.disp.dispsize

    # new method
    def preCalibrate(self):
        """Helps position the infant while playing a video.

        returns Boolean indicating whether the positioning is done
        (True: 'space' has been pressed)
        """
        self._write_enabled = False
        self.start_recording()
        # origin: top-left corner of precalibration box; size: tuple of
        # lengths for box sides (the box is the centre half of the display)
        origin = (int(self.disp.dispsize[0] / 4), int(self.disp.dispsize[1] / 4))
        size = (int(2 * self.disp.dispsize[0] / 4),
                int(2 * self.disp.dispsize[1] / 4))
        # Initialise a PsychoPy MovieStim
        mov = visual.MovieStim3(self.video_win, c.CALIBVIDEO, flipVert=False)
        print(
            "\t-> When correctly positioned, press \'space\' to start the calibration."
        )
        while mov.status != visual.FINISHED:
            # wait until the tracker has delivered at least one gaze sample
            if not self.gaze:
                continue
            self.screen.clear()
            # Add the MovieStim to a PyGaze Screen instance.
            self.screen.screen.append(mov)
            # self.gaze holds gaze_data structures provided by Tobii
            gaze_sample = copy.copy(self.gaze[-1])  # latest gazepoint
            # red box = eyes not tracked / out of range; green = good position
            validity_colour = (255, 0, 0)
            if gaze_sample['right_gaze_origin_validity'] and gaze_sample[
                    'left_gaze_origin_validity']:
                # z coordinate in trackbox space must be comfortably inside
                # the tracking range (0.15..0.85) for both eyes
                left_validity = 0.15 < gaze_sample[
                    'left_gaze_origin_in_trackbox_coordinate_system'][2] < 0.85
                right_validity = 0.15 < gaze_sample[
                    'right_gaze_origin_in_trackbox_coordinate_system'][2] < 0.85
                if left_validity and right_validity:
                    validity_colour = (0, 255, 0)
            # draw the four sides of the feedback box
            self.screen.draw_line(colour=validity_colour, spos=origin,
                                  epos=(origin[0] + size[0], origin[1]), pw=1)
            self.screen.draw_line(colour=validity_colour, spos=origin,
                                  epos=(origin[0], origin[1] + size[1]), pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=(origin[0], origin[1] + size[1]),
                                  epos=(origin[0] + size[0], origin[1] + size[1]),
                                  pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=(origin[0] + size[0], origin[1] + size[1]),
                                  epos=(origin[0] + size[0], origin[1]), pw=1)
            # draw each valid eye as a filled circle inside the box and
            # collect its distance (mm -> cm) from the tracker
            right_eye, left_eye, distance = None, None, []
            if gaze_sample['right_gaze_origin_validity']:
                distance.append(
                    round(
                        gaze_sample[
                            'right_gaze_origin_in_user_coordinate_system'][2] / 10,
                        1))
                right_pos = gaze_sample[
                    'right_gaze_origin_in_trackbox_coordinate_system']
                # x is mirrored (1 - x) so the display acts like a mirror
                right_eye = ((1 - right_pos[0]) * size[0] + origin[0],
                             right_pos[1] * size[1] + origin[1])
                self.screen.draw_circle(colour=validity_colour, pos=right_eye,
                                        r=int(self.disp.dispsize[0] / 100),
                                        pw=5, fill=True)
            if gaze_sample['left_gaze_origin_validity']:
                distance.append(
                    round(
                        gaze_sample[
                            'left_gaze_origin_in_user_coordinate_system'][2] / 10,
                        1))
                left_pos = gaze_sample[
                    'left_gaze_origin_in_trackbox_coordinate_system']
                left_eye = ((1 - left_pos[0]) * size[0] + origin[0],
                            left_pos[1] * size[1] + origin[1])
                self.screen.draw_circle(colour=validity_colour, pos=left_eye,
                                        r=int(self.disp.dispsize[0] / 100),
                                        pw=5, fill=True)
            self.screen.draw_text(
                text="Current distance to the eye tracker: {0} cm.".format(
                    self._mean(distance)),
                pos=(int(self.disp.dispsize[0] / 2),
                     int(self.disp.dispsize[1] * 0.9)),
                colour=(255, 255, 255), fontsize=20)
            self.disp.fill(self.screen)
            self.disp.show()
            key = self._getKeyPress()
            if key == "space":
                break  # because looping doesn't seem to work
        if mov.status != visual.FINISHED:
            # 'space' was pressed: pause and discard the video so the audio
            # stops as well, then give the display a second to settle
            mov.pause()
            self.screen.screen.remove(mov)
            del mov
            self.screen.clear()
            clock.pause(1000)
            return True
        else:
            # the video ran out before the infant was positioned
            return False

    # overridden method
    def calibrate(self, eventlog, calibrate=True):
        """Calibrates the eye tracker with custom child-friendly screens.

        arguments
        eventlog -- logfile instance

        keyword arguments
        calibrate -- Boolean indicating if calibration should be performed
                     (default = True).

        returns
        success -- nowt, but a calibration log is added to the log file and
                   some properties are updated (i.e. the thresholds for
                   detection algorithms)
        """
        # calculate thresholds (degrees to pixels)  # NOT USED
        self.pxfixtresh = self._deg2pix(self.screendist, self.fixtresh,
                                        self.pixpercm)
        # in pixels per millisecond
        self.pxspdtresh = self._deg2pix(self.screendist, self.spdtresh / 1000.0,
                                        self.pixpercm)
        # in pixels per millisecond**2
        self.pxacctresh = self._deg2pix(self.screendist,
                                        self.accthresh / 1000.0, self.pixpercm)
        # calibration image file
        calibImg = c.CALIBIMG
        # initialize a sound
        snd = sound.Sound(value=c.CALIBSOUNDFILE)
        snd.setVolume(0.5)
        # image scaling range: shrink 0.60 -> 0.32, then grow back, in 0.02 steps
        bit = 0.02
        scale_range = ([x / 100.0 for x in range(60, 30, -2)] +
                       [x / 100.0 for x in range(30, 60, 2)])
        if calibrate:
            if not self.eyetracker:
                print(
                    "WARNING! libtobii.TobiiProTracker.calibrate: no eye trackers found for the calibration!"
                )
                self.stop_recording()
                return False
            # Tobii calibration object
            calibration = tr.ScreenBasedCalibration(self.eyetracker)
            calibrating = True
            calibration.enter_calibration_mode()
            while calibrating:
                eventlog.write(["Calibration started at ", clock.get_time()])
                # original (normalised) points, kept for reference (unused)
                points_to_calibrate = [(0.5, 0.5), (0.9, 0.1), (0.1, 0.1),
                                       (0.9, 0.9), (0.1, 0.9)]
                # pixel values are calculated from the normalised points (see
                # __init__); with a (1920, 1200) display they come out as:
                # [(960, 600), (192, 1080), (192, 120), (1728, 1080), (1728, 120)]
                # calibration for all calibration points
                for i in range(0, len(self.points_to_calibrate)):
                    point = self.points_to_calibrate[i]
                    eventlog.write([
                        "\nCalibrating point {0} at: ".format(point),
                        clock.get_time()
                    ])
                    # play the soundfile
                    snd.play()
                    # shrink the attention-getter image
                    scale = 1
                    for frameN in range(20):  # 20 frames -> 1/3 sec shrinking (180 to 108)
                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg, pos=point, scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()
                        scale = scale - bit
                    # grow and shrink until 'space' is pressed
                    s = 0
                    for frameN in range(12000):
                        # scale down from 108 to 54 (15 frames) and back up,
                        # cycling through scale_range
                        s = frameN % 30
                        scale = scale_range[s]
                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg, pos=point, scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()
                        if self.kb.get_key(keylist=['space'], timeout=10,
                                           flush=False)[0] == 'space':
                            break
                    # collect results for point (Tobii expects normalised coords)
                    normalized_point = self._px_2_norm(point)
                    collect_result = calibration.collect_data(
                        normalized_point[0], normalized_point[1])
                    eventlog.write([
                        "Collecting result for point {0} at: ".format(point),
                        clock.get_time()
                    ])
                    if collect_result != tr.CALIBRATION_STATUS_SUCCESS:
                        eventlog.write([
                            "Recollecting result for point {0} at: ".format(point),
                            clock.get_time()
                        ])
                        # Try again if it didn't go well the first time.
                        # Not all eye tracker models will fail at this point,
                        # but instead fail on ComputeAndApply.
                        calibration.collect_data(normalized_point[0],
                                                 normalized_point[1])
                    # grow back to original size
                    up_scale = [
                        x / 100.0 for x in range(int(scale * 100), 100, 2)
                    ]
                    for scale in up_scale:
                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg, pos=point, scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()
                    # image rolling to next point
                    # NOTE(review): the step sizes and loop conditions below
                    # assume the pixelised point order
                    # [(960, 600), (192, 1080), (192, 120), (1728, 1080), (1728, 120)]
                    # on a 16:10 display — confirm for other resolutions.
                    if (i < len(self.points_to_calibrate) - 1):
                        """ screen ratio: 16/10 -> the steps for moving the
                        images should be (16, 10) or (8, 5) """
                        # center -> bottom left / (960, 600) -> (192, 1080) - 48 frames
                        while point[0] >= self.points_to_calibrate[i + 1][0]:
                            self.c_screen.clear()
                            point = (point[0] - 16, point[1] + 10)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()
                        # bottom-left -> top-left / (192, 1080) -> (192, 120)
                        # AND
                        # bottom-right -> top-right / (1728, 1080) -> (1728, 120) - 80 frames
                        while point[1] > self.points_to_calibrate[i + 1][1]:
                            self.c_screen.clear()
                            point = (point[0], point[1] - 12)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()
                        # top-left -> bottom-right / (192, 120) -> (1728, 1080) - 96 frames
                        while point[0] < self.points_to_calibrate[
                                i + 1][0] and not point[
                                    1] == self.points_to_calibrate[i + 1][1]:
                            self.c_screen.clear()
                            point = (point[0] + 16, point[1] + 10)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()
                # Tobii: compute and apply the collected calibration
                calibration_result = calibration.compute_and_apply()
                eventlog.write([
                    "\nCompute and apply returned {0} and collected at {1} points.\n"
                    .format(calibration_result.status,
                            len(calibration_result.calibration_points))
                ])
                print("\tCalibration: {0} - collected at {1} points.".format(
                    calibration_result.status,
                    len(calibration_result.calibration_points)))
                # Post-calibration image (while control monitor shows
                # calibration results)
                self.c_screen.clear()
                self.c_screen.draw_image(c.ATT_IMG)
                self.disp.fill(self.c_screen)
                self.disp.show()
                if calibration_result.status != tr.CALIBRATION_STATUS_SUCCESS:
                    eventlog.write([
                        "\n\nWARNING! libtobii.TobiiProTracker.calibrate: Calibration was unsuccessful!\n\n"
                    ])
                    print("""\tCalibration was unsuccessful.\n
                    ->Press 'R' to recalibrate all points
                    ->or 'SPACE' to continue without calibration\n""")
                    key = self.kb.get_key(keylist=['space', 'r'],
                                          timeout=None)[0]
                    if key == 'r':
                        recalibration_points = [0]
                    elif key == 'space':
                        recalibration_points = []
                else:
                    # call showCalibrationResults to present the results on
                    # screen 0; it returns a list of recalibration points
                    logfile_dir = os.path.dirname(
                        os.path.abspath(self.datafilepath))
                    recalibration_points = showCalibrationResults(
                        logfile_dir, calibration_result)
                # if the list is empty, calibration is finished
                if len(recalibration_points) == 0:
                    eventlog.write(
                        ["\nCalibration finished at ", clock.get_time()])
                    calibrating = False
                # if the list contains only '0', the calibration was
                # unsuccessful: recalibrate all points
                elif (recalibration_points[0] == 0):
                    eventlog.write(["\nRecalibrating all points..."])
                    calibrating = True
                # if the list contains only '1', recalibrate all points
                # despite successful calibration
                elif (recalibration_points[0] == 1):
                    eventlog.write(["\nRecalibrating all points..."])
                    for point in self.points_to_calibrate:
                        calibration.discard_data(point[0], point[1])
                    calibrating = True
                # recalibrate the returned points only
                else:
                    eventlog.write([
                        "\nRecalibrating {0} points...".format(
                            len(recalibration_points))
                    ])
                    self.points_to_calibrate = [
                        self._norm_2_px(p) for p in recalibration_points
                    ]
                    for point in self.points_to_calibrate:
                        calibration.discard_data(point[0], point[1])
                    calibrating = True
            calibration.leave_calibration_mode()
            eventlog.write([" Leaving calibration mode...", clock.get_time()])
        self.stop_recording()
        self._write_enabled = True
        self.disp.close()  # leaving pygaze display

    def _getKeyPress(self):
        # Poll for 'space'/'escape'; 'escape' aborts the whole experiment.
        key = self.kb.get_key(keylist=['space', 'escape'], flush=False)[0]
        if key and key == 'escape':
            self.disp.close()
            self.close()
            sys.exit()
        elif key:
            return key
        else:
            return None
def acclimation(center_of_screen, tracker, disp, keyboard, AOI_left, AOI_right):
    # Run the acclimation trials: per trial, show a fixation cross, a random
    # digit, then a face pair; accumulate looking time per AOI for 3 s; on
    # "disengagement" trials (image_pair[2] True) swap the neutral image's
    # suffix and time the gaze shift from one image to the other.
    # NOTE(review): depends on module-level names (generate_trial_images,
    # circle_suffix, square_suffix, regular_suffix, np, time, random, Screen,
    # libtime) defined elsewhere in the file.
    image_set = generate_trial_images()
    # start trials
    for index in range(0, len(image_set)):
        # make trial screens
        fixation_cross_screen = Screen()
        fixation_cross_screen.draw_fixation(fixtype='cross',
                                            pos=center_of_screen,
                                            colour=(255, 255, 255),
                                            pw=5, diameter=30)
        number_screen = Screen()
        number_screen.draw_text(text=str(np.random.randint(1, 10)),
                                pos=center_of_screen,
                                colour=(255, 255, 255), fontsize=40)
        face_pair_screen = Screen()
        disengagement_screen = Screen()
        # start with blank screen for 500 ms and start recording
        disp.fill()
        disp.show()
        tracker.start_recording()
        tracker.log("start_trial %d" % index)
        trialstart = libtime.get_time()
        libtime.pause(500)
        # fixation cross screen
        disp.fill(fixation_cross_screen)
        disp.show()
        libtime.pause(500)
        fixation_cross_screen.clear()
        # number screen
        disp.fill(number_screen)
        disp.show()
        libtime.pause(1000)
        number_screen.clear()
        # draws image pair, 300 px left/right of centre
        image_pair = image_set[index]
        face_pair_screen.draw_image(image_pair[0],
                                    pos=(center_of_screen[0] - 300,
                                         center_of_screen[1]),
                                    scale=None)  # need screen width
        face_pair_screen.draw_image(image_pair[1],
                                    pos=(center_of_screen[0] + 300,
                                         center_of_screen[1]),
                                    scale=None)  # need screen width
        disp.fill(face_pair_screen)
        disp.show()
        # which element of image_pair is the neutral ("NE") face
        neutral_image_index = 0
        if ("NE" in image_pair[1]):
            neutral_image_index = 1
        # capture fixations and per-AOI looking time for 3000 ms
        # (times in ms; sampled once per loop pass)
        start_time_taken = time.time() * 1000
        total_time_taken = 0
        time_neutral = 0
        time_emotional = 0
        last_pass_time_stamp = (time.time() * 1000) - start_time_taken
        last_pass_time_taken = 0
        first_image = 0
        count_fixation_on_emotional = 0
        last_fixation_on_emotional = False
        while total_time_taken < 3000:
            pressed_key = keyboard.get_key()[0]
            if (pressed_key == 'q'):
                break
            tracker_pos = tracker.sample()
            if AOI_right.contains(tracker_pos):
                # right AOI: emotional when the neutral face is on the left
                if neutral_image_index == 0:
                    time_emotional = time_emotional + last_pass_time_taken
                    # count a new fixation only when gaze newly arrives
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True
                else:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
            elif AOI_left.contains(tracker_pos):
                # left AOI: mirror of the branch above
                if neutral_image_index == 0:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
                else:
                    time_emotional = time_emotional + last_pass_time_taken
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True
            last_pass_time_taken = (time.time() * 1000) - last_pass_time_stamp
            last_pass_time_stamp = (time.time() * 1000)
            total_time_taken = (time.time() * 1000) - start_time_taken
        if (pressed_key == 'q'):
            break
        # image pair index 2 tells us if we need to draw a circle/square.
        if (image_pair[2] == True):
            # randomly choose circle or square variant of the neutral image
            new_suffix = circle_suffix
            if (random.choice([True, False]) == True):
                new_suffix = square_suffix
            image_pair[neutral_image_index] = image_pair[
                neutral_image_index].replace(regular_suffix, new_suffix)
            disengagement_screen.draw_image(image_pair[0],
                                            pos=(center_of_screen[0] - 300,
                                                 center_of_screen[1]),
                                            scale=None)  # need screen width
            disengagement_screen.draw_image(image_pair[1],
                                            pos=(center_of_screen[0] + 300,
                                                 center_of_screen[1]),
                                            scale=None)  # need screen width
            # wait (until any key) for a fixation on the emotional image,
            # then show the swapped pair and time the gaze shift to the
            # opposite side
            while keyboard.get_key()[0] == None:
                start_pos = tracker.sample()
                if neutral_image_index == 0:
                    if AOI_right.contains(start_pos):
                        disengagement_start_time = libtime.get_time()
                        # fixation started here: draw the new images
                        face_pair_screen.clear()
                        disp.fill(disengagement_screen)
                        disp.show()
                        # NOTE(review): this inner loop has no timeout or
                        # quit key — it blocks until the left AOI is hit.
                        while True:
                            start_pos = tracker.sample()
                            if AOI_left.contains(start_pos):
                                print("you fixated on the right image:))")
                                disengagement_end_time = libtime.get_time()
                                break
                        break
                # then wait for fixation on position of image_pair[1],
                # i.e. the opposite
                if neutral_image_index == 1:
                    if AOI_left.contains(start_pos):
                        disengagement_start_time = libtime.get_time()
                        face_pair_screen.clear()
                        disp.fill(disengagement_screen)
                        disp.show()
                        while True:
                            start_pos = tracker.sample()
                            if AOI_right.contains(start_pos):
                                disengagement_end_time = libtime.get_time()
                                print("Total time taken" +
                                      str(disengagement_end_time -
                                          disengagement_start_time))
                                break
                        break
                    else:
                        continue
        if (pressed_key == 'q'):
            break
        # end trial
        trialend = libtime.get_time()
        tracker.stop_recording()
        tracker.log("stop trial %d" % index)
        # log information in the end
        # add a way out (quit if pressing q)
        if keyboard.get_key()[0] == "q":
            break
# wait for a keypress kb.get_key(keylist=None, timeout=None, flush=True) client_thread = Client() client_thread.start() # loop through all trials ntrials = len(images) for trialnr in range(ntrials): # PREPARE TRIAL # draw the image scr.clear() scr.draw_image(os.path.join(IMGDIR, images[trialnr])) # start tracking tracker.start_recording() tracker.log("TRIALSTART %d" % trialnr) tracker.log("IMAGENAME %s" % images[trialnr]) tracker.status_msg("trial %d/%d" % (trialnr + 1, ntrials)) # perform a drift check tracker.drift_correction() # wait for the other player # RUN TRIAL # present image
# webcam: open the first available capture device with the configured
# device type, resolution, and mirroring flags
camlist = libwebcam.available_devices()
cam = libwebcam.Camera(dev=camlist[0], devtype=DEVTYPE, resolution=CAMRES,
                       verflip=VFLIP, horflip=HFLIP)

# # # # # #
# run camera display

# show a live preview: grab a frame, blit it, flip, poll the keyboard;
# any keypress ends the loop
key_pressed = False
while not key_pressed:
    frame = cam.get_image()
    scr.draw_image(frame)
    disp.fill(scr)
    disp.show()
    key_pressed, press_time = kb.get_key()

# # # # # #
# quit

# release the camera and the display cleanly
cam.close()
disp.close()
#screen2.draw_image(base_path1, pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width screen3 = Screen() # Create a Screen to draw images on #screen4 = Screen() # calibrate eye tracker tracker.calibrate() #for female set for image_pair in all_image_set: screen1.clear() #draws image pair screen1.draw_image(image_pair[0], pos=(center_of_screen[0] - 300, center_of_screen[1]), scale=None) #need screen width screen1.draw_image(image_pair[1], pos=(center_of_screen[0] + 300, center_of_screen[1]), scale=None) #need screen width #space (replace with 3 seconds) #current time pairstart = libtime.get_time() if (image_pair[2] == True ): # if we have the addition, wait for fixation. REPLACE THE NEXT LINE while keyboard.get_key( )[0] == None: #Replace this with wait for fixation code disp.fill(screen1) disp.show()
# display drift check instructions scr.clear() scr.draw_text(text=instruction_dc, fontsize=TEXTSIZE_M) disp.fill(scr) disp.show() # wait for a keypress kb.get_key(keylist=None, timeout=None, flush=True) # loop through all trials ntrials = len(images) for trialnr in range(ntrials): # PREPARE TRIAL # draw the image scr.clear() scr.draw_image(os.path.join(IMGDIR, images[trialnr]), pos=(DISPSIZE[0] / 2, DISPSIZE[1] / 2)) x = (DISPSIZE[0] - image_size[0]) / 2 # centre minus half of the image width y = (DISPSIZE[1] - image_size[1]) / 2 # centre minus half of the image height aoi = AOI('rectangle', (x, y), image_size) disp.fill(scr) # perform a drift check tracker.drift_correction() # RUN TRIAL # start tracking tracker.start_recording() tracker.log("TRIALSTART %d" % trialnr) tracker.log("IMAGENAME %s" % images[trialnr])
# Open the first detected webcam using the module-level capture settings.
camlist = libwebcam.available_devices()
cam = libwebcam.Camera(dev=camlist[0], devtype=DEVTYPE, resolution=CAMRES,
                       verflip=VFLIP, horflip=HFLIP)

# # # # # #
# run camera display

# Live camera feed: keep redrawing frames until the keyboard reports a key.
done = False
while not done:
    snapshot = cam.get_image()
    scr.draw_image(snapshot)
    disp.fill(scr)
    disp.show()
    done, when = kb.get_key()

# # # # # #
# quit

# Tear down in the reverse order of setup.
cam.close()
disp.close()