except:
    raiseandquit("invalid block number")

save_prefix = '_'.join(['Tuna19', str(pp_id), str(block)])
print(save_prefix)

if block == 1:
    consent_check = viz.input('Has consent been given (Y/N)?: ')
    try:
        str(consent_check)
        if not consent_check == 'Y':
            raiseandquit("Consent must be gained before proceeding")
    except:
        raiseandquit("Consent must be gained before proceeding")

#TODO: check instructions
#put instructions here#
viz.message("""
\t\tYou will now begin the experiment \n\n The automated vehicle will attempt to navigate a series of bends. \nYour task as the supervisory driver is to make sure the vehicle stays within the road edges. \nDuring automation please keep your hands loosely on the wheel. \nYou may take control by pressing the gear pads. \nOnce pressed, you will immediately be in control of the vehicle
""")

viz.mouse.setVisible(viz.OFF)  #switch mouse off

viztask.schedule(
    run(CONDITIONLIST, tracks, grounds, backgrounds, cave, driver, autofiles, wheel, save_prefix)
)
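# --- Editor's note -----------------------------------------------------------
# `raiseandquit` is called above but not defined in this excerpt. The guarded
# definition below is a minimal sketch of what such a helper might do (report
# the error and stop the experiment); it is an assumption, not the project's
# actual implementation.
# -----------------------------------------------------------------------------
if 'raiseandquit' not in globals():
    def raiseandquit(msg):
        """Sketch: surface a fatal setup error and shut the experiment down."""
        print(msg)
        viz.quit()              # close the Vizard window
        raise SystemExit(msg)   # stop the script with the reason attached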
def run_calibration(comms, fname):

    fname = fname + '_calibration_accuracy'
    print(fname)

    ##MAKE WHITE BACKGROUND COLOUR FOR BETTER CALIBRATION
    viz.MainWindow.clearcolor(viz.WHITE)

    #addGroundPlane()
    markers = Markers()  #add markers.

    #run through calibration programme
    #throw up a 9-point flexible grid. Can simply keep going until satisfied.
    #Needs a separate save function than the original to be completely self-sufficient.
    boxsize = [.6, .3]    #xy box size
    lowerleft = [.2, .2]  #starting corner
    #start from top right
    Grid = MakeGrid(3, 4, boxsize, lowerleft)

    imagepath = 'C:/VENLAB data/shared_modules/textures/'
    #fn = imagepath + 'calibmarker.png'
    #fn = imagepath + 'calibmarker_black.png'  #pupil-labs has issues. Stops due to not collecting enough data. Might be to tell it to stop?
    fn = imagepath + 'calibmarker_white.png'   #seems to work best with this one.
    #fn = imagepath + 'calibmarker_white_old.png'

    def loadimage(fn):
        """Loads and scales a texture from a given image path"""

        defaultscale = 800.0 / 600.0
        aspect = 1920.0 / 1080.0
        scale = aspect / defaultscale
        ttsize = 1

        pt = viz.add(viz.TEXQUAD, viz.SCREEN)
        pt.scale(ttsize, ttsize * scale, ttsize)
        pt.texture(viz.add(fn))
        pt.translate(Grid[0][0], Grid[0][1])  # Now you can specify screen coordinates, so the visual angle is OK (i.e. no depth)
        pt.visible(0)

        return (pt)

    pt = loadimage(fn)
    pt_buffer = loadimage(imagepath + 'calibmarker_buffer.png')
    pt_buffer.visible(0)

    #### CALIB GRID #####
    #need to make sure they are the same visual angle (account for the depth of the virtual world).
    #test the calibration by plotting the calibration sequence taken from the eyetracker (onto the dots)
    #(0.37, 0.6)      #Point 1
    #(0.485, 0.6)     #Point 2
    #(0.6, 0.6)       #Point 3
    #(0.37, 0.495)    #Point 4
    #(0.485, 0.495)   #Point 5
    #(0.6, 0.495)     #Point 6
    #(0.37, 0.39)     #Point 7
    #(0.485, 0.39)    #Point 8
    #(0.6, 0.39)      #Point 9

    viz.message('\t\t\t CALIBRATION \n\nPlease look at the centre of the calibration target. Try and move your head as little as possible')

    calib_flag = 0
    record_flag = 0
    satisfied = False
    i = 0  #index for point.

    comms.send_msg('C')  #start calibration

    #Add buffer point
    pt_buffer.visible(1)
    pt.visible(0)
    yield viztask.waitTime(.75)  #wait for 0.75 s
    pt_buffer.visible(0)  #remove buffer point
    pt.visible(1)

    while not satisfied:

        msg_rcv = comms.poll_msg()

        if 'calibration.marker_sample_completed' in msg_rcv:

            pt_buffer.visible(1)
            pt.visible(0)
            yield viztask.waitTime(.5)  #wait for half a second

            i = i + 1
            if i > 11:  #clamp i
                comms.send_msg('c')
                while True:
                    msg_rcv = comms.poll_msg()
                    if True in ['calibration' in j for j in msg_rcv]:
                        out = [j for j in msg_rcv if 'calibration' in j][0]
                        out = out[12:]
                        print(out)
                        calib_accuracy = out.split('//')[0]
                        calib_precision = out.split('//')[1]
                        #calib_accuracy = out.split('calibration.Accuracy')[1].split('.Precision')[0]
                        #calib_precision = out.split('calibration.Accuracy')[1].split('.Precision')[0]

                        happy = vizinput.ask("Calibration Accuracy: " + calib_accuracy + "\nAre you satisfied?")
                        if happy:
                            print("happy")
                            satisfied = True
                            pt.visible(0)
                            pt_buffer.visible(0)
                            save_calibration([calib_accuracy, calib_precision, time.time(), True], fname)
                            break
                        else:
                            print("not happy")
                            satisfied = False
                            save_calibration([calib_accuracy, calib_precision, time.time(), False], fname)
                            i = 0
                            pt.translate(Grid[i][0], Grid[i][1])
                            pt_buffer.translate(Grid[i][0], Grid[i][1])

                            #do the first one.
                            comms.send_msg('C')
                            pt_buffer.visible(1)
                            pt.visible(0)
                            yield viztask.waitTime(.75)  #wait for 0.75 s
                            pt_buffer.visible(0)  #remove buffer point
                            pt.visible(1)
                            break

                #yield viztask.returnValue(happy)
                #Now check if the calibration accuracy is good enough. Else run through again

            else:
                pt.translate(Grid[i][0], Grid[i][1])
                pt_buffer.translate(Grid[i][0], Grid[i][1])

                #Add buffer point
                #pt_buffer.visible(1)
                #pt.visible(0)
                yield viztask.waitTime(.75)  #wait for 0.75 s
                pt_buffer.visible(0)  #remove buffer point
                pt.visible(1)

        yield viztask.waitTime(.5)
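# --- Editor's note -----------------------------------------------------------
# `MakeGrid` and `save_calibration` are used by run_calibration/run_accuracy but
# are not defined in this excerpt. The guarded definitions below are minimal
# sketches inferred from the call sites (a grid of nrow*ncol normalised screen
# points; one four-item record per accuracy check), NOT the project's actual
# implementations. The point ordering, the "Data" folder, and the ".csv"
# extension are assumptions.
# -----------------------------------------------------------------------------
if 'MakeGrid' not in globals():
    def MakeGrid(nrow, ncol, boxsize, lowerleft):
        """Sketch: return nrow*ncol normalised screen positions [x, y], evenly
        spaced over a box of size boxsize with its lower-left corner at
        lowerleft. The real grid may order its points differently."""
        xstep = boxsize[0] / float(ncol - 1)
        ystep = boxsize[1] / float(nrow - 1)
        grid = []
        for r in range(nrow):       # rows, starting from the top
            for c in range(ncol):   # columns, starting from the right
                x = lowerleft[0] + boxsize[0] - c * xstep
                y = lowerleft[1] + boxsize[1] - r * ystep
                grid.append([x, y])
        return grid

if 'save_calibration' not in globals():
    import csv
    import os

    def save_calibration(row, fname):
        """Sketch: append one [accuracy, precision, timestamp, accepted] record
        to a csv file next to the other data files."""
        with open(os.path.join("Data", fname + ".csv"), "ab") as f:
            csv.writer(f).writerow(row)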
def ShowResults():
    select.disable(viz.RENDERING)
    select.disable(viz.COLLIDE_NOTIFY)
    viz.playSound('Full_Body_Bip001/fire3.wav', viz.STOP)
    global globalSelected
    viz.message('Great Job!\nYou put out {0:d} fires in 30 seconds!'.format(globalSelected))
carchoice = 1
currentcar = 'Mini'
speed = 0.0
steer = 0.0
motion = 0
rotateY = 0.0
rotateX = 0.0

viz.clip(0.1, 30000)
vizconnect.go('camera.py')
viz.splashScreen('assets/tracksplash.jpg')
viz.phys.enable()

#carchoiceinit = vizinput.choose('Choose a car:', ['Mini','BMW','Ford Thunderbird','Ford Focus','Lamborghini Murcielago','TVR Speed 12','Dodge Challenger', 'Caterham Seven'])
#carchoice = carchoiceinit + 1
environmentchoice = vizinput.choose('Select time of day:', ['Day', 'Night'])

viz.message('Arrow Keys to steer. Z to go down a gear, X to go up a gear. G to swap between automatic/manual. Up to accelerate, Down to reverse (Automatic only). Spacebar to brake.')

#import Drivingfunctions

track = viz.addChild('assets/environment/track.osgb')
station = viz.addChild('assets/environment/gasStation.fbx')
#buildings = viz.addChild('assets/environment/City.osgb')
tower = viz.addChild('assets/environment/Building.fbx')

if environmentchoice == 0:
    environment = viz.addChild('sky_day.osgb')
else:
    environment = viz.addChild('sky_night.osgb')

environment.setScale(10, 10, 10)
track.setScale(5, 0, 5)
station.setScale(5, 5, 5)
#buildings.setScale(1, 1, 1)
station.setEuler(90, 0, 0)
def run_accuracy(comms, fname):

    fname = fname + '_accuracy_test'
    print(fname)

    ##MAKE WHITE BACKGROUND COLOUR FOR BETTER CALIBRATION
    #viz.MainWindow.clearcolor(viz.WHITE)

    #draw roadedges
    #fName = 'textures\strong_edge.bmp'
    # fName = imagepath + 'strong_edge.bmp'
    #
    # # add groundplane (wrap mode)
    # groundtexture = viz.addTexture(fName)
    # groundtexture.wrap(viz.WRAP_T, viz.REPEAT)
    # groundtexture.wrap(viz.WRAP_S, viz.REPEAT)
    #
    # groundplane = viz.addTexQuad()  ##ground for right bends (tight)
    # tilesize = 300
    # planesize = tilesize/5
    # groundplane.setScale(tilesize, tilesize, tilesize)
    # groundplane.setEuler((0, 90, 0), viz.REL_LOCAL)
    # matrix = vizmat.Transform()
    # matrix.setScale(planesize, planesize, planesize)
    # groundplane.texmat(matrix)
    # groundplane.texture(groundtexture)
    # groundplane.visible(1)

    #markers = Markers()  #add markers.

    #run through calibration programme
    #throw up a 9-point flexible grid. Can simply keep going until satisfied.
    #Needs a separate save function than the original to be completely self-sufficient.
    #boxsize = [.9,.8]    #xy box size
    #lowerleft = [.05,.1] #starting corner
    boxsize = [.6, .4]    #xy box size
    lowerleft = [.2, .2]  #starting corner
    #start from top right
    nrow = 4
    ncol = 3
    Grid = MakeGrid(nrow, ncol, boxsize, lowerleft)
    nmarkers = nrow * ncol

    imagepath = 'C:/VENLAB data/shared_modules/textures/'
    #fn = imagepath + 'calibmarker.png'
    #fn = imagepath + 'calibmarker_black.png'  #pupil-labs has issues. Stops due to not collecting enough data. Might be to tell it to stop?
    fn = imagepath + 'calibmarker_white.png'   #seems to work best with this one.
    #fn = imagepath + 'calibmarker_white_old.png'

    def loadimage(fn):
        """Loads and scales a texture from a given image path"""

        defaultscale = 800.0 / 600.0
        aspect = 1920.0 / 1080.0
        scale = aspect / defaultscale
        ttsize = 1

        pt = viz.add(viz.TEXQUAD, viz.SCREEN)
        pt.scale(ttsize, ttsize * scale, ttsize)
        pt.texture(viz.add(fn))
        pt.translate(Grid[0][0], Grid[0][1])  # Now you can specify screen coordinates, so the visual angle is OK (i.e. no depth)
        pt.visible(0)

        return (pt)

    pt = loadimage(fn)
    pt_buffer = loadimage(imagepath + 'calibmarker_buffer.png')
    pt_buffer.visible(0)

    #### CALIB GRID #####
    #need to make sure they are the same visual angle (account for the depth of the virtual world).
    #test the calibration by plotting the calibration sequence taken from the eyetracker (onto the dots)
    #(0.37, 0.6)      #Point 1
    #(0.485, 0.6)     #Point 2
    #(0.6, 0.6)       #Point 3
    #(0.37, 0.495)    #Point 4
    #(0.485, 0.495)   #Point 5
    #(0.6, 0.495)     #Point 6
    #(0.37, 0.39)     #Point 7
    #(0.485, 0.39)    #Point 8
    #(0.6, 0.39)      #Point 9

    viz.message('\t\t\tACCURACY TEST \n\nPlease look at the white dot in the very centre of the accuracy target. Try and move your head as little as possible')

    calib_flag = 0
    record_flag = 0
    satisfied = False
    i = 0  #index for point.

    #normalise markers on surface
    print(Grid)
    #calibpositions_normed = normaliseToSurface(Grid, markers.boxsize, markers.lowerleft)
    #print (calibpositions_normed)
    #comms.send_marker_positions(calibpositions_normed)

    comms.send_msg('P')  #start accuracy test

    #add buffer point
    pt_buffer.visible(1)
    pt.visible(0)
    yield viztask.waitTime(.75)  #wait for 0.75 s
    pt_buffer.visible(0)  #remove buffer point
    pt.visible(1)

    while not satisfied:

        msg_rcv = comms.poll_msg()

        if 'calibration.marker_sample_completed' in msg_rcv:

            pt_buffer.visible(1)  #add buffer point
            pt.visible(0)
            yield viztask.waitTime(.5)  #wait for half a second

            i = i + 1
            if i > nmarkers - 1:  #clamp i
                comms.send_msg('p')
                while True:
                    msg_rcv = comms.poll_msg()
                    if True in ['calibration' in j for j in msg_rcv]:
                        out = [j for j in msg_rcv if 'calibration' in j][0]
                        calib_accuracy = out.split('//')[0]
                        calib_precision = out.split('//')[1]
                        #calib_accuracy = out.split('calibration.Accuracy')[1].split('.Precision')[0]
                        #calib_precision = out.split('calibration.Accuracy')[1].split('.Precision')[0]

                        satisfied = True
                        save_calibration([calib_accuracy, calib_precision, time.time(), True], fname)
                        pt.visible(0)
                        pt_buffer.visible(0)
                        break

                        # happy = vizinput.ask("Calibration Accuracy: " + calib_accuracy + "\nAre you satisfied?")
                        # if happy:
                        #     print ("happy")
                        #     satisfied = True
                        #     pt.visible(0)
                        #     break
                        # else:
                        #     print ("not happy")
                        #     satisfied = False
                        #     i = 0
                        #     pt.translate(Grid[i][0], Grid[i][1])
                        #     comms.send_msg('P')
                        #     break

                #yield viztask.returnValue(happy)
                #Now check if the calibration accuracy is good enough. Else run through again

            else:
                pt.translate(Grid[i][0], Grid[i][1])
                pt_buffer.translate(Grid[i][0], Grid[i][1])
                #pt.translate(0,0)

                yield viztask.waitTime(.75)  #wait for 0.75 s
                pt_buffer.visible(0)  #remove buffer point
                pt.visible(1)

        yield viztask.waitTime(.5)
def RunProgram(prg, timeslice, pc):

    global PGCOMPLETE
    global END
    # global output
    # listitems = output.getItems()
    # #clear output
    # for i in listitems:
    #     output.removeItem(i, True)
    #-------------------------------
    oldpc = pc
    global PC
    global PID
    proc.visible(True)
    ACTUALTIME = 0

    #globals used by the program strings executed below
    global p, q, z, sq, num, h1, h2, h3, n1, n2, n3, n4, avgn
    global v, i, s, loop, k, y, m, d, e, g

    #PRG2 = MULT2NUMBERS.splitlines(False)
    out("*******************************")

    while (ACTUALTIME < timeslice):
        exec prg[pc]
        PID.message("RUNNING")
        PC.message(str(pc))
        if str(prg[pc]) == "#":
            PID.message("TERMINATED")
            viz.setTheme(TERMINATED_THEME, True, PID)
            ACTUALTIME = timeslice
            PGCOMPLETE = PGCOMPLETE + 1
            if PGCOMPLETE == 10:  #all programs have been completed
                END = True
                viz.message("All programs have been completed successfully!")
        else:
            viz.setTheme(RUNNING_THEME, True, PID)
            pc = pc + 1
            ACTUALTIME = ACTUALTIME + 1
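# --- Editor's note -----------------------------------------------------------
# A hypothetical illustration of how RunProgram is driven: `prg` is a list of
# Python statement strings executed one per time unit, with "#" marking program
# termination. MULT2NUMBERS is referenced (commented out) above; its content
# here is an assumption for illustration only.
#
#   MULT2NUMBERS = "p = 3\nq = 4\nz = p * q\n#"
#   PRG = MULT2NUMBERS.splitlines(False)  # -> ['p = 3', 'q = 4', 'z = p * q', '#']
#   RunProgram(PRG, 4, 0)                 # run up to 4 statements starting at pc 0
# -----------------------------------------------------------------------------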
def runtrials(self):
    """Loops through the trial sequence"""

    viz.MainScene.visible(viz.ON, viz.WORLD)
    viz.mouse.setVisible(viz.OFF)  #switch mouse off
    viz.clearcolor(viz.SKYBLUE)  #HACK, since eyetracker background is white.

    if self.EYETRACKING:
        #pass it the filename, and also the timestamp.
        et_file = str(self.EXP_ID) + '_' + str(self.PP_id)  #one file for the whole task.
        self.comms.start_trial(fname=et_file, timestamp=viz.tick())

    if self.EYETRACKING:
        #viz.MainScene.visible(viz.OFF, viz.WORLD)

        #remove straight
        self.Straight.ToggleVisibility(0)

        filename = str(self.EXP_ID) + "_Calibration_" + str(self.PP_id)  #+ str(demographics[0]) + "_" + str(demographics[2])  #add experimental block to filename
        print(filename)

        # Start logging the pupil data
        pupilfile = gzip.open(os.path.join("Data", filename + ".pupil.jsons.gz"), 'a')
        closer = pupil_logger.start_logging(pupilfile, timestamper=viz.tick)

        def stop_pupil_logging():
            closer()
            pupilfile.close()

        EXIT_CALLBACKS.insert(0, stop_pupil_logging)

        yield run_calibration(self.comms, filename)
        yield run_accuracy(self.comms, filename)

        #put straight visible
        self.Straight.ToggleVisibility(1)

    #add message after calibration to give the experimenter and participant time to prepare for the simulation.
    self.markers = Markers()

    #set up distractor task
    if self.DISTRACTOR_TYPE is not None:
        distractorfilename = str(self.EXP_ID) + '_' + str(self.PP_id) + '_distractor_'
        Distractor = Count_Adjustable.Distractor(distractorfilename,
                                                 self.targetnumber,
                                                 ppid=1,
                                                 startscreentime=self.StartScreenTime,
                                                 triallength=np.inf,
                                                 ntrials=len(self.TRIALSEQ_df.index))
    else:
        Distractor = None

    #set up scene before eyetracking
    self.driver = vizdriver.Driver(self.caveview, Distractor)

    viz.message('\t\tYou will now begin the experiment \n\n The automated vehicle will attempt to navigate a series of bends. \nYour task as the supervisory driver is to make sure the vehicle stays within the road edges. \nDuring automation please keep your hands loosely on the wheel. \nYou may take control by pressing the gear pads. \nOnce pressed, you will immediately be in control of the vehicle \n\n Please fixate the centre of the calibration point in between trials')

    self.ToggleTextVisibility(viz.ON)

    for i, trial in self.TRIALSEQ_df.iterrows():

        #if half-way through, do an accuracy test.
        if i == int(np.round(self.total_trials / 2, 0)):
            if self.EYETRACKING:
                self.markers.markers_visibility(0)  #remove markers for calibration
                self.Straight.ToggleVisibility(0)
                accuracy_filename = filename + '_middle'
                yield run_accuracy(self.comms, accuracy_filename)
                yield viztask.waitTime(1)  #a second pause before going into the next trial
                self.markers.markers_visibility(1)  #put markers back
                self.Straight.ToggleVisibility(1)

        #import vizjoy
        print("Trialn: ", str(i))
        print("current trial:", trial)  #trial is now a row from a dataframe
        print("current trial radius:", trial["radius"])

        trial_radii = trial['radius']
        trial_yawrate_offset = trial['sab']
        trial_dir = trial['bend']

        print(str([trial_radii, trial_yawrate_offset]))

        txtDir = ""
        #print ("Length of bend array:", len(self.rightbends))

        self.driver.setAutomation(True)
        self.AUTOMATION = True
        self.txtMode.message('A')

        if self.AUTOWHEEL:
            self.Wheel.control_on()

        if self.DISTRACTOR_TYPE is not None:
            if i == 0:  #the first trial.
                #annotate eyetracking
                if self.EYETRACKING:
                    self.comms.annotate("DistractorScreen")

                #switch texts off for the first trial.
                self.ToggleTextVisibility(viz.OFF)

                Distractor.StartTrial(self.targetoccurence_prob,
                                      self.targetnumber,
                                      trialn=i,
                                      displayscreen=True)  #starts trial
                yield viztask.waitTrue(Distractor.getPlaySoundFlag)

                self.ToggleTextVisibility(viz.ON)
            else:
                Distractor.StartTrial(self.targetoccurence_prob,
                                      self.targetnumber,
                                      trialn=i,
                                      displayscreen=False)  #starts trial

        radius_index = self.FACTOR_radiiPool.index(trial_radii)

        #choose correct road object.
        if trial_dir > 0:  #right bend
            trialbend = self.rightbends[radius_index]
            txtDir = "R"
        else:
            trialbend = self.leftbends[radius_index]
            txtDir = "L"

        #trialbend = self.rightbends[radius_index]
        #txtDir = "R"

        trialbend.ToggleVisibility(viz.ON)

        if trial_radii > 0:  #if trial_radii is above zero it is a bend, not a straight
            msg = "Radius: " + str(trial_radii) + txtDir + '_' + str(trial_yawrate_offset)
        else:
            msg = "Radius: Straight" + txtDir + '_' + str(trial_yawrate_offset)
        # txtCondt.message(msg)

        #pick radius
        self.Trial_radius = trial_radii

        #pick file. Put this in dedicated function. TODO: Should open all of these at the start of the file to save on processing.
        self.Trial_autofile_i = int(trial['autofile_i'])
        self.Trial_YR_readout = self.YR_readouts_80[self.Trial_autofile_i]
        self.Trial_SWA_readout = self.SWA_readouts_80[self.Trial_autofile_i]
        self.Trial_playbackfilename = self.PlaybackPool80[self.Trial_autofile_i]

        """
        if self.Trial_radius == 40:
            i = random.choice(range(len(self.YR_readouts_40)))
            self.Trial_YR_readout = self.YR_readouts_40[i]
            self.Trial_SWA_readout = self.SWA_readouts_40[i]
            self.Trial_playbackfilename = self.PlaybackPool40[i]
        elif self.Trial_radius == 80:
            i = random.choice(range(len(self.YR_readouts_80)))
            self.Trial_YR_readout = self.YR_readouts_80[i]
            self.Trial_SWA_readout = self.SWA_readouts_80[i]
            self.Trial_playbackfilename = self.PlaybackPool80[i]
        else:
            raise Exception("Something bad happened")
        """

        #update class#
        self.Trial_simulatedttlc = trial['simulated_ttlc']
        self.Trial_design = trial['design']
        self.Trial_dir = trial_dir
        self.Trial_N = i
        self.Trial_YawRate_Offset = trial_yawrate_offset
        self.Trial_BendObject = trialbend
        self.Trial_trialtype_signed = trial_dir
        self.Trial_playbacklength = len(self.Trial_YR_readout)
        self.Trial_midline = np.vstack((self.Straight.midline, self.Trial_BendObject.midline))
        self.Trial_OnsetTime = trial['onsettime']
        #self.Trial_OnsetTime = np.random.choice(self.OnsetTimePool, size=1)[0]
        self.Trial_SaveName = str(self.EXP_ID) + '_' + str(self.PP_id) + '_' + str(self.Trial_N)

        #renew data frame.
        #self.OutputWriter = pd.DataFrame(index=range(self.TrialLength*60), columns=self.datacolumns)  #make new empty EndofTrial data

        #renew csv writer
        self.OutputFile = io.BytesIO()
        self.OutputWriter = csv.writer(self.OutputFile)
        self.OutputWriter.writerow(self.datacolumns)  #write headers.

        #annotate eyetracking
        if self.EYETRACKING:
            self.comms.annotate('Start_' + self.Trial_SaveName)

        yield viztask.waitTime(.5)  #pause at beginning of trial

        if self.EYETRACKING:
            #show the calibration dot, then remove calib_pt and wait a further .5 s
            #TODO: add 1 s calibration dot.
            self.calib_pt.visible(1)
            yield viztask.waitTime(1.5)  #pause at beginning of trial
            self.calib_pt.visible(0)
            yield viztask.waitTime(.5)  #pause at beginning of trial

        if self.DEBUG:
            conditionmessage = 'SAB: ' + str(self.Trial_YawRate_Offset) + \
                '\nRadius: ' + str(self.Trial_radius) + \
                '\nOnsetTime: ' + str(self.Trial_OnsetTime) + \
                '\nAutoFile: ' + str(self.Trial_autofile_i) + \
                '\nsim TTLC: ' + str(self.Trial_simulatedttlc) + \
                '\nDesign: ' + str(self.Trial_design) + \
                '\nTask: ' + str(self.DISTRACTOR_TYPE)
            self.txtTrial.message(conditionmessage)

        if self.DEBUG_PLOT:
            #realtime plot.
            self.line_midline.set_data(self.Trial_midline[:, 0], self.Trial_midline[:, 1])
            self.dot_origin.set_data(self.Trial_BendObject.CurveOrigin[0], self.Trial_BendObject.CurveOrigin[1])
            self.plot_ax.axis([
                min(self.Trial_midline[:, 0]) - 10,
                max(self.Trial_midline[:, 0]) + 10,
                min(self.Trial_midline[:, 1]) - 10,
                max(self.Trial_midline[:, 1]) + 10
            ])  #set axis limits
            self.plot_positionarray_x, self.plot_positionarray_z, self.plot_closestpt_x, self.plot_closestpt_z = [], [], [], []  #arrays to store plot data in

        self.UPDATELOOP = True

        def PlaybackReached():
            """checks for playback limit or whether automation has been disengaged"""
            end = False
            #check whether automation has been switched off.
            if self.Current_playbackindex >= self.Trial_playbacklength:
                end = True
            return (end)

        def CheckDisengage():
            """checks automation status of driver class"""
            end = False
            auto = self.driver.getAutomation()
            if auto == False:
                self.AUTOMATION = auto
                self.txtMode.message('M')

                #switch wheel control off, because user has disengaged
                #begin = timer()
                if self.AUTOWHEEL:
                    self.Wheel.control_off()

                if self.EYETRACKING:
                    self.comms.annotate('Disengage_' + self.Trial_SaveName)
                #pass
                #print ("WheelControlOff", timer() - begin)
                end = True
            return (end)

        #create viztask functions.
        waitPlayback = viztask.waitTrue(PlaybackReached)
        waitDisengage = viztask.waitTrue(CheckDisengage)

        d = yield viztask.waitAny([waitPlayback, waitDisengage])

        if d.condition is waitPlayback:
            print('Playback Limit Reached')
        elif d.condition is waitDisengage:
            print('Automation Disengaged')
            self.SingleBeep()

        def RoadRunout():
            """temporary HACK function to check whether the participant has run out of road"""
            end = False
            if self.Trial_Timer > self.TrialLength:
                end = True
            return (end)

        #waitRoad = viztask.waitTrue(RoadRunout)
        #waitManual = viztask.waitTime(5)
        #d = yield viztask.waitAny([waitRoad, waitManual])

        yield viztask.waitTrue(RoadRunout)
        print("Run out of Road")

        #if d.condition is waitRoad:
        #    print ('Run out of Road')
        #elif d.condition is waitManual:
        #    print ('Manual Time Elapsed')

        ##### END STEERING TASK ######

        self.UPDATELOOP = False

        self.Trial_BendObject.ToggleVisibility(viz.OFF)

        ##reset trial. Also need to annotate each eyetracking trial.
        viz.director(self.SaveData, self.OutputFile, self.Trial_SaveName)
        self.ResetTrialAndDriver()  #reset parameters for beginning of trial

        ##### INITIALISE END OF TRIAL SCREEN FOR DISTRACTOR TASK #######
        if self.DISTRACTOR_TYPE is not None:

            #annotate eyetracking
            if self.EYETRACKING:
                self.comms.annotate('Distractor_' + self.Trial_SaveName)

            if self.AUTOWHEEL:
                self.Wheel.control_off()

            #switch text off
            self.ToggleTextVisibility(viz.OFF)

            Distractor.EndofTrial()  #throw up the screen to record counts.

            # Pause before the query screen to avoid spurious presses
            # carrying over from the task.
            # Hack the screen to be blank.
            Distractor.EoTScreen.visible(viz.ON)
            Distractor.Question.visible(viz.OFF)
            Distractor.lblscore.visible(viz.OFF)
            yield viztask.waitTime(1.0)
            Distractor.EoTScreen_Visibility(viz.ON)

            ###interface with End of Trial Screen
            pressed = 0
            while pressed < self.targetnumber:  #keep looking for gearpad presses until pressed reaches trial_targetnumber
                print("waiting for gear press")
                yield viztask.waitTrue(self.driver.getGearPressed)
                pressed += 1
                print('pressed ' + str(pressed))
                Distractor.gearpaddown()
                self.driver.setGearPressed(False)
                yield viztask.waitTime(.5)

            #Distractor.EoTScreen_Visibility(viz.OFF)
            Distractor.RecordCounts()
            self.ToggleTextVisibility(viz.ON)

        #annotate eyetracking
        if self.EYETRACKING:
            self.comms.annotate('End_' + self.Trial_SaveName)

    #Trial loop has finished.
    if self.EYETRACKING:
        self.markers.remove_markers()  #remove markers
        self.Straight.ToggleVisibility(0)
        accuracy_filename = filename + '_end'
        yield run_accuracy(self.comms, accuracy_filename)

    self.CloseConnections()