hideWindow(window) # Display calibration gfx window and run calibration. result = tracker.runSetupProcedure() print("Calibration returned: ", result) # Maximize the PsychoPy window if needed showWindow(window) flip_time = window.flip() io_hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time) # Send some information to the ioDataStore as experiment messages, # including the experiment and session id's, the calculated pixels per # degree, display resolution, etc. # io_hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START") io_hub.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString())) io_hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(io_hub.experimentID, io_hub.experimentSessionID)) io_hub.sendMessageEvent(text="Stimulus Screen ID: {0}, " "Size (pixels): {1}, CoordType: {2}".format(display.getIndex(), display.getPixelResolution(), display.getCoordinateType())) io_hub.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree())) io_hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END") io_hub.clearEvents('all') # For each trial in the set of trials within the current block. # t = 0 for trial in trials:
def run(self,*args):
    """
    Run one session of the gaze-contingent image-viewing demo.

    Steps performed:
    1) Load trial conditions from trial_conditions.xlsx (all DV's and
       IV's used or updated per trial must be columns of that file).
    2) Register the TrialHandler with the ioDataStore, creating a
       results table with a field for each DV and IV.
    3) Run the eye tracker's setup procedure (calibration, validation,
       etc. — details are specific to each tracker implementation).
    4) Create the runtime graphics: a cache of trial images, a gaze
       cursor, and an instruction text stim.
    5) For each trial: wait for SPACE, show the trial's image, record
       eye data, draw a gaze-contingent dot at the reported gaze
       position, end on SPACE, and save the trial's condition variable
       values to the ioDataStore.

    Returns 1 early if no eye tracker device is configured in
    iohub_config.yaml.
    """
    exp_conditions=importConditions('trial_conditions.xlsx')
    trials = TrialHandler(exp_conditions,1)

    # Inform the ioDataStore that the experiment is using a
    # TrialHandler. The ioDataStore will create a table which can be
    # used to record the actual trial variable values (DV or IV) in
    # the order run / collected.
    self.hub.createTrialHandlerRecordTable(trials)

    # Short-cuts to the ioHub devices used in this demo.
    try:
        tracker=self.hub.devices.tracker
    except Exception:
        # No eye tracker config found in iohub_config.yaml; tell the
        # user how to fix it and abort the demo.
        from psychopy.iohub.util import MessageDialog
        md = MessageDialog(title="No Eye Tracker Configuration Found",
                           msg="Update the iohub_config.yaml file by "
                               "uncommenting\nthe appropriate eye tracker "
                               "config lines.\n\nPress OK to exit demo.",
                           showButtons=MessageDialog.OK_BUTTON,
                           dialogType=MessageDialog.ERROR_DIALOG,
                           allowCancel=False,
                           display_index=0)
        md.show()
        return 1
    display=self.hub.devices.display
    kb=self.hub.devices.keyboard
    mouse=self.hub.devices.mouse

    # Run the eye tracker default setup procedure. The setup details
    # (calibration, validation, etc.) are unique to each
    # implementation of the Common Eye Tracker Interface; all share
    # the goal of calibrating the system prior to data collection.
    tracker.runSetupProcedure()

    # Create the single full screen PsychoPy window used for the
    # experiment graphics (ioHub supports one full screen window).
    res=display.getPixelResolution()  # current pixel resolution of the Display
    coord_type=display.getCoordinateType()
    window=visual.Window(res,monitor=display.getPsychopyMonitorName(),  # PsychoPy Monitor Config name, if used.
                         units=coord_type,           # coordinate space to use.
                         fullscr=True,               # full screen mode is required.
                         allowGUI=False,             # borderless.
                         screen= display.getIndex()  # display index, for multi display setups.
                         )

    # Pre-load an ImageStim for every trial image.
    image_cache=dict()
    image_names=['canal.jpg','fall.jpg','party.jpg','swimming.jpg','lake.jpg']
    for iname in image_names:
        image_cache[iname]=visual.ImageStim(window, image=os.path.join('./images/',iname),
                                            name=iname,units=coord_type)

    # Gaze cursor blob. Current units assume pix.
    gaze_dot =visual.GratingStim(window,tex=None, mask="gauss",
                                 pos=(0,0 ),size=(66,66),color='green',
                                 units=coord_type)

    # Text stim reused for all /instruction/ screens. Units assume pix.
    instructions_text_stim = visual.TextStim(window, text='', pos=[0,0], height=24,
                                             color=[-1,-1,-1], colorSpace='rgb',
                                             wrapWidth=window.size[0]*.9)

    # Show the start screen; log its display time to the DataStore.
    instuction_text="Press Any Key to Start Experiment."
    instructions_text_stim.setText(instuction_text)
    instructions_text_stim.draw()
    flip_time=window.flip()
    self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)

    # Wait until a key event occurs after the instructions are displayed.
    self.hub.clearEvents('all')
    kb.waitForPresses()

    # Log experiment / session / display metadata as experiment
    # messages (ids, pixels per degree, resolution, etc.).
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
    self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
    self.hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID,self.hub.experimentSessionID))
    self.hub.sendMessageEvent(text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getIndex(),display.getPixelResolution(),display.getCoordinateType()))
    self.hub.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")

    self.hub.clearEvents('all')

    # Run each trial in the set of trials within the current block.
    t=0
    for trial in trials:
        # Per-trial start screen.
        # NOTE(review): the message text below duplicates the
        # experiment-level EXPERIMENT_START marker; confirm before
        # relying on it to separate trials during analysis.
        instuction_text="Press Space Key To Start Trial %d"%t
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time=window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)

        # Wait for a SPACE press to start the trial.
        self.hub.clearEvents('all')
        kb.waitForPresses(keys=[' ',])

        # Record the session and trial ids for the ioDataStore row.
        trial['session_id']=self.hub.getSessionID()
        trial['trial_id']=t+1

        # Msg marking trial start (time of the instruction retrace).
        self.hub.sendMessageEvent(text="TRIAL_START",sec_time=flip_time)

        # Start recording eye data.
        tracker.setRecordingState(True)

        # Show this trial's image.
        imageStim=image_cache[trial['IMAGE_NAME']]
        imageStim.draw()
        flip_time=window.flip()

        # Drop all events received prior to the trial start.
        self.hub.clearEvents('all')

        # Log the retrace time of the first frame showing the stim.
        self.hub.sendMessageEvent(text="TRIAL_START",sec_time=flip_time)

        trial['TRIAL_START']=flip_time

        # Gaze-contingent loop: redraw until SPACE is pressed.
        run_trial=True
        while run_trial is True:
            # Latest gaze position in display coord space; a
            # non-sequence value means the tracker is not currently
            # tracking eye position.
            # FIX: use isinstance() instead of `type(gpos) in [tuple,list]`.
            gpos=tracker.getPosition()
            if isinstance(gpos,(tuple,list)):
                # Valid gaze sample: redraw the background image and
                # the gaze cursor at the current eye position.
                gaze_dot.setPos([gpos[0],gpos[1]])
                imageStim.draw()
                gaze_dot.draw()
            else:
                # No gaze sample: draw the image only, removing the
                # gaze cursor from the screen.
                imageStim.draw()

            # Flip video buffers, displaying the stim we just updated.
            flip_time=window.flip()

            # Log the draw time and gaze position (if any) per frame.
            if isinstance(gpos,(tuple,list)):
                self.hub.sendMessageEvent("IMAGE_UPDATE %s %.3f %.3f"%(
                                          trial['IMAGE_NAME'],gpos[0],gpos[1]),
                                          sec_time=flip_time)
            else:
                self.hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]"%(
                                          trial['IMAGE_NAME']),
                                          sec_time=flip_time)

            # End the trial on a SPACE press.
            if kb.getPresses(keys=[' ',]):
                run_trial=False
                break

        # Trial over: record the trial end time and log it.
        flip_time=window.flip()
        trial['TRIAL_END']=flip_time
        self.hub.sendMessageEvent(text="TRIAL_END %d"%t,sec_time=flip_time)

        # Stop recording eye data: no use for eye data between trials.
        tracker.setRecordingState(False)

        # Save this trial's condition variable values to the ioDataStore.
        self.hub.addTrialHandlerRecord(trial)

        # Clear all event buffers.
        self.hub.clearEvents('all')
        t+=1

    # All trials have been run, so end the experiment.
    flip_time=window.flip()
    self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE',sec_time=flip_time)

    # Disconnect the eye tracking device.
    tracker.setConnectionState(False)

    # Show the 'experiment done' screen and wait for any key press.
    instuction_text="Press Any Key to Exit Demo"
    instructions_text_stim.setText(instuction_text)
    instructions_text_stim.draw()
    flip_time=window.flip()
    self.hub.sendMessageEvent(text="SHOW_DONE_TEXT",sec_time=flip_time)
    self.hub.clearEvents('all')
    kb.waitForPresses()
def run(self, *args):
    """
    Run the demo experiment; equivalent to the main script.py of a
    standard PsychoPy experiment.

    args[0] must be the name of the eye tracker selected for this
    session; it is logged to the ioHub DataStore.
    """
    exp_conditions = importConditions('trial_conditions.xlsx')
    trials = TrialHandler(exp_conditions, 1)

    # Inform the ioDataStore that the experiment is using a
    # TrialHandler, so the actual trial variable values (DV or IV) can
    # be recorded in the order run / collected.
    self.hub.createTrialHandlerRecordTable(trials)

    selected_eyetracker_name = args[0]

    # Short-cuts to the devices used in this 'experiment'.
    tracker = self.hub.devices.tracker
    display = self.hub.devices.display
    kb = self.hub.devices.keyboard

    # Start by running the eye tracker default setup procedure.
    tracker.runSetupProcedure()

    # Create a psychopy window, full screen resolution, full screen mode.
    res = display.getPixelResolution()
    window = visual.Window(res,
                           monitor=display.getPsychopyMonitorName(),
                           units=display.getCoordinateType(),
                           fullscr=True,
                           allowGUI=False,
                           screen=0)

    # Create a dict of image stim for trials and a gaze blob to show
    # gaze position.
    display_coord_type = display.getCoordinateType()
    image_cache = dict()
    image_names = [
        'canal.jpg', 'fall.jpg', 'party.jpg', 'swimming.jpg', 'lake.jpg'
    ]
    for iname in image_names:
        image_cache[iname] = visual.ImageStim(window,
                                              image=os.path.join(
                                                  './images/', iname),
                                              name=iname,
                                              units=display_coord_type)

    gaze_dot = visual.GratingStim(window,
                                  tex=None,
                                  mask="gauss",
                                  pos=(0, 0),
                                  size=(66, 66),
                                  color='green',
                                  units=display_coord_type)
    instructions_text_stim = visual.TextStim(window,
                                             text='',
                                             pos=[0, 0],
                                             height=24,
                                             color=[-1, -1, -1],
                                             colorSpace='rgb',
                                             wrapWidth=window.size[0] * .9)

    # Show the start screen; log its display time to the DataStore.
    instuction_text = "Press Any Key to Start Experiment."
    instructions_text_stim.setText(instuction_text)
    instructions_text_stim.draw()
    flip_time = window.flip()
    self.hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)

    # Wait until a key event occurs after the instructions are displayed.
    self.hub.clearEvents('all')
    kb.waitForPresses()

    # Log session metadata as experiment messages, including the eye
    # tracker being used for this session.
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
    self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(
        getCurrentDateTimeString()))
    self.hub.sendMessageEvent(
        text="Experiment ID: {0}, Session ID: {1}".format(
            self.hub.experimentID, self.hub.experimentSessionID))
    self.hub.sendMessageEvent(
        text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".
        format(display.getIndex(), display.getPixelResolution(),
               display.getCoordinateType()))
    self.hub.sendMessageEvent(
        text="Calculated Pixels Per Degree: {0} x, {1} y".format(
            *display.getPixelsPerDegree()))
    self.hub.sendMessageEvent(text="Eye Tracker being Used: {0}".format(
        selected_eyetracker_name))
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")
    self.hub.clearEvents('all')

    t = 0
    for trial in trials:
        # Per-trial start screen.
        instuction_text = "Press Space Key To Start Trial %d" % t
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time = window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START",
                                  sec_time=flip_time)

        # Wait until a space key event occurs.
        kb.waitForPresses(keys=' ')

        # Trial start requested: clear the screen, record ids and the
        # start time, start recording eye data, and drop all events
        # received so far.
        flip_time = window.flip()
        trial['session_id'] = self.hub.getSessionID()
        trial['trial_id'] = t + 1
        trial['TRIAL_START'] = flip_time
        self.hub.sendMessageEvent(text="TRIAL_START", sec_time=flip_time)
        self.hub.clearEvents('all')
        tracker.setRecordingState(True)

        # Get the image stim for this trial.
        imageStim = image_cache[trial['IMAGE_NAME']]

        # Gaze-contingent loop: redraw until SPACE is pressed.
        run_trial = True
        while run_trial is True:
            # Get the latest gaze position in display coord space.
            gpos = tracker.getLastGazePosition()
            if isinstance(gpos, (tuple, list)):
                # Valid gaze sample: draw the background image and
                # then the gaze cursor.
                gaze_dot.setPos(gpos)
                imageStim.draw()
                gaze_dot.draw()
            else:
                # No gaze sample: draw the background image only.
                imageStim.draw()

            # Flip video buffers, updating the display.
            flip_time = window.flip()

            # Log the draw time and gaze position for this frame.
            # BUG FIX: previously logged `iname` — the leftover loop
            # variable from building image_cache (always the last
            # cached image) — instead of the image shown this trial.
            if isinstance(gpos, (tuple, list)):
                self.hub.sendMessageEvent(
                    "IMAGE_UPDATE %s %.3f %.3f" %
                    (trial['IMAGE_NAME'], gpos[0], gpos[1]),
                    sec_time=flip_time)
            else:
                self.hub.sendMessageEvent(
                    "IMAGE_UPDATE %s [NO GAZE]" % (trial['IMAGE_NAME']),
                    sec_time=flip_time)

            # End the trial on a SPACE press.
            if ' ' in kb.getPresses():
                run_trial = False

        # Trial over: log the end time and stop recording eye data (no
        # use for eye data between trials).
        flip_time = window.flip()
        trial['TRIAL_END'] = flip_time
        self.hub.sendMessageEvent(text="TRIAL_END %d" % t,
                                  sec_time=flip_time)
        tracker.setRecordingState(False)

        # Save the Experiment Condition Variable Data for this trial
        # to the ioDataStore.
        self.hub.addTrialHandlerRecord(trial)
        self.hub.clearEvents('all')
        t += 1

    # Disconnect the eye tracking device.
    tracker.setConnectionState(False)

    # Show the exit screen and wait for any key press.
    instuction_text = "Press Any Key to Exit Demo"
    instructions_text_stim.setText(instuction_text)
    instructions_text_stim.draw()
    flip_time = window.flip()
    self.hub.sendMessageEvent(text="SHOW_DONE_TEXT", sec_time=flip_time)
    kb.waitForPresses()

    # The experiment is done; all trials have been run.
    self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE')
def run(self,*args):
    """
    Run the demo experiment; equivalent to the main script.py of a
    standard PsychoPy experiment.

    args[0] must be the name of the eye tracker selected for this
    session; it is logged to the ioHub DataStore.
    """
    exp_conditions=importConditions('trial_conditions.xlsx')
    trials = TrialHandler(exp_conditions,1)

    # Inform the ioDataStore that the experiment is using a
    # TrialHandler, so the actual trial variable values (DV or IV) can
    # be recorded in the order run / collected.
    self.hub.createTrialHandlerRecordTable(trials)

    selected_eyetracker_name=args[0]

    # Short-cuts to the devices used in this 'experiment'.
    tracker=self.hub.devices.tracker
    display=self.hub.devices.display
    kb=self.hub.devices.keyboard

    # Start by running the eye tracker default setup procedure.
    tracker.runSetupProcedure()

    # Create a psychopy window, full screen resolution, full screen mode...
    res=display.getPixelResolution()
    window=visual.Window(res,monitor=display.getPsychopyMonitorName(),
                         units=display.getCoordinateType(),
                         fullscr=True,
                         allowGUI=False,
                         screen= 0
                         )

    # Create a dict of image stim for trials and a gaze blob to show
    # gaze position.
    display_coord_type=display.getCoordinateType()
    image_cache=dict()
    image_names=['canal.jpg','fall.jpg','party.jpg','swimming.jpg','lake.jpg']
    for iname in image_names:
        image_cache[iname]=visual.ImageStim(window, image=os.path.join('./images/',iname),
                                            name=iname,units=display_coord_type)

    gaze_dot =visual.GratingStim(window,tex=None, mask="gauss",
                                 pos=(0,0 ),size=(66,66),color='green',
                                 units=display_coord_type)
    instructions_text_stim = visual.TextStim(window, text='', pos = [0,0],
                                             height=24, color=[-1,-1,-1],
                                             colorSpace='rgb',alignHoriz='center',
                                             alignVert='center',wrapWidth=window.size[0]*.9)

    # Show the start screen; log its display time to the DataStore.
    instuction_text="Press Any Key to Start Experiment."
    instructions_text_stim.setText(instuction_text)
    instructions_text_stim.draw()
    flip_time=window.flip()
    self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)

    # Wait until a key event occurs after the instructions are displayed.
    self.hub.clearEvents('all')
    kb.waitForPresses()

    # Log session metadata as experiment messages, including the eye
    # tracker being used for this session.
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
    self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
    self.hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID,self.hub.experimentSessionID))
    self.hub.sendMessageEvent(text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getIndex(),display.getPixelResolution(),display.getCoordinateType()))
    self.hub.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
    self.hub.sendMessageEvent(text="Eye Tracker being Used: {0}".format(selected_eyetracker_name))
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")
    self.hub.clearEvents('all')

    t=0
    for trial in trials:
        # Per-trial start screen.
        instuction_text="Press Space Key To Start Trial %d"%t
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time=window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)

        # Wait until a space key event occurs.
        kb.waitForPresses(keys=' ')

        # Trial start requested: clear the screen, record ids and the
        # start time, start recording eye data, and drop all events
        # received so far.
        flip_time=window.flip()
        trial['session_id']=self.hub.getSessionID()
        trial['trial_id']=t+1
        trial['TRIAL_START']=flip_time
        self.hub.sendMessageEvent(text="TRIAL_START",sec_time=flip_time)
        self.hub.clearEvents('all')
        tracker.setRecordingState(True)

        # Get the image stim for this trial.
        imageStim=image_cache[trial['IMAGE_NAME']]

        # Gaze-contingent loop: redraw until SPACE is pressed.
        run_trial=True
        while run_trial is True:
            # Get the latest gaze position in display coord space.
            gpos=tracker.getLastGazePosition()
            if isinstance(gpos,(tuple,list)):
                # Valid gaze sample: draw the background image and
                # then the gaze cursor.
                gaze_dot.setPos(gpos)
                imageStim.draw()
                gaze_dot.draw()
            else:
                # No gaze sample: draw the background image only.
                imageStim.draw()

            # Flip video buffers, updating the display.
            flip_time=window.flip()

            # Log the draw time and gaze position for this frame.
            # BUG FIX: previously logged `iname` — the leftover loop
            # variable from building image_cache (always the last
            # cached image) — instead of the image shown this trial.
            if isinstance(gpos,(tuple,list)):
                self.hub.sendMessageEvent("IMAGE_UPDATE %s %.3f %.3f"%(trial['IMAGE_NAME'],gpos[0],gpos[1]),sec_time=flip_time)
            else:
                self.hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]"%(trial['IMAGE_NAME']),sec_time=flip_time)

            # End the trial on a SPACE press.
            if ' ' in kb.getPresses():
                run_trial = False

        # Trial over: log the end time and stop recording eye data (no
        # use for eye data between trials).
        flip_time=window.flip()
        trial['TRIAL_END']=flip_time
        self.hub.sendMessageEvent(text="TRIAL_END %d"%t,sec_time=flip_time)
        tracker.setRecordingState(False)

        # Save the Experiment Condition Variable Data for this trial
        # to the ioDataStore.
        self.hub.addTrialHandlerRecord(trial)
        self.hub.clearEvents('all')
        t+=1

    # Disconnect the eye tracking device.
    tracker.setConnectionState(False)

    # Show the exit screen and wait for any key press.
    instuction_text="Press Any Key to Exit Demo"
    instructions_text_stim.setText(instuction_text)
    instructions_text_stim.draw()
    flip_time=window.flip()
    self.hub.sendMessageEvent(text="SHOW_DONE_TEXT",sec_time=flip_time)
    kb.waitForPresses()

    # The experiment is done; all trials have been run.
    self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE')
# Display calibration gfx window and run calibration. result = tracker.runSetupProcedure() print("Calibration returned: ", result) # Maximize the PsychoPy window if needed showWindow(window) flip_time = window.flip() io_hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time) # Send some information to the ioDataStore as experiment messages, # including the experiment and session id's, the calculated pixels per # degree, display resolution, etc. # io_hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START") io_hub.sendMessageEvent( text="ioHub Experiment started {0}".format(getCurrentDateTimeString())) io_hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format( io_hub.experimentID, io_hub.experimentSessionID)) io_hub.sendMessageEvent(text="Stimulus Screen ID: {0}, " "Size (pixels): {1}, CoordType: {2}".format( display.getIndex(), display.getPixelResolution( ), display.getCoordinateType())) io_hub.sendMessageEvent( text="Calculated Pixels Per Degree: {0} x, {1} y".format( *display.getPixelsPerDegree())) io_hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END") io_hub.clearEvents('all') # For each trial in the set of trials within the current block. #
def run(self,*args):
    """
    Run one session of the gaze-contingent image-viewing demo.

    Steps performed:
    1) Load trial conditions from trial_conditions.xlsx (all DV's and
       IV's used or updated per trial must be columns of that file).
    2) Register the TrialHandler with the ioDataStore, creating a
       results table with a field for each DV and IV.
    3) Run the eye tracker's setup procedure (calibration, validation,
       etc. — details are specific to each tracker implementation).
    4) Create the runtime graphics: a cache of trial images, a gaze
       cursor, and an instruction text stim.
    5) For each trial: wait for SPACE, show the trial's image, record
       eye data, draw a gaze-contingent dot at the reported gaze
       position, end on SPACE, and save the trial's condition variable
       values to the ioDataStore.

    Returns 1 early if no eye tracker device is configured in
    iohub_config.yaml.
    """
    exp_conditions=importConditions('trial_conditions.xlsx')
    trials = TrialHandler(exp_conditions,1)

    # Inform the ioDataStore that the experiment is using a
    # TrialHandler. The ioDataStore will create a table which can be
    # used to record the actual trial variable values (DV or IV) in
    # the order run / collected.
    self.hub.createTrialHandlerRecordTable(trials)

    # Short-cuts to the ioHub devices used in this demo.
    try:
        tracker=self.hub.devices.tracker
    except Exception:
        # No eye tracker config found in iohub_config.yaml; tell the
        # user how to fix it and abort the demo.
        from psychopy.iohub.util import MessageDialog
        md = MessageDialog(title="No Eye Tracker Configuration Found",
                           msg="Update the iohub_config.yaml file by "
                               "uncommenting\nthe appropriate eye tracker "
                               "config lines.\n\nPress OK to exit demo.",
                           showButtons=MessageDialog.OK_BUTTON,
                           dialogType=MessageDialog.ERROR_DIALOG,
                           allowCancel=False,
                           display_index=0)
        md.show()
        return 1
    display=self.hub.devices.display
    kb=self.hub.devices.keyboard
    mouse=self.hub.devices.mouse

    # Run the eye tracker default setup procedure. The setup details
    # (calibration, validation, etc.) are unique to each
    # implementation of the Common Eye Tracker Interface; all share
    # the goal of calibrating the system prior to data collection.
    tracker.runSetupProcedure()

    # Create the single full screen PsychoPy window used for the
    # experiment graphics (ioHub supports one full screen window).
    res=display.getPixelResolution()  # current pixel resolution of the Display
    coord_type=display.getCoordinateType()
    window=visual.Window(res,monitor=display.getPsychopyMonitorName(),  # PsychoPy Monitor Config name, if used.
                         units=coord_type,           # coordinate space to use.
                         fullscr=True,               # full screen mode is required.
                         allowGUI=False,             # borderless.
                         screen= display.getIndex()  # display index, for multi display setups.
                         )

    # Pre-load an ImageStim for every trial image.
    image_cache=dict()
    image_names=['canal.jpg','fall.jpg','party.jpg','swimming.jpg','lake.jpg']
    for iname in image_names:
        image_cache[iname]=visual.ImageStim(window, image=os.path.join('./images/',iname),
                                            name=iname,units=coord_type)

    # Gaze cursor blob. Current units assume pix.
    gaze_dot =visual.GratingStim(window,tex=None, mask="gauss",
                                 pos=(0,0 ),size=(66,66),color='green',
                                 units=coord_type)

    # Text stim reused for all /instruction/ screens. Units assume pix.
    instructions_text_stim = visual.TextStim(window, text='', pos = [0,0], height=24,
                                             color=[-1,-1,-1], colorSpace='rgb',
                                             alignHoriz='center', alignVert='center',
                                             wrapWidth=window.size[0]*.9)

    # Show the start screen; log its display time to the DataStore.
    instuction_text="Press Any Key to Start Experiment."
    instructions_text_stim.setText(instuction_text)
    instructions_text_stim.draw()
    flip_time=window.flip()
    self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)

    # Wait until a key event occurs after the instructions are displayed.
    self.hub.clearEvents('all')
    kb.waitForPresses()

    # Log experiment / session / display metadata as experiment
    # messages (ids, pixels per degree, resolution, etc.).
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
    self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
    self.hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID,self.hub.experimentSessionID))
    self.hub.sendMessageEvent(text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getIndex(),display.getPixelResolution(),display.getCoordinateType()))
    self.hub.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")

    self.hub.clearEvents('all')

    # Run each trial in the set of trials within the current block.
    t=0
    for trial in trials:
        # Per-trial start screen.
        # NOTE(review): the message text below duplicates the
        # experiment-level EXPERIMENT_START marker; confirm before
        # relying on it to separate trials during analysis.
        instuction_text="Press Space Key To Start Trial %d"%t
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time=window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)

        # Wait for a SPACE press to start the trial.
        self.hub.clearEvents('all')
        kb.waitForPresses(keys=[' ',])

        # Record the session and trial ids for the ioDataStore row.
        trial['session_id']=self.hub.getSessionID()
        trial['trial_id']=t+1

        # Msg marking trial start (time of the instruction retrace).
        self.hub.sendMessageEvent(text="TRIAL_START",sec_time=flip_time)

        # Start recording eye data.
        tracker.setRecordingState(True)

        # Show this trial's image.
        imageStim=image_cache[trial['IMAGE_NAME']]
        imageStim.draw()
        flip_time=window.flip()

        # Drop all events received prior to the trial start.
        self.hub.clearEvents('all')

        # Log the retrace time of the first frame showing the stim.
        self.hub.sendMessageEvent(text="TRIAL_START",sec_time=flip_time)

        trial['TRIAL_START']=flip_time

        # Gaze-contingent loop: redraw until SPACE is pressed.
        run_trial=True
        while run_trial is True:
            # Latest gaze position in display coord space; a
            # non-sequence value means the tracker is not currently
            # tracking eye position.
            # FIX: use isinstance() instead of `type(gpos) in [tuple,list]`.
            gpos=tracker.getPosition()
            if isinstance(gpos,(tuple,list)):
                # Valid gaze sample: redraw the background image and
                # the gaze cursor at the current eye position.
                gaze_dot.setPos([gpos[0],gpos[1]])
                imageStim.draw()
                gaze_dot.draw()
            else:
                # No gaze sample: draw the image only, removing the
                # gaze cursor from the screen.
                imageStim.draw()

            # Flip video buffers, displaying the stim we just updated.
            flip_time=window.flip()

            # Log the draw time and gaze position (if any) per frame.
            if isinstance(gpos,(tuple,list)):
                self.hub.sendMessageEvent("IMAGE_UPDATE %s %.3f %.3f"%(
                                          trial['IMAGE_NAME'],gpos[0],gpos[1]),
                                          sec_time=flip_time)
            else:
                self.hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]"%(
                                          trial['IMAGE_NAME']),
                                          sec_time=flip_time)

            # End the trial on a SPACE press.
            if kb.getPresses(keys=[' ',]):
                run_trial=False
                break

        # Trial over: record the trial end time and log it.
        flip_time=window.flip()
        trial['TRIAL_END']=flip_time
        self.hub.sendMessageEvent(text="TRIAL_END %d"%t,sec_time=flip_time)

        # Stop recording eye data: no use for eye data between trials.
        tracker.setRecordingState(False)

        # Save this trial's condition variable values to the ioDataStore.
        self.hub.addTrialHandlerRecord(trial)

        # Clear all event buffers.
        self.hub.clearEvents('all')
        t+=1

    # All trials have been run, so end the experiment.
    flip_time=window.flip()
    self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE',sec_time=flip_time)

    # Disconnect the eye tracking device.
    tracker.setConnectionState(False)

    # Show the 'experiment done' screen and wait for any key press.
    instuction_text="Press Any Key to Exit Demo"
    instructions_text_stim.setText(instuction_text)
    instructions_text_stim.draw()
    flip_time=window.flip()
    self.hub.sendMessageEvent(text="SHOW_DONE_TEXT",sec_time=flip_time)
    self.hub.clearEvents('all')
    kb.waitForPresses()
def run(self, *args):
    """Run the complete eye-tracking decision experiment session.

    This is the experiment entry point (the equivalent of a standard
    PsychoPy main script): it creates the stimulus window, starts the
    Tobii recording, runs the trial loop (blank screen -> fixation
    cross -> decision table per trial), writes behavioural and gaze CSV
    output, and finally converts the raw TSV recordings to per-subject
    CSV files.

    ``args[0]`` is expected to be the selected eye tracker name.

    NOTE(review): ``global subj_id`` is declared but the body reads
    ``subject_id`` throughout -- confirm which module-level name is
    actually intended.
    """
    global subj_id
    global con

    from psychopy.iohub import module_directory
    exp_script_dir = module_directory(self.run)

    # Loading trial conditions through a TrialHandler is currently
    # disabled; item rows are read directly from the input CSV instead
    # (see read_input_file below).
    #exp_conditions = importConditions(os.path.join(exp_script_dir,
    #                                  'trial_conditions.xlsx'))
    #TrialHandler(trialList, nReps, method='random', dataTypes=None, extraInfo=None, seed=None, originPath=None, name='', autoLog=True)
    #trials = TrialHandler(exp_conditions, 1) # 1 - number of repetitions, how do we use conditions lets try to comment out

    # Inform the ioDataStore that the experiment is using a
    # TrialHandler. The ioDataStore will create a table
    # which can be used to record the actual trial variable values (DV or IV)
    # in the order run / collected.
    #
    #self.hub.createTrialHandlerRecordTable(trials)

    # Use Esc to quit; it will be called at some stages during the experiment.
    def _checkQuit(key):
        """Terminate the process immediately when *key* is the escape key.

        NOTE(review): os._exit(1) kills the interpreter at once, so the
        core.quit() below is unreachable and no cleanup handlers run.
        """
        if key[0] == 'escape':
            os._exit(1)
            core.quit()

    ###########
    #### Experiment functions
    ###########

    def to_output(subject_id, decision, trigger_value, i_d, output_file_dir,
                  reaction_time, reaction_time_decision_scren):
        """Append one decision record to the behavioural output CSV.

        A header row is written first if the file does not exist yet.
        *i_d* is the split condition row: item number followed by six
        'c' and six 'm' cell values (13 fields in total).
        """
        import os.path
        global con
        is_exist = False
        if os.path.exists(output_file_dir):
            is_exist = True
        # Add header to the output file if it is the first time to write to it...
        if not is_exist:
            output_data_headers = [
                'Subject_id', 'Condition', 'Decision', 'Trigger',
                'Item number', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'm1',
                'm2', 'm3', 'm4', 'm5', 'm6', 'Reaction time',
                'Reaction time since decision screen start'
            ]
        # Python 2
        #with open(output_file_dir, 'ab') as f:
        # Python 3
        with open(output_file_dir, 'a', newline='') as f:
            writer = csv.writer(f)
            if not is_exist:
                writer.writerows([output_data_headers])
            writer.writerows([[
                subject_id, con, decision, trigger_value, i_d[0], i_d[1],
                i_d[2], i_d[3], i_d[4], i_d[5], i_d[6], i_d[7], i_d[8],
                i_d[9], i_d[10], i_d[11], i_d[12], reaction_time,
                reaction_time_decision_scren
            ]])

    def to_output_eyetracking(subject_id, x, y, gazetime, trigger,
                              output_eye_file_dir):
        """Append one gaze sample (x, y, timestamp, trigger) to the eye CSV.

        A header row is written first if the file does not exist yet.
        """
        import os.path
        is_exist = False
        if os.path.exists(output_eye_file_dir):
            is_exist = True
        # Add header to the output file if it is the first time to write to it...
        if not is_exist:
            output_data_headers = [
                'Subject_id', 'x', 'y', 'gazetime', 'Trigger'
            ]
        # Python 2
        #with open(output_eye_file_dir, 'ab') as f:
        # Python 3
        with open(output_eye_file_dir, 'a', newline='') as f:
            writer = csv.writer(f)
            if not is_exist:
                writer.writerows([output_data_headers])
            writer.writerows([[subject_id, x, y, gazetime, trigger]])

    def row_to_condition(row):
        """Split a semicolon-separated condition row into a list of cells.

        NOTE(review): the np.empty allocation is dead code -- ``a`` is
        immediately rebound to the result of str.split.
        """
        txt = row
        #array_column_names
        #print(txt.split(';'))
        a = np.empty(len(txt.split(';')))
        a = txt.split(';')
        #print('a', a)
        return a

    def read_input_file(csv_dir, item_number):
        """Return row number *item_number* (1-based) from the input file.

        Returns 0 when the file has fewer rows than *item_number*.
        NOTE(review): with a newline as the csv delimiter each physical
        line comes back as a single-field row -- confirm this is the
        intended parsing (fields are split later by row_to_condition).
        """
        global subject_id
        i = 0
        with open(csv_dir, 'rt') as csvfile:
            spamreader = csv.reader(csvfile, delimiter='\n', quotechar='|')
            #print(spamreader)
            for row in spamreader:
                i = i + 1
                if (i == item_number):
                    #print('row', row)
                    return row
        return 0

    def monitor_coordinate_check(win):
        """Draw a labelled pixel-coordinate grid for screen calibration checks.

        Debug utility only (called from commented-out code below).
        NOTE(review): win.flip() inside the loop clears the previous
        frame, so only one grid line pair is visible per flip -- confirm
        whether the draw/flip calls belong outside the loop.
        """
        for i in range(90):
            texti = str(-450 + 10 * i)  #-display_resolution[1]/2
            pixel_line_y = visual.ShapeStim(
                win, units='pix', lineWidth=1.5, lineColor=(55, 255, 255),
                lineColorSpace='rgb255',
                vertices=((-750, -450 + 10 * i), (750, -450 + 10 * i)),
                closeShape=False, pos=(0, 0), size=1.2)
            pixel_name_y = visual.TextStim(win, text='y=' + texti, height=10,
                                           units='pix',
                                           pos=[0, -450 + 10 * i],
                                           color=[255, 55, 255],
                                           colorSpace='rgb255')
            texti = str(-800 + i * 20)  #-display_resolution[0]/2
            pixel_line_x = visual.ShapeStim(
                win, units='pix', lineWidth=1.5, lineColor=(155, 255, 55),
                lineColorSpace='rgb255',
                vertices=((-800 + i * 20, -450), (-800 + i * 20, 450)),
                closeShape=False, pos=(0, 0), size=1)  #what size param
            pixel_name_x = visual.TextStim(win, text=texti, height=9,
                                           units='pix',
                                           pos=[-800 + i * 20, 0],
                                           color=[255, 55, 55],
                                           colorSpace='rgb255')
            pixel_line_x.draw()
            pixel_line_y.draw()
            pixel_name_x.draw()
            pixel_name_y.draw()
            win.flip()

    def draw_input(win, item_array_text, item_array_x, item_array_y):
        """Draw the (shuffled) attribute cell values of both agreements.

        Cells 1-6 of *item_array_text* belong to the left agreement and
        7-13 to the right one; each half is shuffled with a fresh RNG
        seeded by the condition number ``con``, so the order is
        reproducible per condition (and matches the label shuffle in
        draw_table_lines, which uses the same seed on a same-length list).
        """
        global con
        item_left = item_array_text[1:7]
        item_right = item_array_text[7:13]
        print(item_array_text, item_left, item_right)
        random.Random(con).shuffle(item_left)
        random.Random(con).shuffle(item_right)
        print(item_array_text, item_left, item_right)
        item_array_text_shuffled = item_left + item_right
        print(item_array_text_shuffled)
        for i in range(len(item_array_x)):
            #print(item_array_x[i], item_array_y[i], i, len(item_array_x), len(item_array_text), item_array_text)
            whitebox = visual.ShapeStim(
                win, units='pix', lineWidth=1.5, lineColor=(255, 255, 255),
                lineColorSpace='rgb255',
                vertices=((item_array_x[i] + 20, item_array_y[i] + 20),
                          (item_array_x[i] + 20, item_array_y[i] - 20),
                          (item_array_x[i] - 20, item_array_y[i] - 20),
                          (item_array_x[i] - 20, item_array_y[i] + 20)),
                closeShape=True, fillColor=(255, 255, 255),
                fillColorSpace='rgb255', pos=(0, 0), size=1)  #what size param
            #uncomment white box in case want to create different background on values
            #whitebox.draw()
            item_value = visual.TextStim(
                win, text=item_array_text_shuffled[i], height=14,
                units='pix',
                #here we use i+1 because the first number is numbers item
                pos=[item_array_x[i], item_array_y[i]], color=[0, 0, 0],
                colorSpace='rgb255')
            item_value.draw()
        # clearBuffer=False keeps the already-drawn table lines on screen.
        win.flip(clearBuffer=False)

    def logs_windows(log_text, log_key_to_proceed):
        """Show *log_text* full-screen and block until the given key is pressed.

        Returns the list of pressed keys from event.waitKeys.
        """
        start_message = visual.TextStim(win, text=log_text, pos=[0, 0],
                                        height=35, color=[255, 255, 255],
                                        colorSpace='rgb255',
                                        wrapWidth=win.size[0] * .9)
        start_message.draw()
        win.flip()
        # Short pause before listening, so a key still held from the
        # previous screen is not picked up immediately.
        core.wait(0.2)
        key = event.waitKeys(keyList=[log_key_to_proceed])
        return key

    def instructions_blank_screen(win, output_eye_dir):
        """Blank inter-stimulus screen; currently only clears ioHub buffers.

        No win.flip() is performed here, so the previously flipped frame
        stays on screen; the gaze-dot drawing is commented out.
        """
        #uncomment in case want to draw gaze dot
        '''timer = core.Clock()
        timer.add(0.5)
        while timer.getTime()<0:
            print('precise timing bl', timer.getTime())'''
        #draw_gaze_dot(win, 1001, 0.5, output_eye_dir)
        self.hub.clearEvents('all')

    def instructions_fixation_cross(win, output_eye_dir):
        """Flash a fixation cross at the top-left table corner, then clear it."""
        #inst_dir = 'Instructions\\fixation_cross.jpg'
        #instr=visual.ImageStim(win,image=inst_dir, units='pix', size = display_resolution)
        #instr.draw()
        fixation_cross = visual.TextStim(win, text='+', pos=[-595, 345],
                                         height=54, color=[-1, -1, -1],
                                         colorSpace='rgb')
        fixation_cross.autoDraw = True
        win.flip()
        #uncomment in case we want to see the fixation
        # draw_gaze_dot(win, 2001, 0.5, output_eye_dir) #change 0.5 seconds
        #draw_gaze_dot(win, 2001, 0.5, output_eye_dir) #change 0.5 seconds
        #uncomment in case of coordinate monitor greed
        #monitor_coordinate_check(win)
        #comment in case of monitor coordinate check
        #win.flip()
        fixation_cross.autoDraw = False
        self.hub.clearEvents('all')

    def draw_table_lines(win):
        """Draw the static two-agreement comparison table.

        Renders the two agreement boxes, their header underlines, six
        dotted row separators and the attribute labels. The labels are
        shuffled with the same ``con``-seeded RNG as the cell values in
        draw_input (same seed + same list length = same permutation).
        """
        global con
        print('con', con)
        table_rectangle = visual.ShapeStim(
            win, units='pix', lineWidth=1.5, lineColor=(25, 25, 25),
            lineColorSpace='rgb255',
            vertices=((-225, 375), (200, 375), (200, -395), (-225, -395)),
            closeShape=True, pos=(0, 0), size=1)
        table_rectangle2 = visual.ShapeStim(
            win, units='pix', lineWidth=1.5, lineColor=(25, 25, 25),
            lineColorSpace='rgb255',
            vertices=((235, 375), (650, 375), (650, -395), (235, -395)),
            closeShape=True, pos=(0, 0), size=1)
        line1 = visual.ShapeStim(win, units='pix', lineWidth=1.5,
                                 lineColor=(25, 25, 25),
                                 lineColorSpace='rgb255',
                                 vertices=((-225, 327), (200, 327)),
                                 pos=(0, 0), size=1)
        line2 = visual.ShapeStim(win, units='pix', lineWidth=1.5,
                                 lineColor=(25, 25, 25),
                                 lineColorSpace='rgb255',
                                 vertices=((235, 327), (650, 327)),
                                 pos=(0, 0), size=1)
        line_dotted1 = visual.Line(win, start=(-660, 280), end=(650, 280),
                                   lineColor=(25, 25, 25),
                                   lineColorSpace='rgb255')
        line_dotted2 = visual.Line(win, start=(-660, 148), end=(650, 148),
                                   lineColor=(25, 25, 25),
                                   lineColorSpace='rgb255')
        line_dotted3 = visual.Line(win, start=(-660, 40), end=(650, 40),
                                   lineColor=(25, 25, 25),
                                   lineColorSpace='rgb255')
        line_dotted4 = visual.Line(win, start=(-660, -70), end=(650, -70),
                                   lineColor=(25, 25, 25),
                                   lineColorSpace='rgb255')
        line_dotted5 = visual.Line(win, start=(-660, -175), end=(650, -175),
                                   lineColor=(25, 25, 25),
                                   lineColorSpace='rgb255')
        line_dotted6 = visual.Line(win, start=(-660, -284), end=(650, -284),
                                   lineColor=(25, 25, 25),
                                   lineColorSpace='rgb255')
        text = [
            'Number of participating countries',
            'Costs to average household per \n month',
            'Share of emission represented by \nparticipating countries',
            'Distribution of cost from \nimplementing the agreement',
            'Sanctions for missing emission \nreduction targets',
            'Monitoring: Emission reductions \nwill be monitored by'
        ]
        #shuffle text, put text items in an array
        random.Random(con).shuffle(text)
        for i in range(6):
            start_message = visual.TextStim(win, text=text[i],
                                            pos=[-640, 215 - i * 112],
                                            height=24, color=[25, 25, 25],
                                            colorSpace='rgb255',
                                            wrapWidth=win.size[0] * .9,
                                            alignHoriz='left')
            start_message.draw()
        agreement_message = ('Agreement 1', 'Agreement 2')
        agreement_message1 = visual.TextStim(win, text=agreement_message[0],
                                             pos=[-15, 355], height=24,
                                             color=[25, 25, 25],
                                             colorSpace='rgb255',
                                             wrapWidth=win.size[0] * .9)
        agreement_message1.draw()
        agreement_message2 = visual.TextStim(win, text=agreement_message[1],
                                             pos=[440, 355], height=24,
                                             color=[25, 25, 25],
                                             colorSpace='rgb255',
                                             wrapWidth=win.size[0] * .9)
        agreement_message2.draw()
        table_rectangle.draw()
        table_rectangle2.draw()
        line1.draw()
        line2.draw()
        line_dotted1.draw()
        line_dotted2.draw()
        line_dotted3.draw()
        line_dotted4.draw()
        line_dotted5.draw()
        line_dotted6.draw()

    def instructions_choice_decision(win, item_list_text, output_eye_dir):
        """Show the choice table and collect the decision via draw_gaze_dot.

        Returns (choice_key, total_time, time_since_decision_screen_start)
        as produced by draw_gaze_dot with trigger 3001.
        """
        #uncomment in case we want to compare with table from the presentation experiment requirements
        #inst_dir = 'Instructions\\choice_decision.jpg'
        #instr=visual.ImageStim(win,image=inst_dir, units='pix', size = display_resolution)
        #instr.draw()
        # Cell centres: x = -15 (Agreement 1 column) and 445 (Agreement 2
        # column), six rows per column.
        item_array_x = np.array(
            [-15, -15, -15, -15, -15, -15, 445, 445, 445, 445, 445, 445])
        item_array_y = np.array([
            215, 105, -5, -115, -225, -335, 215, 105, -5, -115, -225, -335
        ])
        draw_table_lines(win)
        draw_input(win, item_list_text, item_array_x, item_array_y)
        (choice, time_all, time_trial) = draw_gaze_dot(
            win, 3001, 10000, output_eye_dir)  #comment in case want to see gazedot
        return (choice, time_all, time_trial)

    def draw_trigger(win, tracker, trigger, item_number, output_file_dir,
                     output_eye_dir):
        """Run one screen of a trial, selected by *trigger*.

        1001 = blank screen, 2001 = fixation cross, 3001 = decision
        screen (which also appends the behavioural record). Returns the
        choice key for the 3001 screen, otherwise 0.

        NOTE(review): reads the loop variable ``t`` and the module-level
        ``input_file_dir``/``subject_id`` from enclosing scopes -- it may
        only be called from inside the trial loop below.
        """
        global input_file_dir
        choice = 0
        flip_time = win.flip()
        self.hub.sendMessageEvent(text="TRIAL_START", sec_time=flip_time)
        self.hub.clearEvents('all')
        tracker.setRecordingState(True)  #setting it every time here - why
        #controller.start_recording('real-test.tsv')
        #tracker.setTriggerValue(trigger)
        input_to_make_decision = read_input_file(input_file_dir, item_number)
        input_to_make_decision_split = ''.join(input_to_make_decision)
        input_to_make_decision_split = row_to_condition(
            input_to_make_decision_split)
        #default gazetime and eyedata
        x, y, gazetime = 'did not catch', 'eyedata, possibly blinked', 30
        if (trigger == 1001):
            instructions_blank_screen(win, output_eye_dir)
        if (trigger == 2001):
            instructions_fixation_cross(
                win,
                output_eye_dir,
            )
        if (trigger == 3001):
            choice, choice_time_whole, choice_time_decision_screen = instructions_choice_decision(
                win, input_to_make_decision_split, output_eye_dir)
            to_output(subject_id, choice, trigger,
                      input_to_make_decision_split, output_file_dir,
                      choice_time_whole, choice_time_decision_screen)
        flip_time = win.flip()
        self.hub.sendMessageEvent(text="TRIAL_END %d" % t, sec_time=flip_time)
        tracker.setRecordingState(False)  # stop recording
        #controller.stop_recording()
        self.hub.clearEvents('all')
        return choice

    def draw_gaze_dot(win, trigger, time, output_eye_dir):
        """Poll gaze for up to *time* seconds, logging every sample to CSV.

        On the decision screen (trigger 3001) it also polls the 'c'/'m'
        choice keys each iteration and returns (key, now, elapsed) as
        soon as one is pressed; otherwise returns 0 after the timeout.
        NOTE(review): the parameter name ``time`` shadows the stdlib
        ``time`` module inside this function.
        """
        #try to paint gaze position
        #if we are not using draw gaze dot, then we would not use 'c', 'm' to transfer through the blank and fixation_cross
        #screens.
        stime = getTime()
        #print(stime)
        while getTime() - stime < time:
            gpos = tracker.getPosition()
            #print('start time', stime, 'actual time', getTime(), 'difference', getTime()-stime, 'until <',time)
            if (gpos != None):
                start_message = visual.TextStim(win, text='+',
                                                pos=[gpos[0], gpos[1]],
                                                height=10,
                                                color=[-1, -1, -1],
                                                colorSpace='rgb',
                                                wrapWidth=win.size[0])
                start_message.draw()
                #start_message = visual.TextStim(win, text=str(gpos), pos = [gpos[1],gpos[0]], height=35,color=[-1,-1,-1],colorSpace='rgb',wrapWidth=win.size[0]*.9)
                #start_message.draw()
                # clearBuffer=False keeps the stimulus screen visible
                # underneath the gaze marker.
                win.flip(clearBuffer=False)
                to_output_eyetracking(subject_id, gpos[0], gpos[1],
                                      getTime(), trigger, output_eye_dir)
                core.wait(0.001)
            else:
                # No gaze sample (blink / tracking loss): log a marker
                # row so the gaps are visible in the output.
                to_output_eyetracking(
                    subject_id, 'did not catch eye data, possibly blinked',
                    'or corrupted', getTime(), trigger, output_eye_dir
                )  #getTime gives current time, here should probably print getTime from inside while
            self.hub.clearEvents('all')
            if (trigger == 3001):
                key = event.getKeys(keyList=['c', 'm'])
                if key != []:
                    print('choice key', key, trigger, getTime())
                    return (key[0], getTime(), getTime() - stime)
        self.hub.clearEvents('all')
        print('events after clear events', event.getKeys(keyList=['c', 'm']))
        return 0

    selected_eyetracker_name = args[0]
    # Let's make some short-cuts to the devices we will be using in this 'experiment'.
    tracker = self.hub.devices.tracker
    display = self.hub.devices.display
    kb = self.hub.devices.keyboard
    mouse = self.hub.devices.mouse

    # Start by running the eye tracker default setup procedure.
    #tracker.runSetupProcedure()

    # Create a psychopy window, full screen resolution, full screen mode...
    display_coord_type = display.getCoordinateType()
    display_resolution = display.getPixelResolution()
    # Pixel units are recommended when using an eye tracker, because the
    # tracker reports gaze positions in pixels.
    win = visual.Window(display_resolution,
                        monitor=display.getPsychopyMonitorName(),
                        units='pix',
                        fullscr=True,
                        screen=display.getIndex(),
                        waitBlanking=False)  #color="white"

    # Hide the 'system mouse cursor'.
    # would need it for later
    #mouse.setSystemCursorVisibility(False)
    event.Mouse(visible=False)
    controller = tobii_controller(win)

    #------------------------------------------------------------Experiment begins ----------------------------------------------------------------------------------------------
    # Get the local time in a short format to name the output csv files.
    localtime = time.asctime(time.localtime(time.time()))
    localtime = localtime[11:16] + 'pm-' + localtime[4:10]
    localtime = localtime.replace(":", "_").replace(" ", "_")
    # Per-subject output csv file paths.
    csv_eye_output = 'Exp Results\\' + subject_id + '_eyetracking_output' + localtime + '.csv'
    csv_experiment_output = 'Exp Results\\' + subject_id + '_decision_output' + localtime + '.csv'
    controller.start_recording('data_raw\\_EXP_DTA.tsv')
    tracker.setRecordingState(True)

    # Draw the instruction screen before the experiment starts.
    inst_dir = 'Instructions\\Inst_2.jpg'
    instr = visual.ImageStim(win, image=inst_dir, units='pix',
                             size=display_resolution)
    instr.draw()
    '''inst1 = visual.TextStim(win, text='Instruction', pos = [0,0], height=24, color=[-1,-1,-1], colorSpace='rgb', alignHoriz='center', alignVert='center', wrapWidth=win.size[0]*.9)'''
    flip_time = win.flip()
    self.hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)
    self.hub.clearEvents('all')
    key = event.waitKeys(keyList=['space'])

    #------------------------------------------------------------Experiment trial testing ----------------------------------------------------------------------------------------------
    # Send some information to the ioHub DataStore as experiment messages
    # including the eye tracker being used for this session.
    #
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
    self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(
        getCurrentDateTimeString()))
    self.hub.sendMessageEvent(
        text="Experiment ID: {0}, Session ID: {1}".format(
            self.hub.experimentID, self.hub.experimentSessionID))
    self.hub.sendMessageEvent(
        text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".
        format(display.getIndex(), display.getPixelResolution(),
               display.getCoordinateType()))
    self.hub.sendMessageEvent(
        text="Calculated Pixels Per Degree: {0} x, {1} y".format(
            *display.getPixelsPerDegree()))
    self.hub.sendMessageEvent(text="Eye Tracker being Used: {0}".format(
        selected_eyetracker_name))
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")
    print('get current date time', "{0}".format(getCurrentDateTimeString()))
    print('experiment ID', self.hub.experimentID, 'experiment session ID',
          self.hub.experimentSessionID)
    print('display', "{0}".format(display.getIndex()), 'pixel resolution',
          "{0}".format(display.getPixelResolution()), 'coordinate type',
          "{0}".format(display.getCoordinateType()))
    print('pixels degree', "{0}".format(display.getPixelsPerDegree()),
          'selected eyetracker', selected_eyetracker_name)
    self.hub.clearEvents('all')

    for t in range(2):  #number of trials is 10
        self.hub.sendMessageEvent(text="TRIAL_START")
        self.hub.clearEvents('all')
        #uncomment for trials, here item number 1 is used only for testing purposes
        # Pick the stimulus row for this trial; the row indexing starts from 2.
        item_number = random.randrange(2, 11, 1)
        #item_number = 2
        # Each trial runs three screens in order:
        # blank (1001) -> fixation cross (2001) -> decision (3001).
        trigger_value = 1001
        draw_trigger(win, tracker, trigger_value, item_number,
                     csv_experiment_output, csv_eye_output)
        trigger_value = 2001
        draw_trigger(win, tracker, trigger_value, item_number,
                     csv_experiment_output, csv_eye_output)
        trigger_value = 3001
        draw_trigger(win, tracker, trigger_value, item_number,
                     csv_experiment_output, csv_eye_output)
        flip_time = win.flip()
        self.hub.sendMessageEvent(text='TRIAL_END', sec_time=flip_time)
        self.hub.clearEvents('all')

    #------------------------------------------------------------Experiment ends ----------------------------------------------------------------------------------------------
    # Disconnect the eye tracking device.
    # So the experiment is done, all trials have been run.
    # Clear the screen and show an 'experiment done' message using the
    # instructionScreen state. Wait for the trigger to exit that state
    # (i.e. the space key was pressed).
    #
    flip_time = win.flip()
    self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE', sec_time=flip_time)
    tracker.setRecordingState(False)
    tracker.setConnectionState(False)
    # stop recording
    controller.stop_recording()
    # close the file
    controller.close()
    logs_windows(
        "Thank you for your participation! Press ''escape'' to exit",
        'escape')
    print('checkquit escape')
    # NOTE(review): ``key`` still holds the 'space' press from the start
    # screen (logs_windows' return value is discarded), so _checkQuit
    # never sees 'escape' here -- confirm intent.
    _checkQuit(key)
    self.hub.sendMessageEvent(text="SHOW_DONE_TEXT")
    tex1 = eventtxt.Eventtotext()
    print('tex1=eventtxt.Eventtotext()', tex1)
    #use try: would give an error in case of the not connected eye tracker at later stages
    tex1.convertToText(exp_script_dir, subject_id, localtime)
    self.hub.clearEvents('all')
    #self.hub.clearEvents('all', exp_script_dir)
    # MANAGER ERROR WHEN SENDING MSG:[Errno 9] Bad file descriptor
    #Warning: TimeoutExpired, Killing ioHub Server process.
    #ioHubExperimentRuntime.shutdown()
    #print(ioHubExperimentRuntime)
    win.close()
    self.hub.quit()
    #print('end of exp logic')

    ### End of experiment logic
    # Convert the raw Tobii TSV recordings into per-subject CSV copies,
    # annotating the TSV files in place with trigger / subject /
    # calibration columns.
    tsv_dir_1 = 'data_raw\\_Calib_DTA.tsv'
    csv_dir_1 = 'data_raw\\' + subject_id + '_Calib_DTA' + localtime + '.csv'
    tsv_dir_2 = 'data_raw\\_EXP_DTA.tsv'
    csv_dir_2 = 'data_raw\\' + subject_id + '_EXP_DTA' + localtime + '.csv'
    # NOTE(review): tsv_file_1 / tsv_file_2 are never closed, and
    # read_tsv_1 / read_tsv are unused readers on the same handles.
    tsv_file_1 = open(tsv_dir_1)
    read_tsv_1 = csv.reader(tsv_file_1, delimiter="\t")
    data = list(csv.reader(tsv_file_1, delimiter="\t"))
    # NOTE(review): ``calib_state`` is not defined anywhere in this
    # method (presumably set during calibration elsewhere -- verify), and
    # ``trigger_value`` here is simply the last loop value (3001).
    df = pd.read_csv(tsv_dir_1, delimiter="\t")
    df["Trigger"] = trigger_value
    df["Subject_ID"] = subject_id
    df["Calibration_status"] = calib_state
    df.to_csv(tsv_dir_1, index=False)
    df = pd.read_csv(tsv_dir_2, delimiter="\t")
    df["Trigger"] = trigger_value
    df["Subject_ID"] = subject_id
    df["Calibration_status"] = calib_state
    df.to_csv(tsv_dir_2, index=False)
    # NOTE(review): csv_dir_1 is written from ``data`` captured *before*
    # the annotation columns were added, while csv_dir_2 is re-read
    # *after* annotation -- the two CSVs end up with different schemas;
    # confirm that is intended.
    with open(csv_dir_1, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(data)
    tsv_file_2 = open(tsv_dir_2)
    read_tsv = csv.reader(tsv_file_2, delimiter="\t")
    data = list(csv.reader(tsv_file_2, delimiter="\t"))
    with open(csv_dir_2, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(data)