Code Example #1
    flip_time = win.flip()  # flip() returns the timestamp of this frame flip
    kb.waitForPresses(keys=' ')



###############----Trials----###############

#Display Instructions
showInstructions(text = instText)

#Tell ioHub we are starting
io.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)

#Pass some info to the ioHub regarding settings
io.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START") 
io.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
io.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(io.experimentID,io.experimentSessionID))
io.sendMessageEvent(text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getIndex(), res, units))
io.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))        
io.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")

#Clear events before trial start
io.clearEvents('all')

#Begin Trial Sequence
t = 1
for thisTrial in trials: 

    #Reset resp and rt
    resp=None
    rt=None
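
Code Example #1 is a fragment: names such as io, kb, win, display, instText, showInstructions and trials are defined earlier in the original script and are not shown on this page. The snippet below is only a minimal sketch of the kind of setup the fragment presumes; the window settings and the instruction text are illustrative assumptions, not taken from the original.

from psychopy import visual
from psychopy.iohub import launchHubServer

# Start the ioHub server process and grab the device shortcuts used above.
io = launchHubServer()
kb = io.devices.keyboard
display = io.devices.display

# A full screen window; size/units here are assumptions for illustration.
win = visual.Window(fullscr=True, allowGUI=False, units='pix')

instText = "Press SPACE to begin."  # hypothetical instruction text
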
Code Example #2
File: run.py  Project: Christings/deeplearning
    def run(self, *args):
        """
        The run method contains your experiment logic. In this example we:

        1) Load an xlsx file containing the trial conditions for use
           during the experiment. All DVs and IVs to be used or updated
           for each trial must be specified as columns in the xlsx file.
        2) Inform the ioDataStore of the trial conditions to be used, resulting in the
           creation of an experiment-specific results table, with a field for each
           DV and IV defined in the xlsx file.
        3) Run the eye tracking device's runSetupProcedure(), which allows
           the calibration, validation, etc. of the eye tracking system being used.
        4) Create the experiment runtime graphics, including creating a cache of
           images to be displayed for each trial of the experiment.
        5) Run the experimental block of trials of the demo. Each trial sequence
           consists of:
               a) The participant pressing the SPACE key to start the trial.
               b) Randomly displaying one of the background images for a trial.
               c) Starting recording of data from the eye tracker.
               d) Displaying a gaze contingent dot located at the gaze position reported by the eye tracker.
               e) Ending each trial by pressing the SPACE key.
               f) Sending any condition variable value changes for that trial
                  to the ioDataStore for easy future selection of device events
                  recorded during the trial or for specific condition variable values.
               g) Stopping of event recording on the eye tracker device.
        """

        exp_conditions = importConditions('trial_conditions.xlsx')
        trials = TrialHandler(exp_conditions, 1)

        # Inform the ioDataStore that the experiment is using a
        # TrialHandler. The ioDataStore will create a table
        # which can be used to record the actual trial variable values (DV or IV)
        # in the order run / collected.
        #
        self.hub.createTrialHandlerRecordTable(trials)

        # Let's make some short-cuts to the devices we will be using
        # in this demo.
        try:
            tracker = self.hub.devices.tracker
        except Exception:
            # No eye tracker config found in iohub_config.yaml
            from psychopy.iohub.util import MessageDialog
            md = MessageDialog(title="No Eye Tracker Configuration Found",
                               msg="Update the iohub_config.yaml file by "
                               "uncommenting\nthe appropriate eye tracker "
                               "config lines.\n\nPress OK to exit demo.",
                               showButtons=MessageDialog.OK_BUTTON,
                               dialogType=MessageDialog.ERROR_DIALOG,
                               allowCancel=False,
                               display_index=0)
            md.show()
            return 1

        display = self.hub.devices.display
        kb = self.hub.devices.keyboard
        mouse = self.hub.devices.mouse

        # Start by running the eye tracker default setup procedure.
        # The details of the setup procedure (calibration, validation, etc)
        # are unique to each implementation of the Common Eye Tracker Interface.
        # All have the common end goal of calibrating the eye tracking system
        # prior to data collection.
        # Please see the eye tracker interface implementation details for the
        # hardware being used at:
        # http://www.isolver-solutions.com/iohubdocs/iohub/api_and_manual/device_details/eyetracker.html#eye-tracking-hardware-implementations
        #
        tracker.runSetupProcedure()

        # Create a psychopy window for the experiment graphics,
        # ioHub supports the use of one full screen window during
        # the experiment runtime. (If you are using a window at all).
        #
        res = display.getPixelResolution(
        )  # Current pixel resolution of the Display to be used
        coord_type = display.getCoordinateType()
        window = visual.Window(
            res,
            monitor=display.getPsychopyMonitorName(
            ),  # name of the PsychoPy Monitor Config file if used.
            units=coord_type,  # coordinate space to use.
            fullscr=True,  # We need full screen mode.
            allowGUI=False,  # We want it to be borderless
            screen=display.getIndex(
            )  # The display index to use, assuming a multi display setup.
        )

        # Hide the 'system mouse cursor' during the experiment.
        #
        mouse.setSystemCursorVisibility(False)

        # Create a dict of image stim for trials and a gaze blob to show the
        # reported gaze position with.
        #
        image_cache = dict()
        image_names = [
            'canal.jpg', 'fall.jpg', 'party.jpg', 'swimming.jpg', 'lake.jpg'
        ]
        for iname in image_names:
            image_cache[iname] = visual.ImageStim(window,
                                                  image=os.path.join(
                                                      './images/', iname),
                                                  name=iname,
                                                  units=coord_type)

        # Create a circle to use for the Gaze Cursor. Current units assume pix.
        #
        gaze_dot = visual.GratingStim(window,
                                      tex=None,
                                      mask="gauss",
                                      pos=(0, 0),
                                      size=(66, 66),
                                      color='green',
                                      units=coord_type)

        # Create a Text Stim for use on /instruction/ type screens.
        # Current units assume pix.
        instructions_text_stim = visual.TextStim(window,
                                                 text='',
                                                 pos=[0, 0],
                                                 height=24,
                                                 color=[-1, -1, -1],
                                                 colorSpace='rgb',
                                                 alignHoriz='center',
                                                 alignVert='center',
                                                 wrapWidth=window.size[0] * .9)

        # Update Instruction Text and display on screen.
        # Send Message to ioHub DataStore with Exp. Start Screen display time.
        #
        instuction_text = "Press Any Key to Start Experiment."
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time = window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)

        # Wait until a key event occurs after the instructions are displayed
        self.hub.clearEvents('all')
        kb.waitForPresses()

        # Send some information to the ioDataStore as experiment messages,
        # including the experiment and session id's, the calculated pixels per
        # degree, display resolution, etc.
        #
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
        self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(
            getCurrentDateTimeString()))
        self.hub.sendMessageEvent(
            text="Experiment ID: {0}, Session ID: {1}".format(
                self.hub.experimentID, self.hub.experimentSessionID))
        self.hub.sendMessageEvent(
            text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".
            format(display.getIndex(), display.getPixelResolution(),
                   display.getCoordinateType()))
        self.hub.sendMessageEvent(
            text="Calculated Pixels Per Degree: {0} x, {1} y".format(
                *display.getPixelsPerDegree()))
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")

        self.hub.clearEvents('all')

        # For each trial in the set of trials within the current block.
        #
        t = 0
        for trial in trials:
            # Update the instruction screen text to indicate
            # a trial is about to start.
            #
            instuction_text = "Press Space Key To Start Trial %d" % t
            instructions_text_stim.setText(instuction_text)
            instructions_text_stim.draw()
            flip_time = window.flip()
            self.hub.sendMessageEvent(text="EXPERIMENT_START",
                                      sec_time=flip_time)

            # Wait until a space key press event occurs after the
            # start trial instructions have been displayed.
            #
            self.hub.clearEvents('all')
            kb.waitForPresses(keys=[
                ' ',
            ])

            # Space Key has been pressed, start the trial.
            # Set the current session and trial id values to be saved
            # in the ioDataStore for the upcoming trial.
            #

            trial['session_id'] = self.hub.getSessionID()
            trial['trial_id'] = t + 1

            # Send a msg to the ioHub indicating that the trial started, and the time of
            # the first retrace displaying the trial stim.
            #
            self.hub.sendMessageEvent(text="TRIAL_START", sec_time=flip_time)

            # Start Recording Eye Data
            #
            tracker.setRecordingState(True)

            # Get the image stim for this trial.
            #
            imageStim = image_cache[trial['IMAGE_NAME']]
            imageStim.draw()
            flip_time = window.flip()
            # Clear all the events received prior to the trial start.
            #
            self.hub.clearEvents('all')
            # Send a msg to the ioHub indicating that the trial started,
            # and the time of the first retrace displaying the trial stim.
            #
            self.hub.sendMessageEvent(text="TRIAL_START", sec_time=flip_time)
            # Set the value of the trial start variable for this trial
            #
            trial['TRIAL_START'] = flip_time

            # Loop until we get a keyboard event
            #
            run_trial = True
            while run_trial is True:
                # Get the latest gaze position in display coord space..
                #
                gpos = tracker.getPosition()
                if type(gpos) in [tuple, list]:
                    # If we have a gaze position from the tracker,
                    # redraw the background image and then the
                    # gaze_cursor at the current eye position.
                    #
                    gaze_dot.setPos([gpos[0], gpos[1]])
                    imageStim.draw()
                    gaze_dot.draw()
                else:
                    # Otherwise just draw the background image.
                    # This will remove the gaze cursor from the screen
                    # when the eye tracker is not successfully
                    # tracking eye position.
                    #
                    imageStim.draw()

                # Flip video buffers, displaying the stim we just
                # updated.
                #
                flip_time = window.flip()

                # Send an experiment message to the ioDataStore
                # indicating the time the image was drawn and
                # current position of gaze spot.
                #
                if type(gpos) in [tuple, list]:
                    self.hub.sendMessageEvent(
                        "IMAGE_UPDATE %s %.3f %.3f" %
                        (trial['IMAGE_NAME'], gpos[0], gpos[1]),
                        sec_time=flip_time)
                else:
                    self.hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]" %
                                              (trial['IMAGE_NAME']),
                                              sec_time=flip_time)

                # Check for any new keyboard press events for the space key.
                # If one is found, set the trial end variable and break
                # from the loop.
                if kb.getPresses(keys=[
                        ' ',
                ]):
                    run_trial = False
                    break

            # The trial has ended, so update the trial end time condition value,
            # and send a message to the ioDataStore with the trial end time.
            #
            flip_time = window.flip()
            trial['TRIAL_END'] = flip_time
            self.hub.sendMessageEvent(text="TRIAL_END %d" % t,
                                      sec_time=flip_time)

            # Stop recording eye data.
            # In this example, we have no use for any eye data
            # between trials, so there is no need to save it.
            #
            tracker.setRecordingState(False)

            # Save the experiment condition variable values for this
            # trial to the ioDataStore.
            #
            self.hub.addRowToConditionVariableTable(trial.values())

            # Clear all event buffers
            #
            self.hub.clearEvents('all')
            t += 1

        # All trials have been run, so end the experiment.
        #

        flip_time = window.flip()
        self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE',
                                  sec_time=flip_time)

        # Disconnect the eye tracking device.
        #
        tracker.setConnectionState(False)

        # The experiment is done, all trials have been run.
        # Clear the screen and show an 'experiment done' message using the
        # instructionScreen text.
        #
        instuction_text = "Press Any Key to Exit Demo"
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time = window.flip()
        self.hub.sendMessageEvent(text="SHOW_DONE_TEXT", sec_time=flip_time)
        self.hub.clearEvents('all')
        # wait until any key is pressed
        kb.waitForPresses()
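
Example #2 reads its per-trial variables from trial_conditions.xlsx via importConditions(). That file is not shown on this page, so the snippet below is only a guess at a minimal layout, reconstructed from the columns the code touches (IMAGE_NAME is read; session_id, trial_id, TRIAL_START and TRIAL_END are filled in during the run). It uses openpyxl purely for illustration.

from openpyxl import Workbook

# Hypothetical reconstruction of trial_conditions.xlsx: one row per trial,
# one column per condition variable referenced in run() above.
wb = Workbook()
ws = wb.active
ws.append(['IMAGE_NAME', 'session_id', 'trial_id', 'TRIAL_START', 'TRIAL_END'])
for image_name in ['canal.jpg', 'fall.jpg', 'party.jpg', 'swimming.jpg', 'lake.jpg']:
    ws.append([image_name, 0, 0, 0.0, 0.0])
wb.save('trial_conditions.xlsx')
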
Code Example #3
# Draw Instructions / Inform ioHub Experiment Starting

instruct.draw()
flip_time = win.flip()  # flip() returns the timestamp of this frame flip
io.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)

io.clearEvents('all')  # to clear the buffer of events such as key presses etc
kb.waitForPresses(keys=' ')  # similar to PsychoPy's waitKeys function

# Send some information to the ioDataStore as experiment messages,
# including the experiment and session id's, the calculated pixels per
# degree, display resolution, etc.

io.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
io.sendMessageEvent(
    text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
io.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(
    io.experimentID, io.experimentSessionID))
io.sendMessageEvent(
    text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(
        display.getIndex(), display.getPixelResolution(),
        display.getCoordinateType()))
io.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(
    *display.getPixelsPerDegree()))
io.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")

io.clearEvents('all')

## -- ## Start Trial Sequence ##--##

t = 0
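
Code Example #3 stops at t = 0, just before its trial loop. The sketch below shows how such a loop typically continues; it is assembled from Examples #1 and #2 rather than from the rest of this particular script, and it assumes trials, win, kb and io are defined by the surrounding code.

for trial in trials:
    # Wait for the space bar before starting the trial.
    flip_time = win.flip()
    io.clearEvents('all')
    kb.waitForPresses(keys=[' '])

    # Record which session/trial this row belongs to and mark the start time.
    trial['session_id'] = io.getSessionID()
    trial['trial_id'] = t + 1
    trial['TRIAL_START'] = flip_time
    io.sendMessageEvent(text="TRIAL_START", sec_time=flip_time)

    # ... draw and flip the stimuli for this trial here ...

    # Mark the end time, save the condition variables, and move on.
    flip_time = win.flip()
    trial['TRIAL_END'] = flip_time
    io.sendMessageEvent(text="TRIAL_END %d" % t, sec_time=flip_time)
    io.addRowToConditionVariableTable(trial.values())
    io.clearEvents('all')
    t += 1
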
Code Example #4
File: run.py  Project: NSalem/psychopy
    def run(self,*args):
        """
        The run method contains your experiment logic. In this example we:

        1) Load an xlsx file containing the trial conditions for use
           during the experiment. All DVs and IVs to be used or updated
           for each trial must be specified as columns in the xlsx file.
        2) Inform the ioDataStore of the trial conditions to be used, resulting in the
           creation of an experiment-specific results table, with a field for each
           DV and IV defined in the xlsx file.
        3) Run the eye tracking device's runSetupProcedure(), which allows
           the calibration, validation, etc. of the eye tracking system being used.
        4) Create the experiment runtime graphics, including creating a cache of
           images to be displayed for each trial of the experiment.
        5) Run the experimental block of trials of the demo. Each trial sequence
           consists of:
               a) The participant pressing the SPACE key to start the trial.
               b) Randomly displaying one of the background images for a trial.
               c) Starting recording of data from the eye tracker.
               d) Displaying a gaze contingent dot located at the gaze position reported by the eye tracker.
               e) Ending each trial by pressing the SPACE key.
               f) Sending any condition variable value changes for that trial
                  to the ioDataStore for easy future selection of device events
                  recorded during the trial or for specific condition variable values.
               g) Stopping of event recording on the eye tracker device.
        """

        exp_conditions=importConditions('trial_conditions.xlsx')
        trials = TrialHandler(exp_conditions,1)

        # Inform the ioDataStore that the experiment is using a
        # TrialHandler. The ioDataStore will create a table
        # which can be used to record the actual trial variable values (DV or IV)
        # in the order run / collected.
        #
        self.hub.createTrialHandlerRecordTable(trials)

        # Let's make some short-cuts to the devices we will be using
        # in this demo.
        try:
            tracker=self.hub.devices.tracker
        except Exception:
            # No eye tracker config found in iohub_config.yaml
            from psychopy.iohub.util import MessageDialog
            md = MessageDialog(title="No Eye Tracker Configuration Found",
                               msg="Update the iohub_config.yaml file by "
                               "uncommenting\nthe appropriate eye tracker "
                               "config lines.\n\nPress OK to exit demo.",
                               showButtons=MessageDialog.OK_BUTTON,
                               dialogType=MessageDialog.ERROR_DIALOG,
                               allowCancel=False,
                               display_index=0)
            md.show()
            return 1

        display=self.hub.devices.display
        kb=self.hub.devices.keyboard
        mouse=self.hub.devices.mouse

        # Start by running the eye tracker default setup procedure.
        # The details of the setup procedure (calibration, validation, etc)
        # are unique to each implementation of the Common Eye Tracker Interface.
        # All have the common end goal of calibrating the eye tracking system
        # prior to data collection.
        # Please see the eye tracker interface implementation details for the
        # hardware being used at:
        # http://www.isolver-solutions.com/iohubdocs/iohub/api_and_manual/device_details/eyetracker.html#eye-tracking-hardware-implementations
        #
        tracker.runSetupProcedure()

        # Create a psychopy window for the experiment graphics,
        # ioHub supports the use of one full screen window during
        # the experiment runtime. (If you are using a window at all).
        #
        res=display.getPixelResolution() # Current pixel resolution of the Display to be used
        coord_type=display.getCoordinateType()
        window=visual.Window(res,monitor=display.getPsychopyMonitorName(), # name of the PsychoPy Monitor Config file if used.
                                    units=coord_type, # coordinate space to use.
                                    fullscr=True, # We need full screen mode.
                                    allowGUI=False, # We want it to be borderless
                                    screen= display.getIndex() # The display index to use, assuming a multi display setup.
                                    )

        # Hide the 'system mouse cursor' during the experiment.
        #
        mouse.setSystemCursorVisibility(False)

        # Create a dict of image stim for trials and a gaze blob to show the
        # reported gaze position with.
        #
        image_cache=dict()
        image_names=['canal.jpg','fall.jpg','party.jpg','swimming.jpg','lake.jpg']
        for iname in image_names:
            image_cache[iname]=visual.ImageStim(window, image=os.path.join('./images/',iname),
                        name=iname,units=coord_type)

        # Create a circle to use for the Gaze Cursor. Current units assume pix.
        #
        gaze_dot =visual.GratingStim(window,tex=None, mask="gauss",
                                     pos=(0,0 ),size=(66,66),color='green',
                                                        units=coord_type)

        # Create a Text Stim for use on /instruction/ type screens.
        # Current units assume pix.
        instructions_text_stim = visual.TextStim(window, text='', pos = [0,0],
                                    height=24, color=[-1,-1,-1], colorSpace='rgb',
                                    alignHoriz='center', alignVert='center',
                                    wrapWidth=window.size[0]*.9)


        # Update Instruction Text and display on screen.
        # Send Message to ioHub DataStore with Exp. Start Screen display time.
        #
        instuction_text="Press Any Key to Start Experiment."
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time=window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)

        # Wait until a key event occurs after the instructions are displayed
        self.hub.clearEvents('all')
        kb.waitForPresses()

        # Send some information to the ioDataStore as experiment messages,
        # including the experiment and session id's, the calculated pixels per
        # degree, display resolution, etc.
        #
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
        self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
        self.hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID,self.hub.experimentSessionID))
        self.hub.sendMessageEvent(text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getIndex(),display.getPixelResolution(),display.getCoordinateType()))
        self.hub.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")

        self.hub.clearEvents('all')

        # For each trial in the set of trials within the current block.
        #
        t=0
        for trial in trials:
            # Update the instruction screen text to indicate
            # a trial is about to start.
            #
            instuction_text="Press Space Key To Start Trial %d"%t
            instructions_text_stim.setText(instuction_text)
            instructions_text_stim.draw()
            flip_time=window.flip()
            self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)


            # Wait until a space key press event occurs after the
            # start trial instructions have been displayed.
            #
            self.hub.clearEvents('all')
            kb.waitForPresses(keys=[' ',])


            # Space Key has been pressed, start the trial.
            # Set the current session and trial id values to be saved
            # in the ioDataStore for the upcoming trial.
            #

            trial['session_id']=self.hub.getSessionID()
            trial['trial_id']=t+1

            # Send a msg to the ioHub indicating that the trial started, and the time of
            # the first retrace displaying the trial stim.
            #
            self.hub.sendMessageEvent(text="TRIAL_START",sec_time=flip_time)


            # Start Recording Eye Data
            #
            tracker.setRecordingState(True)

            # Get the image stim for this trial.
            #
            imageStim=image_cache[trial['IMAGE_NAME']]
            imageStim.draw()
            flip_time=window.flip()
            # Clear all the events received prior to the trial start.
            #
            self.hub.clearEvents('all')
            # Send a msg to the ioHub indicating that the trial started,
            # and the time of the first retrace displaying the trial stim.
            #
            self.hub.sendMessageEvent(text="TRIAL_START",sec_time=flip_time)
            # Set the value of the trial start variable for this trial
            #
            trial['TRIAL_START']=flip_time

            # Loop until we get a keyboard event
            #
            run_trial=True
            while run_trial is True:
                # Get the latest gaze position in display coord space..
                #
                gpos=tracker.getPosition()
                if type(gpos) in [tuple,list]:
                    # If we have a gaze position from the tracker,
                    # redraw the background image and then the
                    # gaze_cursor at the current eye position.
                    #
                    gaze_dot.setPos([gpos[0],gpos[1]])
                    imageStim.draw()
                    gaze_dot.draw()
                else:
                    # Otherwise just draw the background image.
                    # This will remove the gaze cursor from the screen
                    # when the eye tracker is not successfully
                    # tracking eye position.
                    #
                    imageStim.draw()

                # Flip video buffers, displaying the stim we just
                # updated.
                #
                flip_time=window.flip()

                # Send an experiment message to the ioDataStore
                # indicating the time the image was drawn and
                # current position of gaze spot.
                #
                if type(gpos) in [tuple,list]:
                    self.hub.sendMessageEvent("IMAGE_UPDATE %s %.3f %.3f"%(
                                                trial['IMAGE_NAME'],gpos[0],gpos[1]),
                                                sec_time=flip_time)
                else:
                    self.hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]"%(
                                                trial['IMAGE_NAME']),
                                                sec_time=flip_time)

                # Check for any new keyboard press events for the space key.
                # If one is found, set the trial end variable and break
                # from the loop.
                if kb.getPresses(keys=[' ',]):
                    run_trial=False
                    break

            # The trial has ended, so update the trial end time condition value,
            # and send a message to the ioDataStore with the trial end time.
            #
            flip_time=window.flip()
            trial['TRIAL_END']=flip_time
            self.hub.sendMessageEvent(text="TRIAL_END %d"%t,sec_time=flip_time)

            # Stop recording eye data.
            # In this example, we have no use for any eye data
            # between trials, so there is no need to save it.
            #
            tracker.setRecordingState(False)

            # Save the experiment condition variable values for this
            # trial to the ioDataStore.
            #
            self.hub.addRowToConditionVariableTable(trial.values())

            # Clear all event buffers
            #
            self.hub.clearEvents('all')
            t+=1

        # All trials have been run, so end the experiment.
        #

        flip_time=window.flip()
        self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE',sec_time=flip_time)

        # Disconnect the eye tracking device.
        #
        tracker.setConnectionState(False)

        # The experiment is done, all trials have been run.
        # Clear the screen and show an 'experiment done' message using the
        # instructionScreen text.
        #
        instuction_text="Press Any Key to Exit Demo"
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time=window.flip()
        self.hub.sendMessageEvent(text="SHOW_DONE_TEXT",sec_time=flip_time)
        self.hub.clearEvents('all')
        # wait until any key is pressed
        kb.waitForPresses()
Code Example #5
File: run.py  Project: GentBinaku/psychopy
    def run(self,*args):
        """
        The run method contains your experiment logic. It is equivalent to what would be in your main PsychoPy experiment
        script.py file in a standard PsychoPy experiment setup. That is all there is to it, really.
        """

        self.trial_conditions=ExperimentVariableProvider('trial_conditions.xls',
                            'BLOCK',None,False,True)
        self.hub.initializeConditionVariableTable(self.trial_conditions) 
                                 
        selected_eyetracker_name=args[0]
        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        tracker=self.hub.devices.tracker
        display=self.hub.devices.display
        kb=self.hub.devices.kb
        mouse=self.hub.devices.mouse            
                    
        # Create a psychopy window, full screen resolution, full screen mode...
        #
        res=display.getPixelResolution()
        window=visual.Window(res,monitor=display.getPsychopyMonitorName(),
                                    units=display.getCoordinateType(),
                                    fullscr=True,
                                    allowGUI=False,
                                    screen= display.getIndex()
                                    )

        # Hide the 'system mouse cursor'
        #        
        mouse.setSystemCursorVisibility(False)

        # Start by running the eye tracker default setup procedure.
        # if validation results are returned, they would be in the form of a dict,
        # so print them, otherwise just check that EYETRACKER_OK was returned.
        #
        # minimize the psychopy experiment window
        #
        window.winHandle.minimize()
        result=tracker.runSetupProcedure()
        if isinstance(result,dict):
            print "Validation Accuracy Results: ", result
        elif result != EyeTrackerConstants.EYETRACKER_OK:
            print "An error occurred during eye tracker user setup: ",EyeTrackerConstants.getName(result)
        # restore the psychopy experiment window
        #
        window.winHandle.maximize()
        window.winHandle.activate()
        # Create a dict of image stim for trials and a gaze blob to show gaze position.
        #
        display_coord_type=display.getCoordinateType()
        image_cache=dict()
        image_names=['canal.jpg','fall.jpg','party.jpg','swimming.jpg','lake.jpg']

        for iname in image_names:
            image_cache[iname]=visual.ImageStim(window, image=os.path.join('./images/',iname), 
                        name=iname,units=display_coord_type)
                        
        gaze_dot =visual.GratingStim(window,tex=None, mask="gauss", 
                                     pos=(0,0 ),size=(66,66),color='green', 
                                                        units=display_coord_type)
        instructions_text_stim = visual.TextStim(window, text='', pos = [0,0], height=24, 
                       color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center', alignVert='center',wrapWidth=window.size[0]*.9)


        # Update Instruction Text and display on screen.
        # Send Message to ioHub DataStore with Exp. Start Screen display time.
        #
        instuction_text="Press Any Key to Start Experiment."
        instructions_text_stim.setText(instuction_text)        
        instructions_text_stim.draw()
        flip_time=window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)
        
        # wait until a key event occurs after the instructions are displayed
        self.hub.clearEvents('all')
        while not kb.getEvents():
            self.hub.wait(0.2)
            
        
        # Send some information to the ioHub DataStore as experiment messages
        # including the eye tracker being used for this session.
        #
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
        self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
        self.hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID,self.hub.experimentSessionID))
        self.hub.sendMessageEvent(text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getIndex(),display.getPixelResolution(),display.getCoordinateType()))
        self.hub.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))        
        self.hub.sendMessageEvent(text="Eye Tracker being Used: {0}".format(selected_eyetracker_name))
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")


        practice_blocks=self.trial_conditions.getPracticeBlocks()
        exp_blocks=self.trial_conditions.getExperimentBlocks()
        block_types=[practice_blocks,exp_blocks]
        
        for blocks in block_types:
            # for each block in the group of blocks.....
            for trial_set in blocks.getNextConditionSet():
                self.hub.clearEvents('all')
                t=0
                for trial in trial_set.getNextConditionSet():    
                    # Update the instruction screen text...
                    #            
                    instuction_text="Press Space Key To Start Trial %d"%t
                    instructions_text_stim.setText(instuction_text)        
                    instructions_text_stim.draw()
                    flip_time=window.flip()
                    self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)
                    
                    start_trial=False
                    
                    # wait until a space key 'press' event occurs after the instructions are displayed
                    self.hub.clearEvents('all')
                    while not start_trial:
                        for event in kb.getEvents(event_type_id=EventConstants.KEYBOARD_PRESS):
                            if event.key == ' ':
                                start_trial=True
                                break
                        self.hub.wait(0.2)
        
                    # So request to start trial has occurred...
                    # Clear the screen, start recording eye data, and clear all events
                    # received so far.
                    #
                    flip_time=window.flip()
                    trial['session_id']=self.hub.getSessionID()
                    trial['trial_id']=t+1 
                    trial['TRIAL_START']=flip_time
                    self.hub.sendMessageEvent(text="TRIAL_START",sec_time=flip_time)
                    self.hub.clearEvents('all')
                    tracker.setRecordingState(True)            
                    
                    # Get the image name for this trial
                    #
                    imageStim=image_cache[trial['IMAGE_NAME']]
        
                    # Loop until we get a keyboard event
                    #
                    run_trial=True
                    while run_trial is True:
                        # Get the latest gaze position in display coord space.
                        #
                        gpos=tracker.getLastGazePosition()
                        if isinstance(gpos,(tuple,list)):
                            # If we have a gaze position from the tracker, draw the 
                            # background image and then the gaze_cursor.
                            #
                            gaze_dot.setPos(gpos)
                            imageStim.draw()
                            gaze_dot.draw()
                        else:
                            # Otherwise just draw the background image.
                            #
                            imageStim.draw()
                        
                        # flip video buffers, updating the display with the stim we just
                        # updated.
                        #
                        flip_time=window.flip()   
                        
                        # Send a message to the ioHub Process / DataStore indicating 
                        # the time the image was drawn and current position of gaze spot.
                        #
                        if isinstance(gpos,(tuple,list)):
                            self.hub.sendMessageEvent("IMAGE_UPDATE %s %.3f %.3f"%(iname,gpos[0],gpos[1]),sec_time=flip_time)
                        else:
                            self.hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]"%(iname),sec_time=flip_time)
         
                        # Check any new keyboard char events for a space key.
                        # If one is found, set the trial end variable.
                        #
                        for event in kb.getEvents(event_type_id=EventConstants.KEYBOARD_PRESS):
                            if event.key == ' ':
                                run_trial=False
                                break
                
                    # So the trial has ended, send a message to the DataStore
                    # with the trial end time and stop recording eye data.
                    # In this example, we have no use for any eye data between trials, so there is no need to save it.
                    #
                    flip_time=window.flip()
                    trial['TRIAL_END']=flip_time
                    self.hub.sendMessageEvent(text="TRIAL_END %d"%t,sec_time=flip_time)
                    tracker.setRecordingState(False)
                    # Save the Experiment Condition Variable Data for this trial to the
                    # ioDataStore.
                    #
                    self.hub.addRowToConditionVariableTable(trial.tolist())             
                    self.hub.clearEvents('all')
                    t+=1

        # Disconnect the eye tracking device.
        #
        tracker.setConnectionState(False)

        # Update the instuction screen text...
        #            
        instuction_text="Press Any Key to Exit Demo"
        instructions_text_stim.setText(instuction_text)        
        instructions_text_stim.draw()
        flip_time=window.flip()
        self.hub.sendMessageEvent(text="SHOW_DONE_TEXT",sec_time=flip_time)
     
        # wait until any key is pressed
        self.hub.clearEvents('all')
        while not kb.getEvents(event_type_id=EventConstants.KEYBOARD_PRESS):
            self.hub.wait(0.2)
            
        # So the experiment is done, all trials have been run.
        # Clear the screen and show an 'experiment done' message using the
        # instructionScreen state. Wait for the trigger to exit that state
        # (i.e. the space key was pressed).
        #
        flip_time=window.flip()
        self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE',sec_time=flip_time)
Code Example #6
File: run.py  Project: tgwang/flask-sqlalchemy
    def run(self, *args):
        """
        The run method contains your experiment logic. It is equivalent to what would be in your main PsychoPy experiment
        script.py file in a standard PsychoPy experiment setup. That is all there is to it, really.
        """

        exp_conditions = importConditions('trial_conditions.xlsx')
        trials = TrialHandler(exp_conditions, 1)

        # Inform the ioDataStore that the experiment is using a
        # TrialHandler. The ioDataStore will create a table
        # which can be used to record the actual trial variable values (DV or IV)
        # in the order run / collected.
        #
        self.hub.createTrialHandlerRecordTable(trials)

        selected_eyetracker_name = args[0]
        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        tracker = self.hub.devices.tracker
        display = self.hub.devices.display
        kb = self.hub.devices.kb
        mouse = self.hub.devices.mouse

        # Create a psychopy window, full screen resolution, full screen mode...
        #
        res = display.getPixelResolution()
        window = visual.Window(res,
                               monitor=display.getPsychopyMonitorName(),
                               units=display.getCoordinateType(),
                               fullscr=True,
                               allowGUI=False,
                               screen=display.getIndex())

        # Start by running the eye tracker default setup procedure.
        # if validation results are returned, they would be in the form of a dict,
        # so print them, otherwise just check that EYETRACKER_OK was returned.
        #
        # minimize the psychopy experiment window
        #
        window.winHandle.minimize()

        result = tracker.runSetupProcedure()
        if isinstance(result, dict):
            print "Validation Accuracy Results: ", result

        # restore the psychopy experiment window
        #
        window.winHandle.maximize()
        window.winHandle.activate()

        # Create a dict of image stim for trials and a gaze blob to show gaze position.
        #
        display_coord_type = display.getCoordinateType()
        image_cache = dict()
        image_names = [
            'canal.jpg', 'fall.jpg', 'party.jpg', 'swimming.jpg', 'lake.jpg'
        ]

        for iname in image_names:
            image_cache[iname] = visual.ImageStim(window,
                                                  image=os.path.join(
                                                      './images/', iname),
                                                  name=iname,
                                                  units=display_coord_type)

        gaze_dot = visual.GratingStim(window,
                                      tex=None,
                                      mask="gauss",
                                      pos=(0, 0),
                                      size=(66, 66),
                                      color='green',
                                      units=display_coord_type)
        instructions_text_stim = visual.TextStim(window,
                                                 text='',
                                                 pos=[0, 0],
                                                 height=24,
                                                 color=[-1, -1, -1],
                                                 colorSpace='rgb',
                                                 alignHoriz='center',
                                                 alignVert='center',
                                                 wrapWidth=window.size[0] * .9)

        # Update Instruction Text and display on screen.
        # Send Message to ioHub DataStore with Exp. Start Screen display time.
        #
        instuction_text = "Press Any Key to Start Experiment."
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time = window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)

        # wait until a key event occurs after the instructions are displayed
        self.hub.clearEvents('all')
        while not kb.getEvents():
            self.hub.wait(0.2)

        # Send some information to the ioHub DataStore as experiment messages
        # including the eye tracker being used for this session.
        #
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
        self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(
            getCurrentDateTimeString()))
        self.hub.sendMessageEvent(
            text="Experiment ID: {0}, Session ID: {1}".format(
                self.hub.experimentID, self.hub.experimentSessionID))
        self.hub.sendMessageEvent(
            text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".
            format(display.getIndex(), display.getPixelResolution(),
                   display.getCoordinateType()))
        self.hub.sendMessageEvent(
            text="Calculated Pixels Per Degree: {0} x, {1} y".format(
                *display.getPixelsPerDegree()))
        self.hub.sendMessageEvent(text="Eye Tracker being Used: {0}".format(
            selected_eyetracker_name))
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")

        self.hub.clearEvents('all')
        t = 0
        for trial in trials:
            # Update the instruction screen text...
            #
            instuction_text = "Press Space Key To Start Trial %d" % t
            instructions_text_stim.setText(instuction_text)
            instructions_text_stim.draw()
            flip_time = window.flip()
            self.hub.sendMessageEvent(text="EXPERIMENT_START",
                                      sec_time=flip_time)

            start_trial = False

            # wait until a space key 'press' event occurs after the instructions are displayed
            self.hub.clearEvents('all')
            while not start_trial:
                for event in kb.getEvents(
                        event_type_id=EventConstants.KEYBOARD_PRESS):
                    if event.key == ' ':
                        start_trial = True
                        break
                self.hub.wait(0.2)

            # So request to start trial has occurred...
            # Clear the screen, start recording eye data, and clear all events
            # received so far.
            #
            flip_time = window.flip()
            trial['session_id'] = self.hub.getSessionID()
            trial['trial_id'] = t + 1
            trial['TRIAL_START'] = flip_time
            self.hub.sendMessageEvent(text="TRIAL_START", sec_time=flip_time)
            self.hub.clearEvents('all')
            tracker.setRecordingState(True)

            # Get the image name for this trial
            #
            imageStim = image_cache[trial['IMAGE_NAME']]

            # Loop until we get a keyboard event
            #
            run_trial = True
            while run_trial is True:
                # Get the latest gaze position in display coord space.
                #
                gpos = tracker.getLastGazePosition()
                if isinstance(gpos, (tuple, list)):
                    # If we have a gaze position from the tracker, draw the
                    # background image and then the gaze_cursor.
                    #
                    gaze_dot.setPos(gpos)
                    imageStim.draw()
                    gaze_dot.draw()
                else:
                    # Otherwise just draw the background image.
                    #
                    imageStim.draw()

                # flip video buffers, updating the display with the stim we just
                # updated.
                #
                flip_time = window.flip()

                # Send a message to the ioHub Process / DataStore indicating
                # the time the image was drawn and current position of gaze spot.
                #
                if isinstance(gpos, (tuple, list)):
                    self.hub.sendMessageEvent("IMAGE_UPDATE %s %.3f %.3f" %
                                              (iname, gpos[0], gpos[1]),
                                              sec_time=flip_time)
                else:
                    self.hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]" %
                                              (iname),
                                              sec_time=flip_time)

                # Check any new keyboard char events for a space key.
                # If one is found, set the trial end variable.
                #
                for event in kb.getEvents(
                        event_type_id=EventConstants.KEYBOARD_PRESS):
                    if event.key == ' ':
                        run_trial = False
                        break

            # So the trial has ended, send a message to the DataStore
            # with the trial end time and stop recording eye data.
            # In this example, we have no use for any eye data between trials, so there is no need to save it.
            #
            flip_time = window.flip()
            trial['TRIAL_END'] = flip_time
            self.hub.sendMessageEvent(text="TRIAL_END %d" % t,
                                      sec_time=flip_time)
            tracker.setRecordingState(False)
            # Save the Experiment Condition Variable Data for this trial to the
            # ioDataStore.
            #
            self.hub.addRowToConditionVariableTable(trial.values())
            self.hub.clearEvents('all')
            t += 1

        # Disconnect the eye tracking device.
        #
        tracker.setConnectionState(False)

        # Update the instuction screen text...
        #
        instuction_text = "Press Any Key to Exit Demo"
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time = window.flip()
        self.hub.sendMessageEvent(text="SHOW_DONE_TEXT", sec_time=flip_time)

        # wait until any key is pressed
        self.hub.clearEvents('all')
        while not kb.getEvents(event_type_id=EventConstants.KEYBOARD_PRESS):
            self.hub.wait(0.2)

        # So the experiment is done, all trials have been run.
        # Clear the screen and show an 'experiment done' message using the
        # instructionScreen state. Wait for the trigger to exit that state
        # (i.e. the space key was pressed).
        #
        flip_time = window.flip()
        self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE',
                                  sec_time=flip_time)
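
Examples #5 and #6 wait for the space bar by polling kb.getEvents() every 200 ms, whereas Examples #2 and #4 use the blocking kb.waitForPresses() call. The sketch below simply places the two patterns side by side; kb and self.hub are the device/connection shortcuts from the examples above, and the EventConstants import path may differ between psychopy/iohub versions.

from psychopy.iohub import EventConstants  # import path is an assumption; adjust to your version

# 1) Polling, as in Examples #5 and #6: check for new KEYBOARD_PRESS events
#    until a space press arrives, sleeping 200 ms between checks.
start_trial = False
self.hub.clearEvents('all')
while not start_trial:
    for event in kb.getEvents(event_type_id=EventConstants.KEYBOARD_PRESS):
        if event.key == ' ':
            start_trial = True
            break
    self.hub.wait(0.2)

# 2) Blocking, as in Examples #2 and #4: returns only once a matching press occurs.
self.hub.clearEvents('all')
kb.waitForPresses(keys=[' '])
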
Code Example #7
 def run(self, *args):
     from .switch import Switch
     # Hardware configuration
     kb = self.hub.devices.keyboard
     display = self.hub.devices.display
     try:
         tracker = self.hub.devices.tracker
         tracker.runSetupProcedure()
         tracker.setRecordingState(False)
     except Exception:
         md = MessageDialog(
             title=u"No Eye Tracker Configuration Found",
             msg=
             u"No eyetracker selected/found.\nCheck your configuration profile.",
             showButtons=MessageDialog.OK_BUTTON,
             dialogType=MessageDialog.ERROR_DIALOG,
             allowCancel=False,
             display_index=0)
         md.show()
         core.quit()
     # Main window configuration
     win = visual.Window(size=display.getPixelResolution(),
                         monitor=display.getPsychopyMonitorName(),
                         fullscr=True,
                         units=display.getCoordinateType(),
                         allowGUI=False,
                         screen=display.getIndex())
     # Get experiment content
     execution = self.__experiment.get_execution(win=win)
     if len(execution[u"test_sequence"]) == 0:
         md = MessageDialog(
             title=u"No test available",
             msg=
             u"No available tests selected/found.\nCheck the experiment settings.",
             showButtons=MessageDialog.OK_BUTTON,
             dialogType=MessageDialog.ERROR_DIALOG,
             allowCancel=False,
             display_index=0)
         md.show()
         win.close()
         core.quit()
     # Show instructions
     text_scr = visual.TextStim(
         win=win,
         text=u"",
         pos=(0, 0),
         height=24,
         wrapWidth=win.size[0] * 0.9,
         color=u"white",
         alignHoriz=u"center",
         alignVert=u"center",
     )
     text_scr.setText(text=execution[u"instructions"] +
                      u"\n\nPress Any Key to Start Experiment.")
     text_scr.draw()
     flip_time = win.flip()
     self.hub.clearEvents(u"all")
     kb.waitForPresses()
     # Start Data Logging
     self.hub.sendMessageEvent(text=u"======= EXPERIMENT START =======",
                               sec_time=flip_time)
     self.hub.sendMessageEvent(text=u"============= INFO =============")
     self.hub.sendMessageEvent(
         text=u"Date:          {0}.".format(getCurrentDateTimeString()))
     self.hub.sendMessageEvent(
         text=u"Experiment ID: {0}.".format(self.hub.experimentID))
     self.hub.sendMessageEvent(
         text=u"Session    ID: {0}.".format(self.hub.experimentSessionID))
     self.hub.sendMessageEvent(
         text=u"Screen (ID, Size, CoordType): ({0}, {1}, {2}).".format(
             display.getIndex(), display.getPixelResolution(),
             display.getCoordinateType()))
     self.hub.sendMessageEvent(
         text=u"Screen Calculated Pixels Per Degree (x, y): ({0}, {1}).".
         format(*display.getPixelsPerDegree()))
     self.hub.sendMessageEvent(text=u"=========== END INFO ===========")
     self.hub.sendMessageEvent(text=u"===== TESTS SEQUENCE START =====")
     # Experiment presentation
     sequence_index = 0
     sequence_count = len(execution[u"test_sequence"])
     while sequence_index < sequence_count:
         # Test selection
         test_index = execution[u"test_sequence"][sequence_index]
         test = execution[u"test_data"][test_index]
         # Pre-Test action
         if (execution[u"rest"][u"active"] and sequence_index > 0
                 and sequence_index % execution[u"rest"][u"period"] == 0):
             text_scr.setText(text=u"Rest time:\n\n{0}[s]".format(
                 execution[u"rest"][u"time"]))
             text_scr.draw()
             flip_time = win.flip()
             self.hub.sendMessageEvent(
                 text=u"Rest Time (Test Count, Time[s]): ({0}, {1}).".
                 format(sequence_index, execution[u"rest"][u"time"]))
             core.wait(execution[u"rest"][u"time"])
             self.hub.sendMessageEvent(text=u"Rest Time Finished.")
         if execution[u"space_start"]:
             text_scr.setText(
                 text=u"Test: {0}\n\nPress Space to Start Experiment.".
                 format(test[u"name"]))
             text_scr.draw()
             flip_time = win.flip()
             self.hub.sendMessageEvent(
                 text=
                 u"Waiting User Input to Start Test (ID, Name): ({0}, {1})."
                 .format(test_index, test[u"name"]))
             self.hub.clearEvents(u"all")
             kb.waitForPresses(keys=[
                 u" ",
             ])
             self.hub.sendMessageEvent(text=u"User Input Received.")
         # Test presentation
         timer = core.Clock()
         this_frame = None
         next_frame = None
         frame_index = -1
         frame_count = len(test[u"frames"])
         test_end = False
         test_state = u"buffer"
         self.hub.sendMessageEvent(
             text=u"Test Start (ID, Name): ({0}, {1}).".format(
                 test_index, test[u"name"]))
         tracker.setRecordingState(True)
         while not test_end:
             with Switch(test_state) as case:
                 if case(u"buffer"):
                     next_index = frame_index + 1
                     if next_index < frame_count:
                         next_frame = test[u"frames"][next_index]
                         next_frame[u"background"].draw()
                         for component in next_frame[u"components"]:
                             component.draw()
                         self.hub.sendMessageEvent(
                             text=u"Frame Loaded (ID, Name): ({0}, {1}).".
                             format(next_index, next_frame[u"name"]))
                     else:
                         self.hub.sendMessageEvent(
                             text=u"No More Frames Available.")
                     test_state = u"flip" if next_index == 0 else u"loop"
                 elif case(u"flip"):
                     if frame_index + 1 == frame_count:
                         test_state = u"end"
                         break
                     this_frame = next_frame
                     frame_index += 1
                     if this_frame[u"is_task"]:
                         self.hub.sendMessageEvent(
                             text=
                             u"Frame Started (ID, Type, Time): ({0}, {1}, {2})."
                             .format(frame_index, u"Task",
                                     u"User dependent"))
                     else:
                         self.hub.sendMessageEvent(
                             text=
                             u"Frame Started (ID, Type, Time): ({0}, {1}, {2})."
                             .format(frame_index, u"Timed",
                                     this_frame[u"time"]))
                     flip_time = win.flip()
                     timer.reset()
                     if self.__frame_save:
                         frame_name = u"Test[{0}]_Frame[{1}].png".format(
                             test_index, frame_index)
                         win.getMovieFrame()
                         win.saveMovieFrames(self.__frame_save_path +
                                             frame_name)
                     test_state = u"buffer"
                 elif case(u"loop"):
                     if this_frame[u"is_task"]:
                         key_pressed = kb.waitForPresses(
                             keys=this_frame[u"allowed_keys"])
                          key_pressed = unicode(key_pressed[-1].key).replace(
                              u" ", u"space")
                         self.hub.sendMessageEvent(
                             text=u"Frame Ended ({0}): ({1})".format(
                                 u"ID, Time, Selected Key, Correct Key",
                                 u"{0}, {1}, {2}, {3}".format(
                                     frame_index, timer.getTime(),
                                     key_pressed,
                                     this_frame[u"correct_keys_str"])))
                         test_state = u"flip"
                     elif timer.getTime() >= this_frame[u"time"]:
                         self.hub.sendMessageEvent(
                             text=u"Frame Ended ({0}): ({1})".format(
                                 u"ID, Time", u"{0}, {1}".format(
                                     frame_index, timer.getTime())))
                         test_state = u"flip"
                     if kb.getKeys(keys=[
                             u"escape",
                             u"q",
                     ]):
                         self.hub.sendMessageEvent(
                             text=u"== EXPERIMENT ENDED: BY USER  == ")
                         md = MessageDialog(
                             title=u"Warning",
                             msg=u"Experiment ended by user.",
                             showButtons=MessageDialog.OK_BUTTON,
                             dialogType=MessageDialog.ERROR_DIALOG,
                             allowCancel=False,
                             display_index=0)
                         md.show()
                         self.hub.quit()
                         win.close()
                         core.quit()
                 else:
                     test_end = True
         tracker.setRecordingState(False)
         self.hub.sendMessageEvent(
             text=u"Test End (ID, Name): ({0}, {1}).".format(
                 test_index, test[u"name"]))
         sequence_index += 1
     # =======================================
     # Experiment exit
     # =======================================
     self.hub.sendMessageEvent(text=u"== EXPERIMENT ENDED: NORMALLY == ")
     self.hub.quit()
     win.close()
     core.quit()
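
The frame loop above drives its state machine through a `Switch` context manager (`with Switch(test_state) as case:` followed by `case(u"buffer")`-style tests). That helper is not part of the standard library and is not shown in this listing; a minimal stand-in that matches only the usage seen here could look like the sketch below (an assumption, not the project's actual implementation).

class Switch(object):
    """Minimal context-manager style switch; a sketch matching the usage above."""

    def __init__(self, value):
        self.value = value

    def __enter__(self):
        # The `as case` target is a callable that tests candidate values.
        return self.match

    def __exit__(self, exc_type, exc_value, traceback):
        # Do not suppress exceptions raised inside the with-block.
        return False

    def match(self, candidate):
        return self.value == candidate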
コード例 #8
0
    def run(self, *args):
        """
        The run method contains your experiment logic. It is equivalent to what would be in your main PsychoPy
        experiment script.py file in a standard PsychoPy experiment setup. That is all there is to it, really.
        """

        selected_eyetracker_name = args[0]
        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        tracker = self.hub.devices.tracker
        display = self.hub.devices.display
        kb = self.hub.devices.kb
        mouse = self.hub.devices.mouse

        # Start by running the eye tracker default setup procedure.
        # if validation results are returned, they would be in the form of a dict,
        # so print them, otherwise just check that EYETRACKER_OK was returned.
        #
        result = tracker.runSetupProcedure()
        if isinstance(result, dict):
            print("Validation Accuracy Results: {0}".format(result))
        elif result != EyeTrackerConstants.EYETRACKER_OK:
            print("An error occurred during eye tracker user setup: {0}".format(
                EyeTrackerConstants.getName(result)))

        # Create a psychopy window, full screen resolution, full screen mode...
        #
        window = FullScreenWindow(display)

        # Hide the 'system mouse cursor'
        #
        mouse.setSystemCursorVisibility(False)

        # Create a dict of image stim for trials and a gaze blob to show gaze position.
        #
        display_coord_type = display.getCoordinateType()
        image_cache = dict()
        image_names = [
            './images/party.png', './images/desert.png',
            './images/jellyfish.png', './images/lighthouse.png',
            './images/swimming.png'
        ]

        for iname in image_names:
            image_cache[iname] = visual.ImageStim(window,
                                                  image=iname,
                                                  name=iname[iname.rfind('/') +
                                                             1:],
                                                  units=display_coord_type)

        gaze_dot = visual.GratingStim(window,
                                      tex=None,
                                      mask="gauss",
                                      pos=(0, 0),
                                      size=(66, 66),
                                      color='green',
                                      units=display_coord_type)
        instructions_text_stim = visual.TextStim(window,
                                                 text='',
                                                 pos=[0, 0],
                                                 height=24,
                                                 color=[-1, -1, -1],
                                                 colorSpace='rgb',
                                                 alignHoriz='center',
                                                 alignVert='center',
                                                 wrapWidth=window.size[0] * .9)

        # Update Instruction Text and display on screen.
        # Send Message to ioHub DataStore with Exp. Start Screen display time.
        #
        instuction_text = "Press Any Key to Start Experiment."
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time = window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)

        # wait until a key event occurs after the instructions are displayed
        self.hub.clearEvents('all')
        while not kb.getEvents():
            self.hub.wait(0.2)

        # Send some information to the ioHub DataStore as experiment messages
        # including the eye tracker being used for this session.
        #
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
        self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(
            getCurrentDateTimeString()))
        self.hub.sendMessageEvent(
            text="Experiment ID: {0}, Session ID: {1}".format(
                self.hub.experimentID, self.hub.experimentSessionID))
        self.hub.sendMessageEvent(
            text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".
            format(display.getIndex(), display.getPixelResolution(),
                   display.getCoordinateType()))
        self.hub.sendMessageEvent(
            text="Calculated Pixels Per Degree: {0} x, {1} y".format(
                *display.getPixelsPerDegree()))
        self.hub.sendMessageEvent(text="Eye Tracker being Used: {0}".format(
            selected_eyetracker_name))
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")

        # randomize image order for trial of each demo session.
        #
        shuffle(image_names)

        # For each image loaded, run a trial that displays that image with a
        # gaze overlay.
        #
        for t, iname in enumerate(image_names):
            # Update the instruction screen text...
            #
            instruction_text = "Press Space Key To Start Trial %d" % t
            instructions_text_stim.setText(instruction_text)
            instructions_text_stim.draw()
            flip_time = window.flip()
            self.hub.sendMessageEvent(text="EXPERIMENT_START",
                                      sec_time=flip_time)

            start_trial = False

            # wait until a space key 'press' event occurs after the instructions are displayed
            self.hub.clearEvents('all')
            while not start_trial:
                for event in kb.getEvents(
                        event_type_id=EventConstants.KEYBOARD_PRESS):
                    if event.key == ' ':
                        start_trial = True
                        break
                self.hub.wait(0.2)

            # So the request to start the trial has occurred...
            # Clear the screen, start recording eye data, and clear all events
            # received so far.
            #
            flip_time = window.flip()
            self.hub.sendMessageEvent(text="TRIAL_START", sec_time=flip_time)
            self.hub.clearEvents('all')
            tracker.setRecordingState(True)

            # Get the image stim for this trial
            #
            imageStim = image_cache[iname]

            # Loop until we get a keyboard event
            #
            run_trial = True
            while run_trial is True:
                # Get the latest gaze position in display coord space.
                #
                gpos = tracker.getLastGazePosition()
                if isinstance(gpos, (tuple, list)):
                    # If we have a gaze position from the tracker, draw the
                    # background image and then the gaze_cursor.
                    #
                    gaze_dot.setPos(gpos)
                    imageStim.draw()
                    gaze_dot.draw()
                else:
                    # Otherwise just draw the background image.
                    #
                    imageStim.draw()

                # flip video buffers, updating the display with the stim we just
                # updated.
                #
                flip_time = window.flip()

                # Send a message to the ioHub Process / DataStore indicating
                # the time the image was drawn and current position of gaze spot.
                #
                if isinstance(gpos, (tuple, list)):
                    self.hub.sendMessageEvent("IMAGE_UPDATE %s %.3f %.3f" %
                                              (iname, gpos[0], gpos[1]),
                                              sec_time=flip_time)
                else:
                    self.hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]" %
                                              (iname),
                                              sec_time=flip_time)

                # Check any new keyboard char events for a space key.
                # If one is found, set the trial end variable.
                #
                for event in kb.getEvents(
                        event_type_id=EventConstants.KEYBOARD_PRESS):
                    if event.key == ' ':
                        run_trial = False
                        break

            # So the trial has ended; send a message to the DataStore
            # with the trial end time and stop recording eye data.
            # In this example we have no use for eye data between trials, so there is no need to save it.
            #
            flip_time = window.flip()
            self.hub.sendMessageEvent(text="TRIAL_END %d" % t,
                                      sec_time=flip_time)
            tracker.setRecordingState(False)
            self.hub.clearEvents('all')

        # Disconnect the eye tracking device.
        #
        tracker.setConnectionState(False)

        # Update the instruction screen text...
        #
        instruction_text = "Press Any Key to Exit Demo"
        instructions_text_stim.setText(instruction_text)
        instructions_text_stim.draw()
        flip_time = window.flip()
        self.hub.sendMessageEvent(text="SHOW_DONE_TEXT", sec_time=flip_time)

        # wait until any key is pressed
        self.hub.clearEvents('all')
        while not kb.getEvents(event_type_id=EventConstants.KEYBOARD_PRESS):
            self.hub.wait(0.2)

        # So the experiment is done, all trials have been run.
        # Clear the screen and show an 'experiment done' message using the
        # instructionScreen state. Wait for the trigger to exit that state
        # (i.e. the space key was pressed).
        #
        flip_time = window.flip()
        self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE',
                                  sec_time=flip_time)
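
This run() method targets the older psychopy.iohub ExperimentRuntime API, so a script built around it is usually launched from a small __main__ block. The sketch below is an assumption rather than part of the original listing: it presumes the method lives in an ioHubExperimentRuntime subclass named ExperimentRuntime, that an experiment_config.yaml / iohub_config.yaml pair sits next to the script, and that start() forwards its arguments to run() (which reads the eye tracker label from args[0]). The tracker name is purely illustrative.

if __name__ == "__main__":
    from psychopy.iohub import module_directory

    # Directory containing this script and its experiment_config.yaml (assumed layout).
    configuration_directory = module_directory(ExperimentRuntime.run)
    runtime = ExperimentRuntime(configuration_directory, "experiment_config.yaml")
    # start() is assumed to hand its arguments through to run(); the example's
    # run() expects the selected eye tracker name as args[0].
    runtime.start("EyeLink 1000 (illustrative name)")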
コード例 #9
0
    def run(self, *args):
        """
        The run method contains your experiment logic. It is equivalent to what would be in your main PsychoPy
        experiment script.py file in a standard PsychoPy experiment setup. That is all there is to it, really.
        """
        global subject_id
        global con
        
        from psychopy.iohub import module_directory
        
        exp_script_dir = module_directory(self.run)
        #exp_conditions = importConditions(os.path.join(exp_script_dir,
        #                                               'trial_conditions.xlsx'))
                                                       
        #TrialHandler(trialList, nReps, method='random', dataTypes=None, extraInfo=None, seed=None, originPath=None, name='', autoLog=True)
        #trials = TrialHandler(exp_conditions, 1) # 1 - number of repetitions, how do we use conditions lets try to comment out
        # Inform the ioDataStore that the experiment is using a
        # TrialHandler. The ioDataStore will create a table
        # which can be used to record the actual trial variable values (DV or IV)
        # in the order run / collected.
        #
        #self.hub.createTrialHandlerRecordTable(trials)
        
        #Use Esc to quit; this is called at several stages during the experiment
        def _checkQuit(key):
            if key[0] == 'escape':
                # os._exit terminates the process immediately
                os._exit(1)
        
        ###########
        #### Experiment functions
        ###########
        def to_output(subject_id, decision, trigger_value, i_d, output_file_dir, reaction_time, reaction_time_decision_screen):

            import os.path 
            global con

            is_exist = False
            if os.path.exists(output_file_dir): is_exist = True

            # Add a header to the output file the first time it is written to
            if not is_exist:
                output_data_headers = ['Subject_id', 'Condition', 'Decision', 'Trigger', 'Item number',
                                       'c1', 'c2', 'c3', 'c4', 'c5', 'c6',
                                       'm1', 'm2', 'm3', 'm4', 'm5', 'm6',
                                       'Reaction time', 'Reaction time since decision screen start']

            # Python 2 file mode; for Python 3 use:
            #     with open(output_file_dir, 'a', newline='') as f:
            with open(output_file_dir, 'ab') as f:
                writer = csv.writer(f)
                
                if not is_exist:
                    writer.writerows([output_data_headers])
                writer.writerows([[subject_id, con, decision, trigger_value, 
                i_d[0],i_d[1],i_d[2],i_d[3],i_d[4],i_d[5],
                i_d[6],i_d[7],i_d[8],i_d[9],i_d[10],i_d[11],i_d[12],
                reaction_time, reaction_time_decision_screen]])
                
        def to_output_eyetracking(subject_id, x, y, gazetime, trigger, output_eye_file_dir):

            import os.path 

            is_exist = False
            if os.path.exists(output_eye_file_dir): is_exist = True

            # Add a header to the output file the first time it is written to
            if not is_exist:
                output_data_headers = ['Subject_id', 'x', 'y', 'gazetime', 'Trigger']

            # Python 2 file mode; for Python 3 use:
            #     with open(output_eye_file_dir, 'a', newline='') as f:
            with open(output_eye_file_dir, 'ab') as f:
                writer = csv.writer(f)
                if not is_exist:
                    writer.writerows([output_data_headers])
                
                writer.writerows([[subject_id, x, y, gazetime, trigger]])
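
        # A Python 3 friendly variant of the append-to-CSV pattern used by
        # to_output() and to_output_eyetracking() above; only a sketch, and not
        # called anywhere (assumption: the only change needed is opening the
        # file in text mode with newline='' instead of binary 'ab' mode).
        def append_csv_row(path, headers, row):
            import os.path
            write_header = not os.path.exists(path)
            with open(path, 'a', newline='') as f:
                writer = csv.writer(f)
                if write_header:
                    writer.writerow(headers)
                writer.writerow(row)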
                
        def row_to_condition(row):
            # Split a semicolon-delimited row string into a list of condition values.
            return row.split(';')
                
        def read_input_file(csv_dir, item_number):
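            # Each row of the input file is expected to be a single
            # semicolon-separated string: the item number followed by the
            # twelve condition values (logged later by to_output() as the
            # c1-c6 / m1-m6 columns).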
    
            i = 0
            with open(csv_dir, 'rb') as csvfile:
                spamreader = csv.reader(csvfile, delimiter='\n', quotechar='|')
                for row in spamreader:
                    i = i + 1
                    if i == item_number:
                        return row
            return 0
            
        def monitor_coordinate_check(win):
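            # Draws a labelled pixel-coordinate grid (horizontal lines every
            # 10 px, vertical lines every 20 px) so on-screen pixel coordinates
            # can be checked against the display / eye tracker setup.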
            for i in range(90):
                
                
                texti = str(-450+10*i) #-display_resolution[1]/2
                
                pixel_line_y = visual.ShapeStim(win, units='pix', lineWidth=1.5,lineColor=(55,255,255),lineColorSpace='rgb255', vertices=((-750, -450+10*i),(750, -450+10*i)),closeShape=False, pos=(0, 0), size=1.2)
                pixel_name_y = visual.TextStim(win, text='y='+texti, height=10, units='pix', pos = [0,-450+10*i],color=[255,55,255],colorSpace='rgb255')
                
                texti = str(-800+i*20) #-display_resolution[0]/2
                
                pixel_line_x = visual.ShapeStim(win, units='pix', lineWidth=1.5,lineColor=(155,255,55),lineColorSpace='rgb255', vertices=((-800+i*20, -450),(-800+i*20, 450)),closeShape=False, pos=(0, 0), size=1) #what size param
                pixel_name_x = visual.TextStim(win, text=texti, height=9, units='pix', pos = [-800+i*20,0],color=[255,55,55],colorSpace='rgb255')
                
                pixel_line_x.draw()
                pixel_line_y.draw()
                pixel_name_x.draw()
                pixel_name_y.draw()
            
           
            win.flip()
            
        def draw_input(win, item_array_text, item_array_x, item_array_y):
            global con
            
            item_left = item_array_text[1:7]
            item_right = item_array_text[7:13]
            print(item_array_text, item_left, item_right)
            random.Random(con).shuffle(item_left)
            random.Random(con).shuffle(item_right)
            print(item_array_text, item_left, item_right)
            item_array_text_shuffled = item_left + item_right
            print(item_array_text_shuffled)
            
            for i in range(len(item_array_x)):
                #print(item_array_x[i], item_array_y[i], i, len(item_array_x), len(item_array_text), item_array_text)
                whitebox = visual.ShapeStim(win, units='pix', lineWidth=1.5,
                                            lineColor=(255,255,255),lineColorSpace='rgb255', 
                                            vertices=((item_array_x[i]+20, item_array_y[i]+20),
                                            (item_array_x[i]+20, item_array_y[i]-20),
                                            (item_array_x[i]-20, item_array_y[i]-20),
                                            (item_array_x[i]-20, item_array_y[i]+20)),
                                            closeShape=True, 
                                            fillColor = (255,255,255), fillColorSpace='rgb255',
                                            pos=(0, 0), size=1) #what size param
                #uncomment white box in case want to create different background on values
                #whitebox.draw() 
                
                # index 0 of item_array_text holds the item number, so the shuffled list built above already excludes it
                item_value = visual.TextStim(win, text=item_array_text_shuffled[i], height=14, units='pix',
                                             pos=[item_array_x[i], item_array_y[i]], color=[0, 0, 0], colorSpace='rgb255')
                
                item_value.draw()
                
            win.flip(clearBuffer=False)
            
        
        def logs_windows(log_text, log_key_to_proceed):
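            # Shows a text screen and blocks until log_key_to_proceed is
            # pressed; returns the key list from event.waitKeys().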

            start_message = visual.TextStim(win, text=log_text, pos = [0,0], height=35,color=[255,255,255],colorSpace='rgb255',wrapWidth=win.size[0]*.9)
            start_message.draw()
            win.flip()
            core.wait(0.2)
            key=event.waitKeys(keyList=[log_key_to_proceed])
            return key

        def instructions_blank_screen(win, output_eye_dir):
            
            #uncomment in case want to draw gaze dot
            '''timer = core.Clock()
            timer.add(0.5)
            while timer.getTime()<0:
                print('precise timing bl', timer.getTime())'''

            draw_gaze_dot(win, 1001, 0.5, output_eye_dir)
            self.hub.clearEvents('all')
        
        def instructions_fixation_cross(win, output_eye_dir):

            #inst_dir = 'Instructions\\fixation_cross.jpg'
            #instr=visual.ImageStim(win,image=inst_dir, units='pix', size = display_resolution)
            #instr.draw()
            
            fixation_cross = visual.TextStim(win, text='+', pos = [-595,345], height=54,color=[-1,-1,-1],colorSpace='rgb')
            fixation_cross.autoDraw = True
            win.flip()
            
            #uncomment in case we want to see the fixation
            draw_gaze_dot(win, 2001, 0.5, output_eye_dir) #change 0.5 seconds

            #uncomment in case of coordinate monitor greed 
            #monitor_coordinate_check(win)
            
            #comment in case of monitor coordinate check
            #win.flip() 
            fixation_cross.autoDraw = False
            self.hub.clearEvents('all')
            
        def draw_table_lines(win):
            global con
            print('con', con)
            
            table_rectangle = visual.ShapeStim(win, units='pix', lineWidth=1.5,
                                            lineColor=(25,25,25),lineColorSpace='rgb255', 
                                            vertices=((-225, 375), (200,375),(200,-395),(-225,-395)),
                                            closeShape=True,
                                            pos=(0, 0), size=1)
            table_rectangle2 = visual.ShapeStim(win, units='pix', lineWidth=1.5,
                                            lineColor=(25,25,25),lineColorSpace='rgb255', 
                                            vertices=((235, 375), (650,375),(650,-395),(235,-395)),
                                            closeShape=True,
                                            pos=(0, 0), size=1)
            line1 = visual.ShapeStim(win, units='pix', lineWidth=1.5,
                                            lineColor=(25,25,25),lineColorSpace='rgb255', 
                                            vertices=((-225, 327), (200,327)),
                                            pos=(0, 0), size=1)
            line2 = visual.ShapeStim(win, units='pix', lineWidth=1.5,
                                            lineColor=(25,25,25),lineColorSpace='rgb255', 
                                            vertices=((235, 327), (650,327)),
                                            pos=(0, 0), size=1)
            line_dotted1 = visual.Line(win, start=(-660, 280), end=(650, 280),lineColor=(25,25,25),lineColorSpace='rgb255')
            line_dotted2 = visual.Line(win, start=(-660, 148), end=(650, 148),lineColor=(25,25,25),lineColorSpace='rgb255')
            line_dotted3 = visual.Line(win, start=(-660, 40), end=(650, 40),lineColor=(25,25,25),lineColorSpace='rgb255')
            line_dotted4 = visual.Line(win, start=(-660, -70), end=(650, -70),lineColor=(25,25,25),lineColorSpace='rgb255')
            line_dotted5 = visual.Line(win, start=(-660, -175), end=(650, -175),lineColor=(25,25,25),lineColorSpace='rgb255')
            line_dotted6 = visual.Line(win, start=(-660, -284), end=(650, -284),lineColor=(25,25,25),lineColorSpace='rgb255')
            
            text = ['Number of participating countries', 'Costs to average household per \n month',
            'Share of emission represented by \nparticipating countries', 'Distribution of cost from \nimplementing the agreement',
            'Sanctions for missing emission \nreduction targets', 'Monitoring: Emission reductions \nwill be monitored by']
            
            #shuffle text, put text items in an array
            random.Random(con).shuffle(text)
            
            for i in range(6):
                start_message = visual.TextStim(win, text=text[i], pos = [-640,215-i*112], 
                                                height=24,color=[25,25,25],colorSpace='rgb255'
                                                ,wrapWidth=win.size[0]*.9, alignHoriz='left')
                start_message.draw()
            agreement_message =('Agreement 1','Agreement 2')
            agreement_message1 = visual.TextStim(win, text=agreement_message[0], pos = [-15,355], height=24,color=[25,25,25],colorSpace='rgb255',wrapWidth=win.size[0]*.9)
            agreement_message1.draw()
            agreement_message2 = visual.TextStim(win, text=agreement_message[1], pos = [440,355], height=24,color=[25,25,25],colorSpace='rgb255',wrapWidth=win.size[0]*.9)
            agreement_message2.draw()
                
            table_rectangle.draw()
            table_rectangle2.draw()
            line1.draw()
            line2.draw()
            line_dotted1.draw()
            line_dotted2.draw()
            line_dotted3.draw()
            line_dotted4.draw()
            line_dotted5.draw()
            line_dotted6.draw()
            
        def instructions_choice_decision(win, item_list_text, output_eye_dir):
            #uncomment in case we want to compare with table from the presentation experiment requirements
            #inst_dir = 'Instructions\\choice_decision.jpg'
            #instr=visual.ImageStim(win,image=inst_dir, units='pix', size = display_resolution)
            #instr.draw()
           
            item_array_x = np.array([-15, -15, -15, -15,-15, -15, 445, 445, 445, 445, 445, 445])
            item_array_y = np.array([215,105,-5,-115,-225,-335,215,105,-5,-115,-225,-335])
            
            draw_table_lines(win)
            draw_input(win, item_list_text, item_array_x, item_array_y)
            (choice, time_all, time_trial) = draw_gaze_dot(win, 3001, 10000, output_eye_dir)
            
            #comment in case want to see gazedot
            return (choice, time_all, time_trial) 
    
        def draw_trigger(win, tracker, trigger, item_number, output_file_dir, output_eye_dir):
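            # Trigger codes used below: 1001 shows the blank screen, 2001 the
            # fixation cross, and 3001 the decision screen (the only stage that
            # records a choice and writes a row to the decision output file).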
            
            global input_file_dir
            
            choice = 0
    
            flip_time=win.flip()
            self.hub.sendMessageEvent(text="TRIAL_START",sec_time=flip_time)
            self.hub.clearEvents('all')
            tracker.setRecordingState(True)  # recording is (re)started for each trigger screen
            
            tracker.setTriggerValue(trigger)
            input_to_make_decision = read_input_file(input_file_dir, item_number)
            input_to_make_decision_split = ''.join(input_to_make_decision)
            input_to_make_decision_split = row_to_condition(input_to_make_decision_split)
            # default gaze values (not otherwise used in this function)
            x, y, gazetime = 'did not catch', 'eyedata, possibly blinked', 30

            
            if (trigger == 1001):
                instructions_blank_screen(win, output_eye_dir)
            
            if (trigger == 2001):
                instructions_fixation_cross(win, output_eye_dir)
                
            if (trigger == 3001):
                choice, choice_time_whole, choice_time_decision_screen  = instructions_choice_decision(win, input_to_make_decision_split, output_eye_dir)
                to_output(subject_id, choice, trigger, input_to_make_decision_split, output_file_dir, choice_time_whole, choice_time_decision_screen)
                
            
            flip_time=win.flip()
            self.hub.sendMessageEvent(text="TRIAL_END %d"%t,sec_time=flip_time)
            tracker.setRecordingState(False)

            self.hub.clearEvents('all')
            
            return choice
            
        def draw_gaze_dot(win, trigger, time, output_eye_dir):
            # Draw a '+' at the current gaze position and log each sample.
            # The 'c'/'m' response keys are only checked on the decision screen (trigger 3001);
            # the blank and fixation-cross screens simply run for their full duration.
            
            stime = getTime()
            #print(stime)
        
            while getTime()-stime < time:
                gpos = tracker.getPosition()
                #print('start time', stime, 'actual time', getTime(), 'difference', getTime()-stime, 'until <',time)

                if gpos is not None:
                    start_message = visual.TextStim(win, text='+', pos = [gpos[0],gpos[1]], height=10,color=[-1,-1,-1],colorSpace='rgb',wrapWidth=win.size[0])
                    start_message.draw()
                    
                    #start_message = visual.TextStim(win, text=str(gpos), pos = [gpos[1],gpos[0]], height=35,color=[-1,-1,-1],colorSpace='rgb',wrapWidth=win.size[0]*.9)
                    #start_message.draw()
                    win.flip(clearBuffer=False)
                    to_output_eyetracking(subject_id, gpos[0], gpos[1], getTime(), trigger, output_eye_dir)
                    core.wait(0.001)
                    
                else:
                    to_output_eyetracking(subject_id, 'did not catch eye data, possibly blinked', 'or corrupted',
                    getTime(), trigger, output_eye_dir) #getTime gives current time, here should probably print getTime from inside while
                
                self.hub.clearEvents('all') 
                
                if (trigger == 3001):
                    key = event.getKeys(keyList=['c', 'm'])
                    if key!=[]:
                        print('choice key', key, trigger, getTime())
                        return (key[0], getTime(), getTime()-stime)
                
            self.hub.clearEvents('all')
            print('events after clear events', event.getKeys(keyList=['c', 'm']))
            # Return a default tuple so the trigger-3001 caller can still unpack on timeout.
            return (0, getTime(), getTime() - stime)
        

        selected_eyetracker_name=args[0]
        
        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        tracker=self.hub.devices.tracker
        display=self.hub.devices.display
        kb=self.hub.devices.keyboard
        mouse=self.hub.devices.mouse
        
        # Start by running the eye tracker default setup procedure.
        tracker.runSetupProcedure()

        # Create a psychopy window, full screen resolution, full screen mode...
        display_coord_type=display.getCoordinateType()
        display_resolution=display.getPixelResolution()
        
       
        # it is recommended to use pixels as the unit, especially when using an eye tracker, because the tracker reports gaze positions in pixels
        win = visual.Window(display_resolution, monitor=display.getPsychopyMonitorName(), units='pix', fullscr=True,
                            screen=display.getIndex(), waitBlanking=False)  # color="white"
        
        # Hide the 'system mouse cursor'.
        # (re-enable the cursor later if needed)
        mouse.setSystemCursorVisibility(False)
        event.Mouse(visible=False)
        
        

        
        #------------------------------------------------------------Experiment begins ----------------------------------------------------------------------------------------------
        
        #get time in nice format to name the csv file
        localtime=time.asctime(time.localtime(time.time()))
        localtime=localtime[11:16]+'pm-'+localtime[4:10]
        localtime=localtime.replace(":","_").replace(" ","_")
        
        #create csv file
        csv_eye_output='Exp Results\\'+subject_id+'_eyetracking_output'+localtime+'.csv'
        csv_experiment_output ='Exp Results\\'+subject_id+'_decision_output'+localtime+'.csv'
        
        
        tracker.setRecordingState(True)
        
        #draw instruction before experiment start
        
        inst_dir = 'Instructions\\Inst_2.jpg'
        instr=visual.ImageStim(win,image=inst_dir, units='pix', size = display_resolution)
        instr.draw()
        
        '''inst1 = visual.TextStim(win, text='Instruction', pos = [0,0],
                                    height=24, color=[-1,-1,-1], colorSpace='rgb',
                                    alignHoriz='center', alignVert='center',
                                    wrapWidth=win.size[0]*.9)'''
        
        flip_time=win.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)
        self.hub.clearEvents('all')
        
        key=event.waitKeys(keyList=['space'])
        
         #------------------------------------------------------------Experiment trial testing ----------------------------------------------------------------------------------------------
        
        
    
        # Send some information to the ioHub DataStore as experiment messages
        # including the eye tracker being used for this session.
        #
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
        self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
        self.hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID,self.hub.experimentSessionID))
        self.hub.sendMessageEvent(text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getIndex(),display.getPixelResolution(),display.getCoordinateType()))
        self.hub.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
        self.hub.sendMessageEvent(text="Eye Tracker being Used: {0}".format(selected_eyetracker_name))
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")
        
        print('get current date time', "{0}".format(getCurrentDateTimeString()))
        print('experiment ID', self.hub.experimentID,'experiment session ID', self.hub.experimentSessionID) 
        print('display', "{0}".format(display.getIndex()), 'pixel resolution', "{0}".format(display.getPixelResolution()), 'coordinate type', "{0}".format(display.getCoordinateType()))
        print('pixels degree', "{0}".format(display.getPixelsPerDegree()), 'selected eyetracker', selected_eyetracker_name)

        self.hub.clearEvents('all')
        
        for t in range(2): #number of trials (set to 2 here for testing; intended to be 10)
            self.hub.sendMessageEvent(text="TRIAL_START")
            self.hub.clearEvents('all')
            #pick a random item row for this trial (rows 2-10); use the fixed value below for testing
            item_number = random.randrange(2, 11, 1)
            #item_number = 2
            
            trigger_value=1001
            draw_trigger(win, tracker, trigger_value, item_number,csv_experiment_output, csv_eye_output) #the row indexing starts from 2
            
            trigger_value=2001
            draw_trigger(win, tracker, trigger_value, item_number, csv_experiment_output, csv_eye_output)
            
            trigger_value=3001
            draw_trigger(win, tracker, trigger_value, item_number, csv_experiment_output, csv_eye_output)

            flip_time=win.flip()
            self.hub.sendMessageEvent(text='TRIAL_END',sec_time=flip_time)
            self.hub.clearEvents('all')
        

        #------------------------------------------------------------Experiment ends ----------------------------------------------------------------------------------------------

        # So the experiment is done, all trials have been run.
        # Clear the screen, send the completion message, then stop recording
        # and disconnect the eye tracking device.
        #
        flip_time=win.flip()
        self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE',sec_time=flip_time)
        tracker.setRecordingState(False)
        tracker.setConnectionState(False)
        
        logs_windows("Thank you for your participation! Press ''escape'' to exit", 'escape')
        print('checkquit escape')
        _checkQuit(key)
        
        self.hub.sendMessageEvent(text="SHOW_DONE_TEXT")

        tex1=eventtxt.Eventtotext()
        print('tex1=eventtxt.Eventtotext()', tex1)
        #note: this can raise an error if the eye tracker was not connected; consider wrapping it in try/except
        tex1.convertToText(exp_script_dir,subject_id,localtime)
        self.hub.clearEvents('all')
        #self.hub.clearEvents('all', exp_script_dir) 
        
        # MANAGER ERROR WHEN SENDING MSG:[Errno 9] Bad file descriptor
        #Warning: TimeoutExpired, Killing ioHub Server process.
        
        #ioHubExperimentRuntime.shutdown()
        #print(ioHubExperimentRuntime)
        win.close()
        self.hub.quit()