Example No. 1
    def __init__(self):
        self.disp = libscreen.Display(monitor=MONITOR)
        self.mouse = libinput.Mouse(visible=True)
        self.keyboard = libinput.Keyboard(
            keylist=['space', 'left', 'right', 'lctrl', 'rctrl'], timeout=None)

        self.blank_screen = libscreen.Screen()

        self.intro_screen = libscreen.Screen()

        self.intro_screen.draw_text(
            text='During each trial, a cloud of moving dots is going to '
                 'appear on the screen. Watch it carefully to detect '
                 'whether the dots in general are moving to the left or '
                 'to the right (click left mouse button to start)',
            fontsize=18)

        self.fixation_screen = libscreen.Screen()
        self.fixation_screen.draw_fixation(fixtype='cross', pw=3)

        self.initialize_ready_screen()
        self.initialize_stimulus_screen()
        self.initialize_feedback_screen()

        self.warning_sound = sound.Sound(1000, secs=0.1)
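A minimal sketch of how screens built this way are typically presented with PyGaze's fill/show cycle (the `Task` wrapper name and the surrounding calls are our illustration, not part of the source class):

task = Task()  # hypothetical class holding the __init__ above
task.disp.fill(task.intro_screen)   # copy the screen into the back buffer
task.disp.show()                    # flip it onto the display
task.mouse.get_clicked()            # block until the participant clicks
task.disp.fill(task.fixation_screen)
task.disp.show()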
Example No. 2
    def ValidationFixation(self, screen, t):
        '''
        Parameters:
            screen: virtual display screen
            t: fixation time in milliseconds

        Description:
            Draws a dot on the screen that turns green while the participant
            looks at it, and waits until it has been fixated for "t"
            milliseconds.
        '''

        tfix = 0

        while tfix < t:  # until the dot has been fixated for t ms in a row

            newTime = libtime.get_time()
            gazepos = self.tracker.sample()
            # the participant is not looking at the central dot if either
            # gaze coordinate falls outside the central 0.44-0.56 box
            if (gazepos[0] < self.norm_2_px((0.44, 0.44))[0]
                    or gazepos[0] > self.norm_2_px((0.56, 0.56))[0]
                    or gazepos[1] < self.norm_2_px((0.44, 0.44))[1]
                    or gazepos[1] > self.norm_2_px((0.56, 0.56))[1]):
                screen = libscreen.Screen()
                # the dot stays white
                screen.draw_circle(colour='white',
                                   pos=self.norm_2_px((0.5, 0.5)),
                                   r=40,
                                   pw=2,
                                   fill=True)
                self.disp.fill(screen=screen)
                self.disp.show()
                tfix = 0

            else:  # the participant is looking at the central dot
                screen = libscreen.Screen()
                # the dot turns green
                screen.draw_circle(colour='green',
                                   pos=self.norm_2_px((0.5, 0.5)),
                                   r=40,
                                   pw=2,
                                   fill=True)
                self.disp.fill(screen=screen)
                self.disp.show()
                tfix += (libtime.get_time() - newTime)

            if self.kb.get_key(keylist=['space'], flush=False)[0]:
                screen.clear()
                self.disp.fill(screen=screen)
                self.disp.show()

                return ()
        screen.clear()
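A minimal usage sketch for the method above, assuming `exp` is an instance of the surrounding class with `tracker`, `disp` and `kb` already initialised (as in the other CDP examples on this page):

screen = libscreen.Screen()
exp.tracker.start_recording()
exp.ValidationFixation(screen, 1000)  # require 1000 ms of continuous fixation
exp.tracker.stop_recording()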
Example No. 3
    def initialize_trial_end_screen(self):
        trial_end_screen = libscreen.Screen()

        self.points_earned_stim = visual.TextStim(pygaze.expdisplay,
                                                  color='#F5F500',
                                                  pos=(0,
                                                       DISPSIZE[1] / 2 - 200),
                                                  height=40)  # height plays the role of fontsize here
        trial_end_screen.screen.append(self.points_earned_stim)

        self.accumulated_points_stim = visual.TextStim(
            pygaze.expdisplay,
            color='#F5F500',  #pos=(0,DISPSIZE[1]/2-400), 
            height=30)
        trial_end_screen.screen.append(self.accumulated_points_stim)

        trial_end_instructions = visual.TextStim(pygaze.expdisplay,
                                                 pos=(0,
                                                      -DISPSIZE[1] / 2 + 100),
                                                 color='#80FF40',
                                                 text='CLICK TO CONTINUE',
                                                 height=28)
        trial_end_screen.screen.append(trial_end_instructions)

        return trial_end_screen
Example No. 4
    def initialize_gamble_screen(self, loc='left'):
        response_button_pos = (self.response_button_pos_right if loc == 'right'
                               else self.response_button_pos_left)
        img = self.right_response_image if loc == 'right' else self.left_response_image

        self.gamble_screen = libscreen.Screen()
        self.gamble_screen.screen.append(img)
        self.gamble_rects = []
        for i, gamble in enumerate(self.gambles):
            pos = (response_button_pos[0],
                   response_button_pos[1] - self.response_button_size[1] / 2
                   - (i + 0.5) * self.gamble_button_size[1])
            rect = visual.Rect(win=pygaze.expdisplay,
                               pos=pos,
                               width=self.gamble_button_size[0],
                               height=self.gamble_button_size[1],
                               lineColor=(5, 5, 5),
                               lineColorSpace='rgb255',
                               fillColor=(255, 250, 250),
                               fillColorSpace='rgb255')
            text = visual.TextStim(win=pygaze.expdisplay,
                                   pos=pos,
                                   text=gamble,
                                   height=36,
                                   color=(5, 5, 5),
                                   colorSpace='rgb255')

            self.gamble_screen.screen.append(rect)
            self.gamble_screen.screen.append(text)
            self.gamble_rects.append(rect)
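Several examples on this page rely on the fact that a PyGaze Screen (with the PsychoPy display type) is essentially a list of PsychoPy stimuli: anything appended to its `.screen` attribute is drawn on the next Display.fill/show cycle. A minimal sketch, assuming a Display has already been created with disptype='psychopy':

from psychopy import visual
from pygaze import libscreen
import pygaze

scr = libscreen.Screen()
label = visual.TextStim(pygaze.expdisplay, text='hello', height=24)
scr.screen.append(label)  # a raw PsychoPy stimulus inside a PyGaze Screen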
Example No. 5
    def presentAGTrial(self, curTrial, getInput, AGTime):

        libtime.pause(self.ISI)

        self.experiment.disp.show()

        if curTrial['AGType'] == "image":
            #create picture
            curPic = self.pictureMatrix[curTrial['image']][0]
            curPic.pos = (0, 0)

            #build screen
            agScreen = libscreen.Screen(disptype='psychopy')
            buildScreenPsychoPy(agScreen, [curPic])
            #wait 1 s
            libtime.pause(self.agWait1)
            #present screen
            setAndPresentScreen(self.experiment.disp, agScreen)
            #play audio
            playAndWait(self.soundMatrix[curTrial['audio']], waitFor=0)
            #display for the rest of the AG time
            libtime.pause(AGTime)

        elif curTrial['AGType'] == "movie":
            #load movie stim
            print(self.experiment.moviePath)
            print(curTrial['AGVideo'])
            mov = visual.MovieStim3(self.experiment.win,
                                    self.experiment.moviePath + curTrial['AGVideo'])
            while mov.status != visual.FINISHED:
                mov.draw()
                self.experiment.win.flip()

        #if getInput is True, wait for a key press before advancing
        if getInput:
            self.experiment.input.get_key()
Example No. 6
    def CDPInitialisation(self):
        self.findtracker = tr.find_all_eyetrackers()
        if self.findtracker == ():
            print("Veuillez réessayer, aucun EyeTracker détecté")
            return ()
        self.filename = 0
        self.baseFilename = 0
        self.blackdisp = libscreen.Display(screennr=int(
            self.Config.getConfiguration('DISPLAY', 'screen_number')))

        self.disp = libscreen.Display(screennr=int(
            self.Config.getConfiguration('DISPLAY', 'screen_number')))
        self.blankscreen = libscreen.Screen()

        self.tracker = CDPProTracker(
            self.disp)  # create the eye-tracker object
        self.kb = Keyboard(keylist=['space', 'escape', 'q'], timeout=1)
        self.Visu = CDPBaseVisualisation(self)
        self.RecSound = libsound.Sound(
            soundfile=self.Config.getSoundDirname('2.wav'))
        self.ErrSound = libsound.Sound(
            soundfile=self.Config.getSoundDirname('punition.wav'))
        self.nameInd = 0
        self.mydisp = [self.disp]

        print("Eyetracker connecté avec succès")
Example No. 7
    def intialize_message_screen(self, message_file, **kwargs):
        message_screen = libscreen.Screen()
        with open(message_file) as f:
            instructions = f.read()
        instructions_stim = visual.TextStim(self.win,
                                            text=instructions,
                                            units='pix',
                                            height=self.standard_text_heigth,
                                            **kwargs)
        message_screen.screen.append(instructions_stim)
        return message_screen
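A minimal usage sketch, assuming `exp` is an instance of the class above (which also holds the usual `disp` Display object) and 'instructions.txt' is a hypothetical plain-text file; extra keyword arguments are forwarded to visual.TextStim:

msg = exp.intialize_message_screen('instructions.txt', color='white')
exp.disp.fill(msg)
exp.disp.show()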
Example No. 8
    def show_block_end_screen(self, is_practice, accumulated_points):
        self.mouse.set_visible(True)
        self.block_end_screen = libscreen.Screen()
        block_end_text = (
            'You have completed this experimental block.\n'
            'Your score is %i points.\n'
            '\nClick left mouse button to proceed.' % accumulated_points)

        self.block_end_screen.draw_text(text=block_end_text, fontsize=18)
        self.disp.fill(self.block_end_screen)
        self.disp.show()
        self.mouse.get_clicked()
        libtime.pause(200)
Example No. 9
    def fonction_essai(self):

        ScreenVisage = libscreen.Screen()
        self.tracker.start_recording()
        ListeNomImg = [
            '/home/eyetracker/Bureau/Program/Images/pizza.jpg',
            '/home/eyetracker/Bureau/Program/Images/ExpVisages/Image_Homme_2.bmp'
        ]
        cpt = 0

        for Img in ListeNomImg:

            table = Workbook()
            gazePosSheet = table.active
            gazePosSheet.title = 'gazePos'

            gazePosSheet.append([
                "Time", "XOeilDroit", "YOeilDroit", "XOeilGauche",
                "YOeilGauche", "xRetenu", "Yretenu", "Etat"
            ])
            self.ValidationFixation(ScreenVisage, 1000)

            LisTOeilDroit = []
            LisTOeilGauche = []
            ListeOeilRetenu = []

            temps = []
            ScreenVisage.draw_image(image=Img)
            self.disp.fill(screen=ScreenVisage)
            self.disp.show()
            tdeb = libtime.get_time()

            oldTimeStamp = 0
            while libtime.get_time() - tdeb < 10000:
                time.sleep(0.010)
                t = libtime.get_time()
                NewTimeStamp, Newgazepos = self.tracker.binocular_sample()
                if NewTimeStamp != oldTimeStamp:
                    etat = self.etat_yeux(Newgazepos[0], Newgazepos[1])
                    gazePosSheet.append([
                        t, Newgazepos[0][0], Newgazepos[0][1],
                        Newgazepos[1][0], Newgazepos[1][1], Newgazepos[2][0],
                        Newgazepos[2][1], etat
                    ])
                    oldTimeStamp = NewTimeStamp

            ScreenVisage.clear()
            self.disp.fill(screen=ScreenVisage)
            self.disp.show()
            table.save('/home/eyetracker/Bureau/' + self.nameInd + '_' +
                       str(cpt) + '_Donnees.xls')
            cpt += 1
        self.tracker.stop_recording()
Example No. 10
    def show_block_intro_screen(self, block_size, is_practice):
        self.mouse.set_visible(True)
        self.block_intro_screen = libscreen.Screen()
        block_type = 'practice' if is_practice else 'recorded'
        self.block_intro_screen.draw_text(
            text='You are about to start the block of %d %s trials. '
                 'To start click left mouse button.' % (block_size, block_type),
            fontsize=18)
        self.disp.fill(self.block_intro_screen)
        self.disp.show()
        self.mouse.get_clicked()
        libtime.pause(200)
Example No. 11
    def initialize_feedback_screen(self):
        self.feedback_screen = libscreen.Screen()
        self.feedback_text = visual.TextStim(win=pygaze.expdisplay,
                                             colorSpace='rgb255',
                                             height=36)
        self.feedback_points_earned = visual.TextStim(win=pygaze.expdisplay,
                                                      pos=(0, -100),
                                                      colorSpace='rgb255',
                                                      height=36)
        self.feedback_accumulated_points = visual.TextStim(
            win=pygaze.expdisplay, pos=(0, -175), height=36)

        self.feedback_screen.screen.append(self.feedback_text)
        self.feedback_screen.screen.append(self.feedback_points_earned)
        self.feedback_screen.screen.append(self.feedback_accumulated_points)
Example No. 12
    def show_end_experiment_screen(self, scores):
        self.mouse.set_visible(True)
        self.experiment_end_screen = libscreen.Screen()
        experiment_end_text = (
            'Congratulations! You have completed the experiment.\n'
            'Your scores for each block are displayed below.\n'
            '\nClick left mouse button to proceed \n\n')

        for i, score in enumerate(scores):
            experiment_end_text += 'Block %i: %i points \n' % (i + 1, score)
        self.experiment_end_screen.draw_text(text=experiment_end_text,
                                             fontsize=18)
        self.disp.fill(self.experiment_end_screen)
        self.disp.show()
        self.mouse.get_clicked()
Example No. 13
    def __init__(self):
        try:
            self.eyetracker = tr.find_all_eyetrackers()[0]
        except IndexError:
            messagebox.showinfo(
                "Error",
                "Tobii Eye Tracker not found. Please restart the Tobii Service\nfound in the \"Services\" application"
            )
            import sys
            sys.exit(1)

        self.gaze_data = []

        self.disp = libscreen.Display()
        self.screen = libscreen.Screen()
        self.kb = libinput.Keyboard(keylist=['space', 'escape', 'q'],
                                    timeout=1)
        self.screendist = constants.SCREENDIST

        # calibration and validation points
        lb = 0.1  # left bound
        xc = 0.5  # horizontal center
        rb = 0.9  # right bound
        ub = 0.1  # upper bound
        yc = 0.5  # vertical center
        bb = 0.9  # bottom bound
        self.points_to_calibrate = [
            self._norm_2_px(p)
            for p in [(lb, ub), (rb, ub), (xc, yc), (lb, bb), (rb, bb)]
        ]

        # maximal distance from fixation start (if gaze wanders beyond this, fixation has stopped)
        self.fixtresh = 1.5  # degrees
        # amount of time gaze has to linger within self.fixtresh to be marked as a fixation
        self.fixtimetresh = 100  # milliseconds
        # saccade velocity threshold
        self.spdtresh = 35  # degrees per second
        # saccade acceleration threshold
        self.accthresh = 9500  # degrees per second**2
        # blink detection threshold used in PyGaze method
        self.blinkthresh = 50  # milliseconds

        self.screensize = constants.SCREENSIZE  # display size in cm
        self.pixpercm = (
            self.disp.dispsize[0] / float(self.screensize[0]) +
            self.disp.dispsize[1] / float(self.screensize[1])) / 2.0
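The last assignment converts the physical screen size into a pixels-per-centimetre factor by averaging the horizontal and vertical ratios. A worked example with hypothetical numbers (a 1920x1080 display on a 53.1 cm x 29.9 cm panel):

dispsize = (1920, 1080)    # pixels (hypothetical)
screensize = (53.1, 29.9)  # cm (hypothetical)
pixpercm = (dispsize[0] / screensize[0] + dispsize[1] / screensize[1]) / 2.0
# (36.2 + 36.1) / 2.0 -> roughly 36.1 pixels per cm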
Example No. 14
    def addCalibrationPoint(self, indice):

        dispAffich = libscreen.Display(screennr=1)
        point = self.points_to_calibrate[indice]
        Newscreen = libscreen.Screen()

        for i in range(0, 200, 3):
            Newscreen.clear()
            Newscreen.draw_circle(colour='red',
                                  pos=point,
                                  r=int(1920 / (50 + i)),
                                  pw=5,
                                  fill=True)
            dispAffich.fill(Newscreen)
            dispAffich.show()

        dispAffich.close()
Example No. 15
    def initialize_ready_screen(self):
        self.ready_screen = libscreen.Screen()
        self.ready_screen.draw_text(
            text='Click the Start button to start the trial', fontsize=18)

        self.ready_button = visual.Rect(win=pygaze.expdisplay,
                                        pos=self.ready_button_pos,
                                        width=self.ready_button_size[0],
                                        height=self.ready_button_size[1],
                                        lineColor=(200, 200, 200),
                                        lineWidth=3,
                                        lineColorSpace='rgb255',
                                        fillColor=None)
        self.ready_button_text = visual.TextStim(win=pygaze.expdisplay,
                                                 text='Start',
                                                 pos=self.ready_button_pos,
                                                 height=18)
        self.ready_screen.screen.append(self.ready_button)
        self.ready_screen.screen.append(self.ready_button_text)
Example No. 16
    def initialize_response_screen(self):
        response_screen = libscreen.Screen()

        self.deadzone_rect = visual.Rect(win=self.win,
                                         pos=self.deadzone_pos,
                                         width=self.deadzone_size[0],
                                         height=self.deadzone_size[1],
                                         lineColor=None,
                                         fillColor=None)

        # We're using the PsychoPy ImageStim object
        self.left_resp_img = visual.ImageStim(self.win,
                                              pos=self.leftButton_pos)
        self.right_resp_img = visual.ImageStim(self.win,
                                               pos=self.rightButton_pos)

        self.left_resp_rect = visual.Rect(win=self.win,
                                          pos=self.leftButton_pos,
                                          width=self.respButton_size[0],
                                          height=self.respButton_size[1],
                                          lineColor=None,
                                          fillColor=None)
        self.right_resp_rect = visual.Rect(win=self.win,
                                           pos=self.rightButton_pos,
                                           width=self.respButton_size[0],
                                           height=self.respButton_size[1],
                                           lineColor=None,
                                           fillColor=None)
        self.target = visual.TextStim(self.win,
                                      pos=self.target_pos,
                                      height=self.target_height,
                                      units='pix',
                                      opacity=0.0)

        # Here we specify the screen onto which the stimuli are to be displayed
        response_screen.screen.append(self.deadzone_rect)
        response_screen.screen.append(self.left_resp_img)
        response_screen.screen.append(self.right_resp_img)
        response_screen.screen.append(self.left_resp_rect)
        response_screen.screen.append(self.right_resp_rect)
        response_screen.screen.append(self.target)

        return response_screen
Example No. 17
    def initialize_trial_start_screen(self):
        trial_start_screen = libscreen.Screen()

        start_button_rect = visual.Rect(win=self.win,
                                        pos=self.start_button_pos,
                                        width=self.start_button_size[0],
                                        height=self.start_button_size[1],
                                        lineColor=(200, 200, 200),
                                        lineColorSpace='rgb255',
                                        fillColor=None,
                                        lineWidth=3)

        start_button_text = visual.TextStim(self.win,
                                            text='Start',
                                            pos=self.start_button_pos)

        trial_start_screen.screen.append(start_button_rect)
        trial_start_screen.screen.append(start_button_text)

        return trial_start_screen, start_button_rect
Example No. 18
def startscreen():
    mainscreen = libscreen.Screen(bgc=(15, 129, 5, 0), fgc=(50, 90, 0, 55))
    image = "Backgrounds/backend.jpg"
    mainscreen.draw_image(image)
    w, h = calcresolution()
    mainscreen.draw_rect(colour=(255, 255, 255),
                         x=w / 2 - 0.5 * w / 2,
                         y=h / 2 - 0.4 * h / 2,
                         w=0.5 * w,
                         h=0.4 * h,
                         pw=1,
                         fill=True)
    mainscreen.draw_text(text='Badanie reakcji na widok\n twarzy po operacji',
                         colour=(0, 0, 0),
                         pos=None,
                         center=True,
                         font='mono',
                         fontsize=40,
                         antialias=True)
    return mainscreen
Example No. 19
    def initialize_stimulus_screen(self):
        self.stimuli_screen = libscreen.Screen()
        self.rdk = RDK_MN(pygaze.expdisplay)

        self.stimuli_screen.screen.append(self.rdk.dot_stim)

        self.left_response_image = visual.ImageStim(
            win=pygaze.expdisplay,
            image='resources/images/arrow_left_sq.png',
            pos=self.response_button_pos_left)
        self.left_response_rect = visual.Rect(
            win=pygaze.expdisplay,
            pos=self.response_button_pos_left,
            width=self.response_button_size[0],
            height=self.response_button_size[1],
            lineColor=(5, 5, 5),
            lineColorSpace='rgb255',
            fillColor=None)

        self.right_response_image = visual.ImageStim(
            win=pygaze.expdisplay,
            image='resources/images/arrow_right_sq.png',
            pos=self.response_button_pos_right)
        self.right_response_rect = visual.Rect(
            win=pygaze.expdisplay,
            pos=self.response_button_pos_right,
            width=self.response_button_size[0],
            height=self.response_button_size[1],
            lineColor=(5, 5, 5),
            lineColorSpace='rgb255',
            fillColor=None)

        self.stimuli_screen.screen.append(self.left_response_image)
        self.stimuli_screen.screen.append(self.left_response_rect)
        self.stimuli_screen.screen.append(self.right_response_image)
        self.stimuli_screen.screen.append(self.right_response_rect)
Example No. 20
    def CDPFixationPoint(self, tfixation, Name, tol, x, y):

        self.Visu.blackscreen()
        datej = datetime.datetime.now().strftime("%d-%m-%Y")

        date = datetime.datetime.now().strftime("%d-%m-%Y_%H:%M:%S")

        table = load_workbook(
            self.Config.getDataDirname('FixationPoint') + '/' + Name + '.xlsx')
        mysheet = table.active
        ListeResultat = []
        txp = 0
        tdeb = 0
        tfix = 0
        cptperte = 0
        self.tracker.start_recording()
        #table = Workbook()
        #gazePosSheet = table.active
        #gazePosSheet.title = 'gazePos'
        Result = 1
        #gazePosSheet.append(["Time", "XOeilDroit", "YOeilDroit","XOeilGauche","YOeilGauche","xRetenu","Yretenu","Etat"])
        pospxl = self.norm_2_px((x, y))

        self.Visu.draw_AOI_fix(pospxl[0], pospxl[1], tol)
        self.Visu.VisuShow()
        screen = libscreen.Screen()
        cptdefauteyetracker = 0
        # the dot stays white
        screen.draw_circle(colour='white', pos=pospxl, r=30, pw=2, fill=True)
        self.disp.fill(screen=screen)
        self.disp.show()
        oldTimeStamp = 0
        tdebxp = libtime.get_time()
        txp, Newgazepos = self.tracker.binocular_sample()
        # loop until the gaze enters the tolerance box around the dot,
        # or until 3000 ms have elapsed
        while tdeb < 3000 and not (
                pospxl[0] - tol < Newgazepos[2][0] < pospxl[0] + tol
                and pospxl[1] - tol < Newgazepos[2][1] < pospxl[1] + tol):
            print(tdeb)
            time.sleep(0.005)
            NewTimeStamp, Newgazepos = self.tracker.binocular_sample()
            self.Visu.Show_gaze(Newgazepos[2][0], Newgazepos[2][1])
            newTime = libtime.get_time()
            if NewTimeStamp != oldTimeStamp:
                t = int(NewTimeStamp - txp) / 1000
                etat = self.etat_yeux(Newgazepos[0], Newgazepos[1])
                #gazePosSheet.append([t, Newgazepos[0][0],Newgazepos[0][1],Newgazepos[1][0],Newgazepos[1][1],Newgazepos[2][0],Newgazepos[2][1],etat])
                oldTimeStamp = NewTimeStamp
            tdeb = (libtime.get_time() - tdebxp)

        screen = libscreen.Screen()

        if tdeb < 3000:

            while tfix < tfixation and cptperte < 300:

                newTime = libtime.get_time()
                time.sleep(0.005)
                NewTimeStamp, Newgazepos = self.tracker.binocular_sample()
                self.Visu.Show_gaze(Newgazepos[2][0], Newgazepos[2][1])
                self.Visu.VisuShow()
                # if the participant is looking inside the tolerance box
                # around the central dot
                if (pospxl[0] - tol < Newgazepos[2][0] < pospxl[0] + tol
                        and pospxl[1] - tol < Newgazepos[2][1] < pospxl[1] + tol):
                    screen = libscreen.Screen()
                    # the dot stays white (green feedback variants left commented out below)
                    #screen.draw_circle(colour=(int(128-(tfix*128/tfixation)),128,0), pos= pospxl, r=30, pw=2, fill=True)
                    #screen.draw_circle(colour='green', pos= pospxl, r=30, pw=2, fill=True)
                    screen.draw_circle(colour='white',
                                       pos=pospxl,
                                       r=30,
                                       pw=2,
                                       fill=True)

                    self.disp.fill(screen=screen)
                    self.disp.show()
                    tfix += (libtime.get_time() - newTime)
                    cptperte = 0
                    cptdefauteyetracker = 0

                else:  # the participant is not looking at the central dot
                    if Newgazepos[2] == (-1, -1):
                        cptdefauteyetracker += (libtime.get_time() - newTime)
                    cptperte += (libtime.get_time() - newTime)

                if NewTimeStamp != oldTimeStamp:
                    t = int(NewTimeStamp - txp) / 1000
                    etat = self.etat_yeux(Newgazepos[0], Newgazepos[1])
                    #gazePosSheet.append([t, Newgazepos[0][0],Newgazepos[0][1],Newgazepos[1][0],Newgazepos[1][1],Newgazepos[2][0],Newgazepos[2][1],etat])
                    oldTimeStamp = NewTimeStamp

            if tfix > tfixation:
                self.RecSound.play()
                Result = 0
        self.tracker.stop_recording()

        if tdeb > 3000:
            Result = 1
            self.ErrSound.play()
            screen = libscreen.Screen(bgc='white')
            self.disp.fill(screen=screen)
            self.disp.show()
            time.sleep(3)

        elif tfix < tfixation:
            if cptdefauteyetracker > 200:
                Result = 2
            else:
                Result = 3
            self.ErrSound.play()
            screen = libscreen.Screen(bgc='white')
            self.disp.fill(screen=screen)
            self.disp.show()
            time.sleep(3)

        mysheet.append([datej, x, y, tol, tfixation, Result])

        #table.save(self.Config.getDataDirname('FixationPoint') + '/' + Name +'_' +  date + '_' +  Result + '_' + str(tfixation) + 'ms' + '.xls')
        table.save(
            self.Config.getDataDirname('FixationPoint') + '/' + Name + '.xlsx')

        print('fin')

        screen = libscreen.Screen()
        self.disp.fill(screen=screen)
        self.disp.show()
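The four-way comparisons repeated throughout this method are all the same point-in-box test around the fixation dot; factored into a helper the test reads much more clearly (the `in_box` name is ours, not from the source):

def in_box(gaze, center, tol):
    """Return True if gaze (x, y) lies within tol pixels of center."""
    return (center[0] - tol < gaze[0] < center[0] + tol
            and center[1] - tol < gaze[1] < center[1] + tol)

# equivalent to the inline conditions used above, e.g.:
# if in_box(Newgazepos[2], pospxl, tol): ...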
Example No. 21
    def CDPcontroleNictation(self):
        '''
        Description:
            After a 500 ms fixation on a central point, a calming seascape
            image is shown for 30 s; then another fixation point and a
            "Where's Wally?" image appear, and the participant has to find
            Wally (concentration). The function writes an Excel workbook with
            the eye-tracker data, allowing a positive control on the
            participant's blinks.
        '''

        date = datetime.datetime.now().strftime("%d-%m-%Y_%H:%M:%S")
        self.tracker.start_recording()
        name = 'Charlie'
        ScreenNict = libscreen.Screen()
        Img = self.Config.getImageDirname('ControlNictation') + '/Charlie.jpg'

        table = Workbook()
        gazePosSheet = table.active
        gazePosSheet.title = 'gazePos'

        informationsheet = table.create_sheet("Information")
        gazePosSheet.append([
            "Time", "XOeilDroit", "YOeilDroit", "XOeilGauche", "YOeilGauche",
            "xRetenu", "Yretenu", "Etat"
        ])
        informationsheet.append(["NomImg", "xImage", "yImage"])
        print("Début de l'expérience")
        self.ValidationFixation(ScreenNict, 500)

        ScreenNict.draw_image(image=Img)
        self.disp.fill(screen=ScreenNict)
        self.disp.show()

        tdeb = libtime.get_time()
        oldTimeStamp = 0
        txp, gase = self.tracker.binocular_sample()
        while libtime.get_time() - tdeb < 30000:
            time.sleep(0.01)
            NewTimeStamp, Newgazepos = self.tracker.binocular_sample()
            if NewTimeStamp != oldTimeStamp:
                t = int(NewTimeStamp - txp) / 1000
                etat = self.etat_yeux(Newgazepos[0], Newgazepos[1])
                gazePosSheet.append([
                    t, Newgazepos[0][0], Newgazepos[0][1], Newgazepos[1][0],
                    Newgazepos[1][1], Newgazepos[2][0], Newgazepos[2][1], etat
                ])
                oldTimeStamp = NewTimeStamp
            if self.kb.get_key(keylist=['space'], flush=False)[0]:
                ScreenNict.clear()
                self.disp.fill(screen=ScreenNict)
                self.disp.show()
                self.tracker.stop_recording()
                print("Fin de l'expérience")

                return ()

        ScreenNict.clear()
        self.disp.fill(screen=ScreenNict)
        self.disp.show()

        informationsheet.append([name + '.jpg', 960, 540])

        table.save(
            '/home/eyetracker/Bureau/Data/Experiences/ControlNictation/' +
            self.nameInd + '_' + name + '_' + date + '.xls')
        print("Fin de l'expérience")

        self.tracker.stop_recording()
Example No. 22
#pytesseract.pytesseract.tesseract_cmd ='C:\\Program Files (x86)\\Tesseract-OCR\\tessdata'

from pygaze import libscreen, liblog, eyetracker

menuMap = parseMenu()

# create display object
disp = libscreen.Display(disptype='psychopy', dispsize=(1200,800)) 

# create eyetracker object
tracker = eyetracker.EyeTracker(disp)

# create logfile object
log = liblog.Logfile()

# create screens
mainScreen = libscreen.Screen(dispsize=(1200,800))
#mainScreen.draw_text(text="When you see a cross, look at it and press space. Then make an eye movement to the black circle when it appears.\n\n(press space to start)", fontsize=24)
mainScreen.draw_image(image="./documents/vorder/vorder_test_1/test_menu_images/test_menu_2.jpeg")

#stores the amount of blinks that fall within a microsecond apart
blinkCount = 0


# calibrate eye tracker
tracker.calibrate()
print(tracker.connected())

#print("gazePosition_X: %s and gazePosition_Y: %s" %(tracker.sample()[0], tracker.sample()[1]))

# Should return the menu item closest to where the customer's gaze currently is
def findClosestItem(x, y):
    pass  # body truncated in the original example
Example No. 23
    def CDPExplorationVisage(self, name, xfix, yfix):

        xfix = float(xfix)
        yfix = float(yfix)
        datedeb = datetime.datetime.now().strftime("%d-%m-%Y_%H:%M:%S")
        os.mkdir(
            self.Config.getDataDirname('Exploration Visages') + '/' + name +
            '_' + datedeb)

        #SetImageFile = self.SelectionSetIndividu(name)

        SetImageFile = [
            self.Config.getImageDirname(
                'Exploration Visages/anu/Anubis_02_200110DSC06806.resized.jpg'
            ),
            self.Config.getImageDirname(
                'Exploration Visages/anu/Anubis_Ref_200123DSC07844.resized.jpg'
            ),
            self.Config.getImageDirname(
                'Exploration Visages/bar/Barnabe_Ref_200123DSC07867.resized.jpg'
            ),
            self.Config.getImageDirname(
                'Exploration Visages/bar/Barnabe_04_200123DSC07859.resized.jpg'
            ),
            self.Config.getImageDirname(
                'Exploration Visages/ces/Cesar_03_200129DSC08442.resized.jpg'),
            self.Config.getImageDirname(
                'Exploration Visages/ces/Cesar_Ref_200123DSC07860.resized.jpg')
        ]
        random.shuffle(SetImageFile)

        self.tracker.start_recording()
        print("L'expérience a débuté")
        for img in SetImageFile:
            poscercle = self.norm_2_px((xfix, yfix))

            date = datetime.datetime.now().strftime("%d-%m-%Y_%H:%M:%S")
            imgname = os.path.basename(img)
            imgname = os.path.splitext(imgname)[0]
            table = Workbook()
            gazePosSheet = table.active
            gazePosSheet.title = 'gazePos'
            informationsheet = table.create_sheet("Information")
            informationsheet.append(
                ["NomImg", "xImage", "yImage", "xdep", "ydep"])
            gazePosSheet.append([
                "Time", "XOeilDroit", "YOeilDroit", "XOeilGauche",
                "YOeilGauche", "xRetenu", "Yretenu", "Etat"
            ])
            informationsheet.append(
                [img, 960, 702, poscercle[0], poscercle[1]])
            screen = libscreen.Screen()

            # the dot stays white

            screen.draw_circle(colour='white',
                               pos=poscercle,
                               r=30,
                               pw=2,
                               fill=True)
            self.disp.fill(screen=screen)
            self.disp.show()
            tfix = 0
            cptperte = 0
            while tfix < 250:

                if self.kb.get_key(keylist=['space'], flush=False)[0]:
                    screen.clear()
                    self.disp.fill(screen=screen)
                    self.disp.show()
                    self.tracker.stop_recording()

                    return ()

                newTime = libtime.get_time()
                time.sleep(0.005)
                NewTimeStamp, Newgazepos = self.tracker.binocular_sample()

                # if the participant is not looking at the central dot

                if (Newgazepos[2][0] < poscercle[0] - 100
                        or Newgazepos[2][0] > poscercle[0] + 100) or (
                            Newgazepos[2][1] < poscercle[1] - 100
                            or Newgazepos[2][1] > poscercle[1] + 100):
                    cptperte += (libtime.get_time() - newTime)
                    if cptperte > 300:
                        tfix = 0
                        screen.draw_circle(colour='white',
                                           pos=poscercle,
                                           r=30,
                                           pw=2,
                                           fill=True)
                        self.disp.fill(screen=screen)
                        self.disp.show()

                else:  # the participant is looking at the dot
                    screen = libscreen.Screen()
                    # the dot shifts towards green as fixation time accumulates
                    screen.draw_circle(colour=(int(128 - (tfix * 128 / 250)),
                                               128, 0),
                                       pos=poscercle,
                                       r=30,
                                       pw=2,
                                       fill=True)
                    self.disp.fill(screen=screen)
                    self.disp.show()
                    tfix += (libtime.get_time() - newTime)
                    cptperte = 0

            screen.clear()
            screen.draw_image(image=img, pos=(960, 702))
            self.disp.fill(screen=screen)
            self.disp.show()

            tdeb = libtime.get_time()
            oldTimeStamp = 0
            txp, gase = self.tracker.binocular_sample()

            while libtime.get_time() - tdeb < 4000:
                time.sleep(0.01)
                NewTimeStamp, Newgazepos = self.tracker.binocular_sample()
                if NewTimeStamp != oldTimeStamp:
                    t = int(NewTimeStamp - txp) / 1000
                    etat = self.etat_yeux(Newgazepos[0], Newgazepos[1])
                    gazePosSheet.append([
                        t, Newgazepos[0][0], Newgazepos[0][1],
                        Newgazepos[1][0], Newgazepos[1][1], Newgazepos[2][0],
                        Newgazepos[2][1], etat
                    ])
                    oldTimeStamp = NewTimeStamp

            screen.clear()
            self.disp.fill(screen=screen)
            self.disp.show()
            self.RecSound.play()
            table.save(
                self.Config.getDataDirname('Exploration Visages') + '/' +
                name + '_' + datedeb + '/' + name + '_' + imgname + '_' +
                date + '.xls')

            time.sleep(2)
        print("L'expérience est terminée")
        self.tracker.stop_recording()
Example No. 24
    def initializeExperiment(self):
        
        print("Loading files...")
        loadScreen = libscreen.Screen()
        loadScreen.draw_text(text="Loading Files...",colour="lightgray", fontsize=48)
        self.experiment.disp.fill(loadScreen)
        self.experiment.disp.show()
        self.imageScreen=libscreen.Screen(disptype="psychopy")
        imageName=self.experiment.imagePath+"bunnies.png"
        image=visual.ImageStim(self.experiment.win, imageName,mask=None,interpolate=True) 
        image.setPos((0,0))
        buildScreenPsychoPy(self.imageScreen,[image]) 
        setAndPresentScreen(self.experiment.disp,self.imageScreen)
        
        self.imageScreen2=libscreen.Screen(disptype="psychopy")
        imageName2=self.experiment.imagePath+"puppies.png"
        image2=visual.ImageStim(self.experiment.win, imageName2,mask=None,interpolate=True) 
        image2.setPos((0,0))
        buildScreenPsychoPy(self.imageScreen2,[image2])
        
        trialsPath = 'trialLists/Bird_trialList_%s_%s.csv' % (self.experiment.subjVariables['subjCode'],self.experiment.subjVariables['seed'])
        
        if not os.path.isfile(trialsPath):
            print('Trials file not found. Creating...')
            Birdsong_generateTrials.main(self.experiment.subjVariables['subjCode'],self.experiment.subjVariables['seed'])

        
        (self.trialListMatrix, self.trialFieldNames) = importTrials(trialsPath, method="sequential")
        
        #number of trials
        self.numTrials = 12
        
        #load sound files
        self.pictureMatrix = loadFiles('stimuli/images', ['png',], 'image', self.experiment.win)
        self.soundMatrix = loadFiles('stimuli/sounds',['wav'], 'sound')
        
        #load AG movie
        self.mov = visual.MovieStim3(self.experiment.win, self.experiment.moviePath+"pinwheel.mov",loop=True,noAudio=True,size=(600,445))
        
        self.locations=['center']
        self.pos={'center': (0,0)}
        
        
        self.posDims=(600,600)
        self.posImageDims=(400.0,400.0)
        
        #set duration of pauses between trial events
        #this may need to be changed
        self.ISI=500
        self.screenPause=500
        self.BoxStep = 20
        self.BoxStepCount = 600
        self.picStepSize = 1
        
        self.AGTimeOut = 20000
        self.AGFixCount = 30
        self.lookAwayPos = (-1920,-1200)
        
        
        #gaze contingent params
        self.aoiCenter=libgazecon.AOI('rectangle',pos=(640,280),size=[640,640])
        self.aoiCenterMovie=libgazecon.AOI('rectangle',pos=(560,300),size=[800,600])
        self.aoiScreen=libgazecon.AOI('rectangle',pos=(0,0),size=[1920,1200])
            
        #all geometric objects to be drawn
        self.AGCircle=visual.Circle(self.experiment.win, radius=100, fillColor="green",lineColor="green")
        self.rect = newRect(self.experiment.win,self.posDims,self.pos['center'],"white")
        self.grayRect = newRect(self.experiment.win,(600,300),(0,450),"lightgray")

        print("Files Loaded!")
Example No. 25
from psychopy import visual
from psychopy import core
import matplotlib.pyplot as plt

import os
import constants
import Waveforms_v2 as wf

import numpy as np

import pygaze
from pygaze import libscreen, libinput

disp = libscreen.Display()

kb_space = libinput.Keyboard(keylist=['space'], timeout=None)
kb_response = libinput.Keyboard(keylist=['s', 'v', 'q'], timeout=None)

scr = libscreen.Screen()
scr.draw_text(
    'Starting the ITD demo!\n\nPress S for sound first\n\nPress V for visual first\n\n--Space bar to start--',
    fontsize=24)
disp.fill(scr)
disp.show()
kb_space.get_key()

ITD_array = np.linspace(-1, 1, num=80)
ITD_size = 400
np.random.shuffle(ITD_array)
response_array = []

sound = wf.waveform(wavetype='wn', duration=0.1)
box = visual.Circle(pygaze.expdisplay, radius=200)
Example No. 26
    def presentTrial(self,curTrial,curTrialIndex):
        #self.checkExit()
        #self.experiment.disp.show()
        
        #random jitter prior to trial start
        libtime.pause(self.ISI+random.choice([0,100,200])) 
        
        
        
        #######start eye tracking##########
        if self.experiment.subjVariables['eyetracker']=="yes":
            self.experiment.tracker.start_recording()
            #log data on trial
            self.experiment.tracker.log("Experiment %s subjCode %s seed %s TrialNumber %s TrialType %s audio %s image %s" % (self.experiment.expName, self.experiment.subjVariables['subjCode'],str(self.experiment.subjVariables['seed']),str(curTrialIndex),curTrial['audioType'], curTrial['audio'],curTrial['image']))
        #start trial timer
        trialTimerStart=libtime.get_time()
        
        

        #create ag screen
        #agScreen=libscreen.Screen(disptype='psychopy')
        agScreenTime=libtime.get_time()  
        if self.experiment.subjVariables['eyetracker']=="yes":
            #log event
            self.experiment.tracker.log("agStart")
        
        agCount = 0
        keyBreak = False
        movPlaying = True
        self.mov.play()
        while self.mov.status != visual.FINISHED and libtime.get_time() - agScreenTime < self.AGTimeOut:
            self.mov.draw()
            self.experiment.win.flip()
            
            libtime.pause(10)
            
            if self.experiment.subjVariables['activeMode']=="input" and self.experiment.subjVariables['inputDevice']=="keyboard":
                for key in event.getKeys():
                    if key == 'space':
                        if self.mov.status == visual.PLAYING:
                            self.mov.pause()
                            self.experiment.win.flip()
                            movPlaying = False
                        keyBreak = True
                        
                if keyBreak:
                    break
            else:  
                if self.experiment.subjVariables['activeMode']=="gaze":
                    inputpos = self.experiment.tracker.sample()
                elif self.experiment.subjVariables['activeMode']=="input" and self.experiment.subjVariables['inputDevice']=="mouse":
                    inputpos = self.experiment.input.get_pos()
            
            
                if self.aoiCenterMovie.contains(inputpos):
                    agCount += 1
                if agCount > self.AGFixCount:
                    #if self.mov.status == visual.PLAYING:
                    break
                    
        if movPlaying:       
            if self.mov.status == visual.PLAYING:
                self.mov.pause()
                self.experiment.win.flip()
                    
        #print libtime.get_time() - agScreenTime
                
        if self.experiment.subjVariables['eyetracker']=="yes":
            #log event
            self.experiment.tracker.log("agEnd")
            
        #create starting screen
        startScreen=libscreen.Screen(disptype='psychopy')
        curPic=self.pictureMatrix[str(curTrial['image'])][0]
        curPicCoord=self.pos['center']
        curPic.setPos(curPicCoord)
        curPic.size = (300,300)
        curBox = self.pictureMatrix[str(curTrial['box'])][0]
        curBox.size = self.posDims
        curBox.pos = self.pos['center']
        buildScreenPsychoPy(startScreen,[self.rect,curPic,curBox,self.grayRect])
        
        #present starting screen
        setAndPresentScreen(self.experiment.disp, startScreen)
        startScreenTime=libtime.get_time()  
        if self.experiment.subjVariables['eyetracker']=="yes":
            #log event
            self.experiment.tracker.log("startScreen")
        
        libtime.pause(self.screenPause)
        
        
        #slide screen up
        for i in range(0,self.BoxStepCount+1,self.BoxStep):
            #set up box
            curBox.pos=(self.pos['center'][0],i)
            #add new screen
            curScreen=libscreen.Screen(disptype='psychopy')
            #add stimuli to the screen
            buildScreenPsychoPy(curScreen,[self.rect,curPic,curBox,self.grayRect])
            setAndPresentScreen(self.experiment.disp,curScreen)
            if i==self.BoxStepCount:
                screenUpTime=libtime.get_time()
                if self.experiment.subjVariables['eyetracker']=="yes":
                    #log screen slide event
                    self.experiment.tracker.log("screenUp")
        
        #play audio
        self.soundMatrix[curTrial['audio']].play()
        audioStartTime=libtime.get_time()
        if self.experiment.subjVariables['eyetracker']=="yes":
            #log audio event
            self.experiment.tracker.log("audioStart")
        
    
        
        ######Contingent Procedure######
        lookProcedureTimes = self.watchProcedure(curTrial,audioStartTime,curTrial['audioDur'],1000,self.aoiScreen,looming=True,curPic=curPic,stim1=self.rect,stim2=curBox,stim3=self.grayRect)
        #print lookProcedureTimes
        self.soundMatrix[curTrial['audio']].stop()
        self.experiment.disp.show()
        audioEndTime=libtime.get_time()-audioStartTime
        if self.experiment.subjVariables['eyetracker']=="yes":
            #log audio end event
            self.experiment.tracker.log("audioEnd")
        
        ######Stop Eyetracking######
        
        #trialEndTime
        trialTimerEnd=libtime.get_time()
        #trial time
        trialTime=trialTimerEnd-trialTimerStart
        if self.experiment.subjVariables['eyetracker']=="yes":
            #stop eye tracking
            self.experiment.tracker.stop_recording()
        
        #######Save data#########
        
        fieldVars=[]
        for curField in self.trialFieldNames:
            fieldVars.append(curTrial[curField])
   
        [header, curLine] = createRespNew(self.experiment.subjInfo, self.experiment.subjVariables, self.trialFieldNames, fieldVars,
                                        a_curTrialIndex=curTrialIndex,
                                        b_expTimer=trialTimerEnd,
                                        c_trialStart=trialTimerStart,
                                        d_trialTime=trialTime,
                                        e_totalTime = lookProcedureTimes[0],
                                        f_lookAways = lookProcedureTimes[1],
                                        g_totalLookingTime = lookProcedureTimes[2],
                                        h_totalLookingTimeNS = lookProcedureTimes[3],
                                        i_agScreenTime = agScreenTime,
                                        j_startScreenTime = startScreenTime,
                                        k_audioStartTime=audioStartTime,
                                        l_audioEndTime = audioEndTime
                                        )
        
        writeToFile(self.experiment.outputFile,curLine)
Example No. 27
# the statement creating `fix` is truncated in the original example; it was
# presumably a central marker along the lines of:
fix = visual.Circle(pygaze.expdisplay,
                    size=20,
                    pos=[0, 0])

target = visual.Circle(pygaze.expdisplay,
                    size = 20,
                    fillColor=[-1,-1,-1],
                    lineColor=[0,0,0],
                    pos = [100, 200])
distractor = visual.Circle(pygaze.expdisplay,
                    size = 20,
                    fillColor=[1, 1, 1],
                    lineColor=[1, 1, 1],
                    pos = [200, 100])

dist = 0

fixscreen = libscreen.Screen()
fixscreen.screen.append(fix)

targetscreen = libscreen.Screen()
targetscreen.screen.append(target)
targetscreen.screen.append(fix)

distractorscreen = libscreen.Screen()
distractorscreen.screen.append(target)
distractorscreen.screen.append(distractor)
distractorscreen.screen.append(fix)

resultscreen = libscreen.Screen()

tracker.calibrate()
Example No. 28
# # # # #
# setup the experiment

# create display object
disp = libscreen.Display(disptype='opensesame')

# create keyboard object
kb = libinput.Keyboard(disptype='opensesame', keylist=['left','right','escape'], timeout=2000)

# create logfile object
log = liblog.Logfile()
log.write(["trialnr", "trialtype", "response", "RT", "correct"])

# create screens
fixscreen = libscreen.Screen(disptype='opensesame')
fixscreen.draw_fixation(fixtype='cross',pw=2)
targetscreens = {}
targetscreens['left'] = libscreen.Screen(disptype='opensesame')
targetscreens['left'].draw_circle(pos=(w*0.25,h/2), fill=True)
targetscreens['right'] = libscreen.Screen(disptype='opensesame')
targetscreens['right'].draw_circle(pos=(w*0.75,h/2), fill=True)
feedbackscreens = {}
feedbackscreens[1] = libscreen.Screen(disptype='opensesame')
feedbackscreens[1].draw_text(text='correct', colour=(0,255,0))
feedbackscreens[0] = libscreen.Screen(disptype='opensesame')
feedbackscreens[0].draw_text(text='incorrect', colour=(255,0,0))

# # # # #
# run the experiment
# create display object
disp = libscreen.Display()

# create eyetracker object
tracker = eyetracker.EyeTracker(disp)

# create keyboard object
keyboard = libinput.Keyboard(keylist=['space'], timeout=None)

# create logfile object
log = liblog.Logfile()
log.write(["trialnr", "trialtype", "endpos", "latency", "correct"])

# create screens
inscreen = libscreen.Screen()
inscreen.draw_text(
    text=
    "When you see a cross, look at it and press space. Then make an eye movement to the black circle when it appears.\n\n(press space to start)",
    fontsize=24)
fixscreen = libscreen.Screen()
fixscreen.draw_fixation(fixtype='cross', pw=3)
targetscreens = {}
targetscreens['left'] = libscreen.Screen()
targetscreens['left'].draw_circle(pos=(int(constants.DISPSIZE[0] * 0.25),
                                       constants.DISPSIZE[1] / 2),
                                  fill=True)
targetscreens['right'] = libscreen.Screen()
targetscreens['right'].draw_circle(pos=(int(constants.DISPSIZE[0] * 0.75),
                                        constants.DISPSIZE[1] / 2),
                                   fill=True)
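This setup stops just before the trial loop; in the PyGaze saccade tutorial that this snippet follows, the screens are typically used along these lines (a sketch with illustrative trial counts; wait_for_saccade_end() returns the saccade's end time, start position and end position):

import random

trialtypes = ['left', 'right'] * 10
random.shuffle(trialtypes)
for trialnr, trialtype in enumerate(trialtypes):
    tracker.start_recording()
    disp.fill(fixscreen)
    disp.show()
    keyboard.get_key()  # participant confirms fixation with space
    disp.fill(targetscreens[trialtype])
    t0 = disp.show()    # show() returns the flip timestamp
    endtime, startpos, endpos = tracker.wait_for_saccade_end()
    latency = endtime - t0
    # the response is correct if the saccade landed on the cued side
    correct = (endpos[0] < constants.DISPSIZE[0] / 2) == (trialtype == 'left')
    log.write([trialnr, trialtype, endpos, latency, correct])
    tracker.stop_recording()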
Example No. 30
    def watchProcedure(self,curTrial,startTime,maxTime,maxLookAwayTime, aoi,looming=False,curPic=None,stim1=None,stim2=None,stim3=None):
        totalLookingTime = 0
        nonLookingTimes = []
        transitionNonLookingTimes = []
        lookAways = 0
        curNonLookingTime = 0
        looking = True
        nonLook = False
        curLookAwayTime = 0
        responded = True
        counter=0.0
        direction=1
        startValues=(300,300)
        loomCounter = 0
        transition=False
        #list to store last 150 ms of looking
        last150ms=[]
        #store current location to initiate checking of when looks go off screen
        if self.experiment.subjVariables['activeMode']=="gaze":
            lastInputpos = self.experiment.tracker.sample()
            if lastInputpos == self.lookAwayPos:
                transitionNonLookingTimeOnset = libtime.get_time()
        elif self.experiment.subjVariables['activeMode']=="input" and self.experiment.subjVariables['inputDevice']=="mouse":
            lastInputpos = self.experiment.input.get_pos()
            if lastInputpos == self.lookAwayPos:
                transitionNonLookingTimeOnset = libtime.get_time()
            
        while libtime.get_time() - startTime < maxTime and curLookAwayTime < maxLookAwayTime:
            if self.experiment.subjVariables['activeMode']=="input" and self.experiment.subjVariables['inputDevice']=="keyboard":
                for key in event.getKeys():
                    if key == 'space' and looking ==True:
                        responded = False
                        event.clearEvents()
                        transitionNonLookingTimeOnset = libtime.get_time()
                    elif key == 'space' and looking ==False:
                        responded = True
                        event.clearEvents()
                    
            else:
                libtime.pause(10)
                #get gaze/ mouse position
                if self.experiment.subjVariables['activeMode']=="gaze":
                    curInputpos = self.experiment.tracker.sample()
                elif self.experiment.subjVariables['activeMode']=="input":
                    curInputpos = self.experiment.input.get_pos()
                #mark transition time
                if curInputpos == self.lookAwayPos and lastInputpos != self.lookAwayPos:
                    transition=True
                    transitionNonLookingTimeOnset = libtime.get_time()
                else:
                    transition = False
                
                ####smoothing eyetracking/mousetracking sample###
                
                ##add cur gaze position to the list
                last150ms.append(curInputpos)
                #
                ##if the length of the list exceeds 150 ms / 25 ms == 6 samples, delete the earliest item in the list
                ## 25 ms because an average run through the while loop takes between 20-30 ms
                if len(last150ms)>6:
                    del last150ms[0]
                
                ##Now, remove the (no looking data) tuples
                last150msClean=[e for e in last150ms if e != self.lookAwayPos]
                ##Now calculate the mean
                if len(last150msClean)>0:
                    #calculate mean
                    #looks a bit tricky, but that's just because I think the gaze positions are stored as tuples, which is a bit of an odd data structure.
                    inputpos=tuple(map(lambda y: sum(y) / float(len(y)), zip(*last150msClean)))
                else:
                    inputpos=self.lookAwayPos
 
                ####smoothing procedure end###
                
                responded = aoi.contains(inputpos)
                                
                #update last gaze position
                lastInputpos = curInputpos
              
            if not responded and looking:
                nonLookingTimeOnset = libtime.get_time()
                looking = False
                lookAways +=1
                nonLook = True
                
            
            
            if responded:
                if not looking:
                    looking = True
                    nonLook = False
                    curNonLookingTime=libtime.get_time()-nonLookingTimeOnset
                    curTransitionNonLookingTime=libtime.get_time()- transitionNonLookingTimeOnset
                    nonLookingTimes.append(curNonLookingTime)
                    transitionNonLookingTimes.append(curTransitionNonLookingTime)
            
            if looking:
                curLookAwayTime = 0
            else:
                curLookAwayTime = libtime.get_time() - nonLookingTimeOnset
                if curLookAwayTime > maxLookAwayTime:
                    nonLookingTimes.append(curLookAwayTime)
                    curTransitionNonLookingTime=libtime.get_time()- transitionNonLookingTimeOnset
                    transitionNonLookingTimes.append(curTransitionNonLookingTime)
            
            if looming:
                  
                #update screen
                newScreen=libscreen.Screen(disptype='psychopy')
                counter +=1
                if counter > 100:
                    direction=(-1)*direction
                    counter = 0.0
                    startValues=(xSize,ySize)
                xSize = self.easeInOut(startValues[0],200.0,counter,100,direction)
                ySize = self.easeInOut(startValues[1],200.0,counter,100,direction)
                curPic.size = (xSize,ySize)
                buildScreenPsychoPy(newScreen,[stim1,curPic,stim2,stim3])
                setAndPresentScreen(self.experiment.disp, newScreen)
            
                
                    
        totalTime=libtime.get_time()-startTime
        totalLookingTime = totalTime - sum(nonLookingTimes)
        totalLookingTimeNonSmoothed = totalTime - sum(transitionNonLookingTimes)
        return [totalTime, lookAways, totalLookingTime,totalLookingTimeNonSmoothed]
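The "smoothing" block in the middle of watchProcedure keeps roughly the last 150 ms of samples and averages the valid ones, falling back to the look-away sentinel when no valid sample remains. The same idea as a self-contained sketch (the class and names are ours, not from the source):

from collections import deque

LOOK_AWAY = (-1920, -1200)  # sentinel for 'no gaze data', as in the class above

class GazeSmoother:
    # moving average over the last n samples, ignoring look-away sentinels
    def __init__(self, n=6):  # ~150 ms at roughly one sample per 25 ms loop pass
        self.samples = deque(maxlen=n)

    def update(self, pos):
        self.samples.append(pos)
        valid = [p for p in self.samples if p != LOOK_AWAY]
        if not valid:
            return LOOK_AWAY
        return (sum(p[0] for p in valid) / len(valid),
                sum(p[1] for p in valid) / len(valid))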