Example #1
frl = FRL(pos='center', dist=125, size=200)

# input collection and storage
kb = Keyboard(keylist=['escape', 'space'], timeout=None)
log = Logfile()
log.write(["trialnr", "trialstart", "trialend", "duration", "image"])

# run trials
tracker.calibrate()
for trialnr in range(len(IMAGES)):
    # blank display
    disp.fill()
    disp.show()
    libtime.pause(1000)
    # prepare stimulus
    scr.clear()
    scr.draw_image(IMAGES[trialnr])
    # start recording eye movements
    tracker.drift_correction()
    tracker.start_recording()
    tracker.status_msg("trial %d" % trialnr)
    tracker.log("start trial %d" % trialnr)
    # present stimulus
    response = None
    trialstart = libtime.get_time()
    while not response:
        gazepos = tracker.sample()
        frl.update(disp, scr, gazepos)
        response, presstime = kb.get_key(timeout=1)
    # stop tracking and log the trial (matches the header written above)
    tracker.stop_recording()
    tracker.log("stop trial %d" % trialnr)
    trialend = libtime.get_time()
    log.write([trialnr, trialstart, trialend, trialend - trialstart, IMAGES[trialnr]])
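
The excerpt above assumes the usual PyGaze setup before it and a matching teardown after it. A minimal sketch of those missing pieces; IMAGES, the file names, and the import paths (which vary between PyGaze versions) are assumptions:

# sketch of the setup this excerpt assumes (names and paths are assumptions)
from pygaze import libtime
from pygaze.display import Display
from pygaze.screen import Screen
from pygaze.eyetracker import EyeTracker
from pygaze.libinput import Keyboard
from pygaze.liblog import Logfile
from pygaze.plugins.frl import FRL  # plugin location may differ per version

IMAGES = ['image1.png', 'image2.png']  # hypothetical stimulus list

libtime.expstart()
disp = Display()
scr = Screen()
tracker = EyeTracker(disp)

# ... the excerpt runs here; after the trial loop, clean up:
log.close()
tracker.close()
disp.close()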
Example #2
import random

# imports assumed by this excerpt (module paths as in recent PyGaze versions)
from pygaze.display import Display
from pygaze.screen import Screen
from pygaze.libinput import Keyboard
from pygaze.eyetracker import EyeTracker

# # # # #
# prep

# create keyboard object
keyboard = Keyboard()

# display object
disp = Display()

# screen objects
screen = Screen()
blankscreen = Screen()
hitscreen = Screen()
hitscreen.clear(colour=(0,255,0))
misscreen = Screen()
misscreen.clear(colour=(255,0,0))

# create eyelink object
eyetracker = EyeTracker(disp)

# eyelink calibration
eyetracker.calibrate()

# display surface
disp.fill(screen=blankscreen)
disp.show()

# # # # #
# game
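
The excerpt stops at the '# game' marker. A minimal sketch of a gaze-contingent round that fits the objects set up above; the target position, hit radius, and round length are assumptions, not the original game:

# sketch of a possible game loop (assumptions noted inline)
from pygaze import libtime  # assumed import

target = (disp.dispsize[0] // 2, disp.dispsize[1] // 2)  # hypothetical target
screen.clear()
screen.draw_fixation(fixtype='dot', pos=target, diameter=20)
disp.fill(screen)
disp.show()

eyetracker.start_recording()
start = libtime.get_time()
while libtime.get_time() - start < 5000:  # one 5-second round (assumed)
    gazepos = eyetracker.sample()
    dist = ((gazepos[0] - target[0]) ** 2 + (gazepos[1] - target[1]) ** 2) ** 0.5
    # green feedback when gaze is within 100 px of the target, red otherwise
    disp.fill(hitscreen if dist < 100 else misscreen)
    disp.show()
    if keyboard.get_key(timeout=1)[0] == 'escape':
        break
eyetracker.stop_recording()
eyetracker.close()
disp.close()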
Example #3
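This excerpt starts inside a loop. A sketch of the context it assumes follows; the MP150Stub class is a hypothetical stand-in for whatever analog-input wrapper produced `sample` (only `mp.close()` appears in the original):

import random

from pygaze.display import Display
from pygaze.screen import Screen
from pygaze.libinput import Keyboard

class MP150Stub:
    """Hypothetical stand-in for the BIOPAC MP150 wrapper this excerpt uses."""
    def sample(self):
        return [random.random()]  # one normalised (0-1) channel value
    def close(self):
        pass

disp = Display()
scr = Screen()
kb = Keyboard(keylist=['escape'], timeout=1)
mp = MP150Stub()

key = None
while key is None:
    # read the newest sample and turn it into a text label
    sample = mp.sample()
    sampletext = "signal: %.2f" % sample[0]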
    # Multiply the sample by 255 to get a colour value,
    # and make sure it's an integer between 0 and 255.
    red = int(sample[0] * 255)
    # Make sure red is between 0 and 255.
    if red < 0:
        red = 0
    elif red > 255:
        red = 255
    # The other colour values will be 0
    green = 0
    blue = 0
    # Now set the background colour to the new colour.
    bgc = (red, green, blue)

    # Fill the Screen with the new background colour...
    scr.clear(colour=bgc)
    # ...and write the new sample text (white letters).
    scr.draw_text(text=sampletext, colour=(255,255,255), fontsize=100)

    # Now fill the Display with the updated Screen...
    disp.fill(scr)
    # ...and update the monitor!
    disp.show()

    # Don't forget to check if there is a keypress.
    key, presstime = kb.get_key()


# Close the connection with the MP150.
mp.close()
 
Example #5
# your message
MESSAGE = "AFK; BRB"

# import stuff
import random
from pygaze.defaults import *
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard

# objects
disp = Display()
scr = Screen()
kb = Keyboard(keylist=['space'], timeout=1)

# run annoying message
while kb.get_key()[0] is None:
	# colour
	col = (random.randint(0,255), random.randint(0,255), random.randint(0,255))
	# position
	pos = (random.randint(0,DISPSIZE[0]), random.randint(0,DISPSIZE[1]))
	# text
	scr.draw_text(text=MESSAGE, colour=col, pos=pos, fontsize=84)
	# display
	disp.fill(scr)
	disp.show()
	# reset screen
	scr.clear()
	
# stop the madness
disp.close()
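
Since `pos` can land at the very edge of the display, long messages may render partly off-screen. A small hardening sketch; the 100 px margin is an arbitrary choice, not part of the original:

# keep the text away from the display edges (margin is an assumption)
pos = (random.randint(100, DISPSIZE[0] - 100), random.randint(100, DISPSIZE[1] - 100))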
Example #6
# imports assumed by this excerpt (PyGaze-style paths)
from random import shuffle
import numpy as np
from pygaze.screen import Screen

# shuffle the image set
shuffle(image_set)

# give instructions first
instruction_screen = Screen()
instruction_screen.draw_text(
    text=
    "You will watch a short clip. After, the trials will begin. \n Press space to continue",
    pos=center_of_screen,
    colour=(255, 255, 255),
    fontsize=22)
while keyboard.get_key()[0] != "space":
    disp.fill(instruction_screen)
    disp.show()

instruction_screen.clear()

#call movie function - will need to switch between neutral and sad
#INSERT CODE HERE

# start trials
for trialnr in range(len(image_set)):
    # make trial screens
    fixation_cross_screen = Screen()
    fixation_cross_screen.draw_fixation(fixtype='cross',
                                        pos=center_of_screen,
                                        colour=(255, 255, 255),
                                        pw=5,
                                        diameter=30)
    number_screen = Screen()
    number_screen.draw_text(text=str(np.random.randint(1, 10)),
                            pos=center_of_screen,
                            colour=(255, 255, 255),
                            fontsize=40)
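    # continuation sketch: the excerpt cuts off here; Example #7 below shows
    # the intended trial flow (fixation cross 500 ms, then the number screen)
    disp.fill(fixation_cross_screen)
    disp.show()
    libtime.pause(500)
    disp.fill(number_screen)
    disp.show()
    libtime.pause(1000)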
Example #7
# imports assumed by this excerpt; generate_trial_images and the *_suffix
# constants are defined elsewhere in the project
import time
import random
import numpy as np
from pygaze import libtime
from pygaze.screen import Screen

def acclimation(center_of_screen, tracker, disp, keyboard, AOI_left, AOI_right):
    image_set = generate_trial_images()

    #start trials
    for index in range(0,len(image_set)):
        # make trial screens
        fixation_cross_screen = Screen()
        fixation_cross_screen.draw_fixation(fixtype='cross', pos=center_of_screen, colour=(255,255,255), pw=5, diameter=30)
        number_screen = Screen()
        number_screen.draw_text(text=str(np.random.randint(1,10)),pos = center_of_screen, colour=(255,255,255), fontsize=40)
        face_pair_screen = Screen()
        disengagement_screen = Screen()

        # start with a blank screen for 500 ms and start recording
        disp.fill()
        disp.show()
        tracker.start_recording()
        tracker.log("start_trial %d" %index)
        trialstart = libtime.get_time()
        libtime.pause(500)

        # fixation cross screen
        disp.fill(fixation_cross_screen)
        disp.show()
        libtime.pause(500)
        fixation_cross_screen.clear()

        # number screen
        disp.fill(number_screen)
        disp.show()
        libtime.pause(1000)
        number_screen.clear()

        #draws image pair
        image_pair = image_set[index]
        face_pair_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
        face_pair_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width
        disp.fill(face_pair_screen)
        disp.show()
        
        neutral_image_index = 0
        if ("NE" in image_pair[1]):
            neutral_image_index = 1
        
        # while loop capturing looking time per image (AOI) for 3000 ms
        start_time_taken = time.time() * 1000
        total_time_taken = 0
        time_neutral = 0
        time_emotional = 0
        last_pass_time_stamp = (time.time() * 1000) - start_time_taken
        last_pass_time_taken = 0

        first_image = 0

        count_fixation_on_emotional = 0
        last_fixation_on_emotional = False
        while total_time_taken < 3000:
            pressed_key = keyboard.get_key()[0]
            if pressed_key == 'q':
                break

            tracker_pos = tracker.sample()
            
            if AOI_right.contains(tracker_pos):
                #Add time
                if neutral_image_index == 0:
                    time_emotional = time_emotional + last_pass_time_taken
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True
                else:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
                    
            elif AOI_left.contains(tracker_pos):
                #Add time
                if neutral_image_index == 0:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
                else:
                    time_emotional = time_emotional + last_pass_time_taken
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True


            last_pass_time_taken = (time.time() * 1000) - last_pass_time_stamp
            last_pass_time_stamp = (time.time() * 1000)
            total_time_taken = (time.time() * 1000) - start_time_taken

        if pressed_key == 'q':
            break

        # image_pair[2] tells us whether this is a swap trial (draw circle/square)

        if image_pair[2]:
            # randomly pick the circle or the square version of the neutral face
            new_suffix = circle_suffix
            if random.choice([True, False]):
                new_suffix = square_suffix

            image_pair[neutral_image_index] = image_pair[neutral_image_index].replace(regular_suffix, new_suffix)

            disengagement_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
            disengagement_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width

            while keyboard.get_key()[0] is None:
                start_pos = tracker.sample()
                # neutral face on the left: wait for a fixation on the right
                # (emotional) image before swapping in the modified pair
                if neutral_image_index == 0:
                    if AOI_right.contains(start_pos):
                        disengagement_start_time = libtime.get_time()


                        # fixation registered here: present the swapped images
                        face_pair_screen.clear()
                        disp.fill(disengagement_screen)
                        disp.show()
                        
                        # wait until gaze disengages to the left image
                        while True:
                            start_pos = tracker.sample()
                            if AOI_left.contains(start_pos):
                                disengagement_end_time = libtime.get_time()
                                print("Disengagement time: " + str(disengagement_end_time - disengagement_start_time))
                                break
                        break

                # neutral face on the right: mirror image of the branch above
                if neutral_image_index == 1:
                    if AOI_left.contains(start_pos):
                        disengagement_start_time = libtime.get_time()
                        face_pair_screen.clear()
                        disp.fill(disengagement_screen)
                        disp.show()
                        
                        # wait until gaze disengages to the right image
                        while True:
                            start_pos = tracker.sample()
                            if AOI_right.contains(start_pos):
                                disengagement_end_time = libtime.get_time()
                                print("Disengagement time: " + str(disengagement_end_time - disengagement_start_time))
                                break
                        break
        # (no early 'continue' for non-swap trials: every trial must reach the
        # stop_recording/logging block below, otherwise recording never stops)


        # end trial
        trialend = libtime.get_time()
        tracker.stop_recording()
        tracker.log("stop trial %d" % index)


        # log information in the end
        # add a way out (quit if pressing q)
        if keyboard.get_key()[0] == "q":
            break
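        # sketch of the missing logging step: 'log' is hypothetical, e.g. a
        # pygaze Logfile created elsewhere with a matching header row
        log.write([index, trialstart, trialend, trialend - trialstart,
                   time_neutral, time_emotional, count_fixation_on_emotional])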
Example #8
# imports assumed by this excerpt; drawCoreImage, showCalibrationResults and
# the 'clock' timer are project-level helpers defined elsewhere
import os
import sys
import copy
import pygaze
from pygaze.screen import Screen
from pygaze._eyetracker.libtobii import TobiiProTracker
from psychopy import visual, sound
import tobii_research as tr
import constants as c  # experiment constants module (assumed name)

class InfantTobiiTracker(TobiiProTracker):
    """A customised version of the pygaze TobiiProTracker.class for Tobii Pro EyeTracker objects
        display	--	a pygaze.display.Display instance
        -------------------
        TobiiProTracker docs: https://github.com/esdalmaijer/PyGaze/blob/5fd62ef10b04015552c61297306b6db251235e02/pygaze/_eyetracker/libtobii.py#L18
    """
    def __init__(self,
                 display,
                 logfile,
                 eventdetection=c.EVENTDETECTION,
                 saccade_velocity_threshold=35,
                 saccade_acceleration_threshold=9500,
                 blink_threshold=c.BLINKTHRESH,
                 **args):

        # redefining __init__, so we must explicitly call the superclass'
        # __init__ and forward the arguments we received
        TobiiProTracker.__init__(self,
                                 display,
                                 logfile,
                                 eventdetection=eventdetection,
                                 saccade_velocity_threshold=saccade_velocity_threshold,
                                 saccade_acceleration_threshold=saccade_acceleration_threshold,
                                 blink_threshold=blink_threshold,
                                 **args)

        # initialize screens
        self.screen = Screen(dispsize=self.disp.dispsize)
        self.c_screen = Screen(dispsize=self.disp.dispsize)

        self.screen.set_background_colour(colour=(0, 0, 0))

        self.points_to_calibrate = [
            self._norm_2_px(p)
            for p in [(0.5, 0.5), (0.1, 0.9), (0.1, 0.1), (0.9, 0.9), (0.9, 0.1)]
        ]

        self.datafilepath = "{0}_TOBII_output.tsv".format(logfile)
        self.datafile = open(self.datafilepath, 'w')

        # create handle for psychopy window for pre-calibration video
        self.video_win = pygaze.expdisplay
        self.video_win.mouseVisible = False
        self.video_win.size = self.disp.dispsize

    # new method
    def preCalibrate(self):
        """Helps position the infant while playing a video.
        returns
            Boolean indicating whether the positioning is done (True: 'space' has been pressed)
        """
        self._write_enabled = False
        self.start_recording()

        # origin: top-left corner of precalibration box; size: tuple of lengths for box sides
        origin = (int(self.disp.dispsize[0] / 4),
                  int(self.disp.dispsize[1] / 4))
        size = (int(2 * self.disp.dispsize[0] / 4),
                int(2 * self.disp.dispsize[1] / 4))

        # Initialise a PsychoPy MovieStim
        mov = visual.MovieStim3(self.video_win, c.CALIBVIDEO, flipVert=False)

        #        print("------------> Pre-calibration process started.")
        print(
            "\t-> When correctly positioned, press \'space\' to start the calibration."
        )

        while mov.status != visual.FINISHED:
            if not self.gaze:
                continue

            self.screen.clear()

            # Add the MovieStim to a PyGaze Screen instance.
            self.screen.screen.append(mov)

            # self.gaze is a list of gaze_data dicts appended by the Tobii callback
            gaze_sample = copy.copy(self.gaze[-1])  # latest gazepoint

            validity_colour = (255, 0, 0)

            if gaze_sample['right_gaze_origin_validity'] and gaze_sample[
                    'left_gaze_origin_validity']:
                left_validity = 0.15 < gaze_sample[
                    'left_gaze_origin_in_trackbox_coordinate_system'][2] < 0.85
                right_validity = 0.15 < gaze_sample[
                    'right_gaze_origin_in_trackbox_coordinate_system'][2] < 0.85
                if left_validity and right_validity:
                    validity_colour = (0, 255, 0)

            self.screen.draw_line(colour=validity_colour,
                                  spos=origin,
                                  epos=(origin[0] + size[0], origin[1]),
                                  pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=origin,
                                  epos=(origin[0], origin[1] + size[1]),
                                  pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=(origin[0], origin[1] + size[1]),
                                  epos=(origin[0] + size[0],
                                        origin[1] + size[1]),
                                  pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=(origin[0] + size[0],
                                        origin[1] + size[1]),
                                  epos=(origin[0] + size[0], origin[1]),
                                  pw=1)

            right_eye, left_eye, distance = None, None, []
            if gaze_sample['right_gaze_origin_validity']:
                distance.append(
                    round(
                        gaze_sample[
                            'right_gaze_origin_in_user_coordinate_system'][2] /
                        10, 1))
                right_pos = gaze_sample[
                    'right_gaze_origin_in_trackbox_coordinate_system']
                right_eye = ((1 - right_pos[0]) * size[0] + origin[0],
                             right_pos[1] * size[1] + origin[1])
                self.screen.draw_circle(colour=validity_colour,
                                        pos=right_eye,
                                        r=int(self.disp.dispsize[0] / 100),
                                        pw=5,
                                        fill=True)

            if gaze_sample['left_gaze_origin_validity']:
                distance.append(
                    round(
                        gaze_sample[
                            'left_gaze_origin_in_user_coordinate_system'][2] /
                        10, 1))
                left_pos = gaze_sample[
                    'left_gaze_origin_in_trackbox_coordinate_system']
                left_eye = ((1 - left_pos[0]) * size[0] + origin[0],
                            left_pos[1] * size[1] + origin[1])
                self.screen.draw_circle(colour=validity_colour,
                                        pos=left_eye,
                                        r=int(self.disp.dispsize[0] / 100),
                                        pw=5,
                                        fill=True)

            # only report the distance if at least one eye was tracked
            if distance:
                self.screen.draw_text(
                    text="Current distance to the eye tracker: {0} cm.".format(
                        self._mean(distance)),
                    pos=(int(self.disp.dispsize[0] / 2),
                         int(self.disp.dispsize[1] * 0.9)),
                    colour=(255, 255, 255),
                    fontsize=20)

            self.disp.fill(self.screen)
            self.disp.show()

            key = self._getKeyPress()
            if key == "space":
                break

        # exited early: 'space' was pressed before the movie finished
        if mov.status != visual.FINISHED:
            # pause and discard the video so that the audio stops as well
            mov.pause()
            self.screen.screen.remove(mov)
            del mov
            self.screen.clear()
            clock.pause(1000)
            return True
        else:
            return False

    # overridden method
    def calibrate(self, eventlog, calibrate=True):
        """Calibrates the eye tracker with custom child-friendly screens.
        arguments
            eventlog          --    logfile instance
        keyword arguments
            calibrate    --    Boolean indicating if calibration should be
                        performed (default = True).
        returns
            success    --     nowt, but a calibration log is added to the
                        log file and some properties are updated (i.e. the
                        thresholds for detection algorithms)
        """

        # calculate thresholds (degrees to pixels); currently not used further
        self.pxfixtresh = self._deg2pix(self.screendist, self.fixtresh,
                                        self.pixpercm)
        # in pixels per millisecond
        self.pxspdtresh = self._deg2pix(self.screendist,
                                        self.spdtresh / 1000.0, self.pixpercm)
        # in pixels per millisecond**2
        self.pxacctresh = self._deg2pix(self.screendist,
                                        self.accthresh / 1000.0, self.pixpercm)

        # calibration image file
        calibImg = c.CALIBIMG

        # initialize a sound
        snd = sound.Sound(value=c.CALIBSOUNDFILE)
        snd.setVolume(0.5)

        # image scaling range
        bit = 0.02
        scale_range = ([x / 100.0 for x in range(60, 30, -2)] +
                       [x / 100.0 for x in range(30, 60, 2)])

        if calibrate:

            if not self.eyetracker:
                print(
                    "WARNING! libtobii.TobiiProTracker.calibrate: no eye trackers found for the calibration!"
                )
                self.stop_recording()
                return False

            # Tobii calibration object
            calibration = tr.ScreenBasedCalibration(self.eyetracker)

            calibrating = True
            calibration.enter_calibration_mode()

            while calibrating:

                eventlog.write(["Calibration started at ", clock.get_time()])

                # self.points_to_calibrate holds pixel positions computed in
                # __init__ from the normalised points; on a 1920x1200 display:
                # [(960, 600), (192, 1080), (192, 120), (1728, 1080), (1728, 120)]

                # calibration for all calibration points
                for i in range(len(self.points_to_calibrate)):

                    point = self.points_to_calibrate[i]

                    eventlog.write([
                        "\nCalibrating point {0} at: ".format(point),
                        clock.get_time()
                    ])

                    # play the soundfile
                    snd.play()

                    # shrink
                    scale = 1
                    # 20 frames -> ~1/3 s of shrinking (image from 180 px down to 108 px)
                    for frameN in range(20):

                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg,
                                                 pos=point,
                                                 scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()
                        scale = scale - bit

                    # grow and shrink until 'space' is pressed
                    s = 0
                    # pulse between 108 px and 54 px (15 frames each way, per
                    # scale_range) until 'space' is pressed
                    for frameN in range(12000):

                        s = frameN % 30
                        scale = scale_range[s]
                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg,
                                                 pos=point,
                                                 scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()

                        if self.kb.get_key(keylist=['space'],
                                           timeout=10,
                                           flush=False)[0] == 'space':
                            break

                    # collect results for point (Tobii)
                    normalized_point = self._px_2_norm(point)
                    collect_result = calibration.collect_data(
                        normalized_point[0], normalized_point[1])
                    eventlog.write([
                        "Collecting result for point {0} at: ".format(point),
                        clock.get_time()
                    ])

                    if collect_result != tr.CALIBRATION_STATUS_SUCCESS:
                        eventlog.write([
                            "Recollecting result for point {0} at: ".format(
                                point),
                            clock.get_time()
                        ])
                        # Try again if it didn't go well the first time.
                        # Not all eye tracker models will fail at this point, but instead fail on ComputeAndApply.
                        calibration.collect_data(normalized_point[0],
                                                 normalized_point[1])

                    # grow back to original size
                    up_scale = [
                        x / 100.0 for x in range(int(scale * 100), 100, 2)
                    ]
                    for scale in up_scale:
                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg,
                                                 pos=point,
                                                 scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()

                    # image rolling to next point
                    # pixelised self.points_to_calibrate = [(960, 600), (192, 1080), (192, 120), (1728, 1080), (1728, 120)]
                    if i < len(self.points_to_calibrate) - 1:
                        """
                        screen ratio: 16/10
                        -> the steps for moving the images should be (16, 10) or (8, 5)
                        """
                        # center -> bottom left / (960, 600) -> (192, 1080) - 48 frames
                        while point[0] >= self.points_to_calibrate[i + 1][0]:
                            self.c_screen.clear()
                            point = (point[0] - 16, point[1] + 10)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()

                        # bottom-left -> top-left / (192, 1080) -> (192, 120)
                        # AND
                        # bottom-right -> top-right / (1728, 1080) -> (1728, 120) - 80 frames
                        while point[1] > self.points_to_calibrate[i + 1][1]:
                            self.c_screen.clear()
                            point = (point[0], point[1] - 12)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()

                        # top-left -> bottom-right / (192, 120) -> (1728, 1080) - 96 frames
                        while (point[0] < self.points_to_calibrate[i + 1][0]
                               and point[1] != self.points_to_calibrate[i + 1][1]):
                            self.c_screen.clear()
                            point = (point[0] + 16, point[1] + 10)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()

                # Tobii
                calibration_result = calibration.compute_and_apply()

                eventlog.write([
                    "\nCompute and apply returned {0} and collected at {1} points.\n"
                    .format(calibration_result.status,
                            len(calibration_result.calibration_points))
                ])
                print("\tCalibration: {0} - collected at {1} points.".format(
                    calibration_result.status,
                    len(calibration_result.calibration_points)))

                # Post-calibration image (while control monitor shows calibration results)
                self.c_screen.clear()
                self.c_screen.draw_image(c.ATT_IMG)
                self.disp.fill(self.c_screen)
                self.disp.show()

                if calibration_result.status != tr.CALIBRATION_STATUS_SUCCESS:
                    eventlog.write([
                        "\n\nWARNING! libtobii.TobiiProTracker.calibrate: Calibration was unsuccessful!\n\n"
                    ])
                    print("""\tCalibration was unsuccessful.\n
                          ->Press 'R' to recalibrate all points
                          ->or 'SPACE' to continue without calibration\n""")
                    key = self.kb.get_key(keylist=['space', 'r'],
                                          timeout=None)[0]
                    if key == 'r':
                        recalibration_points = [0]
                    elif key == 'space':
                        recalibration_points = []

                else:
                    # call showCalibrationResults function to present the results on screen 0. The function returns a list of recalibration points
                    logfile_dir = os.path.dirname(
                        os.path.abspath(self.datafilepath))
                    recalibration_points = showCalibrationResults(
                        logfile_dir, calibration_result)

                # if the list is empty, calibration is finished
                if len(recalibration_points) == 0:
                    eventlog.write(
                        ["\nCalibration finished at ",
                         clock.get_time()])
                    calibrating = False

                # if the list contains only '0', the calibration was
                # unsuccessful: recalibrate all points
                elif recalibration_points[0] == 0:
                    eventlog.write(["\nRecalibrating all points..."])
                    calibrating = True

                # if the list contains only '1', recalibrate all points despite
                # a successful calibration
                elif recalibration_points[0] == 1:
                    eventlog.write(["\nRecalibrating all points..."])
                    for point in self.points_to_calibrate:
                        # discard_data expects normalised coordinates, like collect_data
                        normalized_point = self._px_2_norm(point)
                        calibration.discard_data(normalized_point[0],
                                                 normalized_point[1])
                    calibrating = True

                # recalibrate the returned points
                else:
                    eventlog.write([
                        "\nRecalibrating {0} points...".format(
                            len(recalibration_points))
                    ])
                    # discard the collected data for these points; discard_data
                    # expects normalised coordinates, like collect_data
                    for norm_point in recalibration_points:
                        calibration.discard_data(norm_point[0], norm_point[1])
                    self.points_to_calibrate = [
                        self._norm_2_px(p) for p in recalibration_points
                    ]
                    calibrating = True

            calibration.leave_calibration_mode()
            eventlog.write([" Leaving calibration mode...", clock.get_time()])

        self.stop_recording()
        self._write_enabled = True
        self.disp.close()  # leaving the pygaze display

    def _getKeyPress(self):
        """Returns 'space' when pressed (None otherwise); exits on 'escape'."""
        key = self.kb.get_key(keylist=['space', 'escape'], flush=False)[0]
        if key == 'escape':
            self.disp.close()
            self.close()
            sys.exit()
        return key
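
A minimal usage sketch for the class above; the file names are assumptions, and note that calibrate() closes the PyGaze display when it finishes:

from pygaze.display import Display
from pygaze.liblog import Logfile

disp = Display()
tracker = InfantTobiiTracker(disp, 'sub01')   # 'sub01' is a hypothetical logfile basename
eventlog = Logfile(filename='sub01_events')   # hypothetical event-log name
if tracker.preCalibrate():                    # position the infant while a video plays
    tracker.calibrate(eventlog)               # child-friendly calibration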
Example #9
# (the excerpt begins mid-call; reconstructed from the parallel code in Example #7)
screen1 = Screen()
screen1.draw_fixation(fixtype='cross',
                      pos=center_of_screen,
                      colour=(255, 255, 255),
                      pw=5,
                      diameter=30)
screen2 = Screen()
screen3 = Screen()


# calibrate eye tracker
tracker.calibrate()

# loop over the image pairs
for image_pair in all_image_set:
    screen1.clear()

    #draws image pair
    screen1.draw_image(image_pair[0],
                       pos=(center_of_screen[0] - 300, center_of_screen[1]),
                       scale=None)  #need screen width
    screen1.draw_image(image_pair[1],
                       pos=(center_of_screen[0] + 300, center_of_screen[1]),
                       scale=None)  #need screen width

    # record when the pair comes up; the keypress wait below is meant to be
    # replaced with 3 seconds of free viewing
    pairstart = libtime.get_time()

    if image_pair[2]:  # if we have the addition, wait for fixation. REPLACE THE NEXT LINE
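        # continuation sketch (an assumption, not the original code): show the
        # pair, then wait up to 3000 ms for gaze to land on the modified image;
        # the 163 px half-width is an assumed image half-size
        disp.fill(screen1)
        disp.show()
        while libtime.get_time() - pairstart < 3000:
            gx, gy = tracker.sample()
            if (abs(gx - (center_of_screen[0] + 300)) < 163
                    and abs(gy - center_of_screen[1]) < 163):
                break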