Code example #1
File: __init__.py  Project: skylergrammer/gaze_task
    def __init__(self, screen_params, display, images,
                 n_iters=10,
                 delta_t=100,
                 eyetracker=None):

        self.display = display
        self.images = images
        self.delta_t = float(delta_t)
        self.n_iters = n_iters
        self.eyetracker = eyetracker

        # Create screens for both images now, so that switching between them is fast later
        self.scrn1 = Screen(**screen_params)
        self.scrn1.draw_image(images[0])
        self.scrn2 = Screen(**screen_params)
        self.scrn2.draw_image(images[1])

        if self.eyetracker is not None:
            self.eyetracker.start_recording()
        else:
            sys.exit('ERROR: must attach eyetracker object!')
Code example #2
    def __init__(self,
                 display,
                 logfile,
                 eventdetection=c.EVENTDETECTION,
                 saccade_velocity_threshold=35,
                 saccade_acceleration_threshold=9500,
                 blink_threshold=c.BLINKTHRESH,
                 **args):

        # __init__ is redefined above, so we must explicitly call the superclass' init,
        # passing through the received keyword arguments rather than re-hardcoding them
        TobiiProTracker.__init__(self,
                                 display,
                                 logfile,
                                 eventdetection=eventdetection,
                                 saccade_velocity_threshold=saccade_velocity_threshold,
                                 saccade_acceleration_threshold=saccade_acceleration_threshold,
                                 blink_threshold=blink_threshold,
                                 **args)

        # initialize screens
        self.screen = Screen(dispsize=self.disp.dispsize)
        self.c_screen = Screen(dispsize=self.disp.dispsize)

        self.screen.set_background_colour(colour=(0, 0, 0))

        self.points_to_calibrate = [
            self._norm_2_px(p)
            for p in [(0.5, 0.5), (0.1, 0.9), (0.1, 0.1), (0.9, 0.9), (0.9, 0.1)]
        ]

        self.datafilepath = "{0}_TOBII_output.tsv".format(logfile)
        self.datafile = open(self.datafilepath, 'w')

        # create handle for psychopy window for pre-calibration video
        self.video_win = pygaze.expdisplay
        self.video_win.mouseVisible = False
        self.video_win.size = self.disp.dispsize
Code example #3
run = r.RUN

DISPSIZE = (res[0],res[1])

# find the interstimulus distance, based on resolution and viewing distance, for
# a 4-degree viewing angle; since PsychoPy positions stimuli by their centerpoint,
# add 128 px (half of the stimulus width)
base_dist = (2 * dist * math.tan(math.radians(4)/2))
base_dist_half = base_dist / 2
pixpcm = res[0] / res[2]
base_dist_pix = int(base_dist_half * pixpcm) + 128
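
# The same degrees-to-pixels conversion can be wrapped in a small helper.
# A minimal sketch (the helper name deg2pix is illustrative, not part of the
# original script; math is already imported above):
def deg2pix(angle_deg, view_dist_cm, pix_per_cm):
    # on-screen size subtended by angle_deg: 2 * d * tan(theta / 2)
    size_cm = 2 * view_dist_cm * math.tan(math.radians(angle_deg) / 2)
    return int(size_cm * pix_per_cm)

# e.g. half of the 4-degree separation plus half of a 256 px wide stimulus:
# base_dist_pix = deg2pix(4, dist, pixpcm) // 2 + 128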

# In[Initiate PyGaze Objects]:
disp = Display(disptype='psychopy')
scr = Screen(disptype='psychopy')

if inScanner or withTracker:
    kb = Keyboard()

if withTracker:
    tracker = EyeTracker(disp)
    
    DISPSIZE = cst.DISPSIZE
    
    fps_str = str(flicker).replace('.','_')
    basename = "{}_{}".format(subid, fps_str)
    
    LOGFILENAME = basename + '_eyetracker'
    director = os.getcwd()
    LOGFILE = os.path.join(director,LOGFILENAME)
Code example #4
# your message
MESSAGE = "AFK; BRB"

# import stuff
import random
from pygaze.defaults import *
from pygaze import libtime
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard

# start timing
libtime.expstart()

# objects
disp = Display()
scr = Screen()
kb = Keyboard(keylist=['space'], timeout=1)

# run annoying message
while kb.get_key()[0] is None:
    # colour
    col = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
    # position
    pos = (random.randint(0, DISPSIZE[0]), random.randint(0, DISPSIZE[1]))
    # text
    scr.draw_text(text=MESSAGE, colour=col, pos=pos, fontsize=84)
    # display
    disp.fill(scr)
    disp.show()
    # reset screen
    scr.clear()
Code example #5
import random

# PyGaze
from constants import *
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard
from pygaze.eyetracker import EyeTracker
from pygaze.liblog import Logfile
import pygaze.libtime as timer
from pygaze.plugins.aoi import AOI
# # # # #
# SETUP

# visuals
disp = Display()
scr = Screen()

# input
kb = Keyboard()
tracker = EyeTracker(disp)

# output
log = Logfile(filename="fs_test")
log_sub = Logfile(filename="fs_test_sub")
log.write(["ntrials", "image", "gaze_pos_x", "gaze_pos_y", "gaze_time"])
log_sub.write(["ntrials", "image", "is_found", "search_time"])
# # # # #
# PREPARE

# load instructions from file
instfile = open(INSTFILE)
Code example #6
    os.mkdir(subject_folder_path)
if not os.path.exists(session_folder_path):
    os.mkdir(session_folder_path)
if not os.path.exists(fmri_folder_path):
    os.mkdir(fmri_folder_path)
if not os.path.exists(debug_folder_path):
    os.mkdir(debug_folder_path)
if withTracker and not os.path.exists(tracker_folder_path):
    os.mkdir(tracker_folder_path)

while os.path.exists(os.path.join(debug_folder_path, basename + '_debug.log')):
    basename += '+'

# In[Initiate PyGaze Objects]:
disp = Display(disptype='psychopy')
scr = Screen(disptype='psychopy')

if inScanner or withTracker:
    kb = Keyboard()

if withTracker:
    tracker = EyeTracker(disp)

    DISPSIZE = cst.DISPSIZE

    LOGFILENAME = basename + '_eyetracker'
    LOGFILE = os.path.join(cwd, LOGFILENAME)

    FGC = cst.FGC
    BGC = cst.BGC
    SACCVELTHRESH = cst.SACCVELTHRESH
Code example #7
# start timing
libtime.expstart()

# create display object
disp = Display()

# create eyetracker object
tracker = eyetracker.EyeTracker(disp)

# create keyboard object
keyboard = Keyboard(keylist=['space', "q"], timeout=1)

center_of_screen = (DISPSIZE[0] / 2, DISPSIZE[1] / 2)

# create screen to draw things on
screen1 = Screen()
screen1.draw_fixation(fixtype='cross',
                      pos=center_of_screen,
                      colour=(255, 255, 255),
                      pw=5,
                      diameter=30)
screen2 = Screen()
#screen1.draw_image(base_path, pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
#screen2.draw_image(base_path1, pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width
screen3 = Screen()

# Create a Screen to draw images on
#screen4 = Screen()

# calibrate eye tracker
tracker.calibrate()
Code example #8
File: shooting_game.py  Project: AA33/PyGaze
from pygaze.libinput import Keyboard
from pygaze.eyetracker import EyeTracker

import random

# # # # #
# prep

# create keyboard object
keyboard = Keyboard()

# display object
disp = Display()

# screen objects
screen = Screen()
blankscreen = Screen()
hitscreen = Screen()
hitscreen.clear(colour=(0,255,0))
misscreen = Screen()
misscreen.clear(colour=(255,0,0))

# create eyelink object
eyetracker = EyeTracker(disp)

# eyelink calibration
eyetracker.calibrate()

# display surface
disp.fill(screen=blankscreen)
disp.show()
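
# The excerpt stops after the first blank display. A hypothetical sketch of how
# a gaze-contingent hit/miss loop could continue, using the screens above (the
# target position, radius, and quit-on-any-key behaviour are assumptions, not
# taken from the original shooting_game.py):
target_pos = (DISPSIZE[0] / 2, DISPSIZE[1] / 2)  # assumed target at screen centre
target_radius = 50                               # assumed hit radius in pixels

while keyboard.get_key()[0] is None:
    gx, gy = eyetracker.sample()
    dist_to_target = ((gx - target_pos[0]) ** 2 + (gy - target_pos[1]) ** 2) ** 0.5
    # green screen for a hit, red screen for a miss
    if dist_to_target < target_radius:
        disp.fill(screen=hitscreen)
    else:
        disp.fill(screen=misscreen)
    disp.show()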
Code example #9
File: __init__.py  Project: skylergrammer/gaze_task
class Task(object):
    '''Display n_iters image reversals that switch every delta_t milliseconds.
       Pauses image switching if the is_looking signal is False.

       Parameters
       ----------
       screen_params : dict
                keyword arguments passed to the pygaze Screen constructor
       display : pygaze Display object
                the display that images will be shown on
       images : list
                list of two image files; should be in the working directory or
                given as full paths
       delta_t : int or float
                 time, in milliseconds, between image switches
       n_iters : int
                 number of image flashes to perform before quitting
       eyetracker : pygaze EyeTracker object
                 tracker used to check whether the participant is looking
    '''
    def __init__(self, screen_params, display, images,
                 n_iters=10,
                 delta_t=100,
                 eyetracker=None):

        self.display = display
        self.images = images
        self.delta_t = float(delta_t)
        self.n_iters = n_iters
        self.eyetracker = eyetracker

        # Create screens for both images now, so that switching between them is fast later
        self.scrn1 = Screen(**screen_params)
        self.scrn1.draw_image(images[0])
        self.scrn2 = Screen(**screen_params)
        self.scrn2.draw_image(images[1])

        if self.eyetracker is not None:
            self.eyetracker.start_recording()
        else:
            sys.exit('ERROR: must attach eyetracker object!')

    def _flash(self, t0):

        '''Hidden method that flashes between the images in images list every
           delta_t milliseconds.
        '''
        position = self.eyetracker.sample()
        self.is_focused = check_focus(position)

        # Pause for specified milliseconds
        while elapsed(t0) < self.delta_t:
            pass

        # If eye tracker detects participant is focused switch images
        if self.is_focused:
            if self.iter_ % 2 == 0:
                self.display.fill(screen=self.scrn1)
            else:
                self.display.fill(screen=self.scrn2)
            self.display.show()
            self.iter_ += 1
        else:
            pass

    def start(self):
        '''Calls the hidden _flash() method.
        '''
        self.iter_ = 0
        while self.iter_ < self.n_iters:
            t0 = datetime.now()
            self._flash(t0)
        self.eyetracker.stop_recording()
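
# A hypothetical usage sketch for the Task class above (the screen parameters
# and image filenames are illustrative; Display and EyeTracker come from pygaze
# as in the other examples, and their imports are not shown in this excerpt):
disp = Display()
tracker = EyeTracker(disp)
task = Task(screen_params={}, display=disp,
            images=['checker_a.png', 'checker_b.png'],  # two stimulus images
            n_iters=20, delta_t=250, eyetracker=tracker)
task.start()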
Code example #10
trials.data.addDataType('First Fixation')

#define AOI
AOI_left = AOI(aoitype="rectangle",
               pos=(center_of_screen[0] - 300 - 163,
                    center_of_screen[1] - 163),
               size=[326, 326])
AOI_right = AOI(aoitype="rectangle",
                pos=(center_of_screen[0] + 300 - 163,
                     center_of_screen[1] - 163),
                size=[326, 326])

pressed_key = None

# give trial instructions first
instruction_screen = Screen()
instruction_screen.draw_text(
    text="The practice trials will now begin. \n You will see a white cross followed by a white number. Please say the number out loud. \n You will see a pair of faces. Please watch them naturally. \n You may see a square or circle appear around an image. If you see either, please look at the image with the shape. \n Left click if it is a square. Right click if it is a circle.",
    pos=center_of_screen,
    colour=(255, 255, 255),
    fontsize=22)
while keyboard.get_key()[0] != "space":
    disp.fill(instruction_screen)
    disp.show()

instruction_screen.clear()
pygame.display.update()
from accl import acclimation

acclimation(center_of_screen, tracker, disp, keyboard, AOI_left, AOI_right)
Code example #11
File: accl.py  Project: terne/cognitive_science3
def acclimation(center_of_screen, tracker, disp, keyboard, AOI_left, AOI_right):
    image_set = generate_trial_images()

    #start trials
    for index in range(0,len(image_set)):
        # make trial screens
        fixation_cross_screen = Screen()
        fixation_cross_screen.draw_fixation(fixtype='cross', pos=center_of_screen, colour=(255,255,255), pw=5, diameter=30)
        number_screen = Screen()
        number_screen.draw_text(text=str(np.random.randint(1,10)),pos = center_of_screen, colour=(255,255,255), fontsize=40)
        face_pair_screen = Screen()
        disengagement_screen = Screen()

        # start with a blank screen for 500 ms and start recording
        disp.fill()
        disp.show()
        tracker.start_recording()
        tracker.log("start_trial %d" %index)
        trialstart = libtime.get_time()
        libtime.pause(500)

        # fixation cross screen
        disp.fill(fixation_cross_screen)
        disp.show()
        libtime.pause(500)
        fixation_cross_screen.clear()

        # number screen
        disp.fill(number_screen)
        disp.show()
        libtime.pause(1000)
        number_screen.clear()

        #draws image pair
        image_pair = image_set[index]
        face_pair_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
        face_pair_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width
        disp.fill(face_pair_screen)
        disp.show()
        
        neutral_image_index = 0
        if ("NE" in image_pair[1]):
            neutral_image_index = 1
        
        #NEED WHILE LOOP TO CAPTURE FIXATIONS AND TIME
        start_time_taken = time.time() * 1000
        total_time_taken = 0
        time_neutral = 0
        time_emotional = 0
        last_pass_time_stamp = (time.time() * 1000) - start_time_taken
        last_pass_time_taken = 0

        first_image = 0

        count_fixation_on_emotional = 0
        last_fixation_on_emotional = False
        while total_time_taken < 3000:
            pressed_key = keyboard.get_key()[0]
            if (pressed_key == 'q'):
                break

            tracker_pos = tracker.sample()
            
            if AOI_right.contains(tracker_pos):
                #Add time
                if neutral_image_index == 0:
                    time_emotional = time_emotional + last_pass_time_taken
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True
                else:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
                    
            elif AOI_left.contains(tracker_pos):
                #Add time
                if neutral_image_index == 0:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
                else:
                    time_emotional = time_emotional + last_pass_time_taken
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True


            last_pass_time_taken = (time.time() * 1000) - last_pass_time_stamp
            last_pass_time_stamp = (time.time() * 1000)
            total_time_taken = (time.time() * 1000) - start_time_taken

        if (pressed_key == 'q'):
            break

        #libtime.pause(3000) # 3000 ms of free viewing
        #image pair index 2 tells us if we need to draw a circle/square.

        #myRect_ontheleft = (center_of_screen[0]-300-163, center_of_screen[0]-300+163, center_of_screen[1]+163, center_of_screen[1]-163)
        #myRect_ontheright = (center_of_screen[0]+300-163, center_of_screen[0]+300+163, center_of_screen[1]+163, center_of_screen[1]-163)

        if image_pair[2]:
            # new_face_pair_screen = swap(face_pair_screen, image_pair, tracker)

            #if ("Male" in image_pair[0]):
                #new_suffix = "_result.jpg"
            #else:
            new_suffix = circle_suffix
            if random.choice([True, False]):
                new_suffix = square_suffix

            image_pair[neutral_image_index] = image_pair[neutral_image_index].replace(regular_suffix, new_suffix)

            disengagement_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
            disengagement_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width

            while keyboard.get_key()[0] is None:
                start_pos = tracker.sample()
                #face_pair_screen.draw_circle(colour=(255,255,255), pos=((start_pos[0]-center_of_screen[0]+300)**2,(start_pos[1]-center_of_screen[1])**2, 326/2))
                #disp.fill(face_pair_screen)
                #disp.show()
                if neutral_image_index == 0:
                    #area = pygame.Rect(myRect_ontheleft)
                    #pygame.draw.rect(face_pair_screen, (100, 200, 70), area)
                    #pygame.display.flip()
                    #if ((start_pos[0]-center_of_screen[0]+300)**2 + (start_pos[1]-center_of_screen[1])**2)**0.5 < 100/2:
                    if AOI_right.contains(start_pos):
                        #face_pair_screen.draw_circle(color=(255,255,255), pos=(start_pos[0]-center_of_screen[0]+300)**2,start_pos[1]-center_of_screen[1])**2), 326/2)

                        #print("you fixated on the right image:))")
                        disengagement_start_time = libtime.get_time()


                        # if fixation is started here... draw new images.
                        #if (start_pos[0] >= center_of_screen[0]-300 and start_pos[0] <= center_of_screen[0]-300+image_HW and start_pos[1] >= center_of_screen[1] and start_pos[1] <= center_of_screen[1]+image_HW):
                        face_pair_screen.clear()
                        #disengagement_screen.draw_text(text="yep", pos=center_of_screen)
                        #while keyboard.get_key()[0] == None:
                        disp.fill(disengagement_screen)
                        disp.show()
                        
                        while True:
                            start_pos = tracker.sample()
                            if AOI_left.contains(start_pos):
                                print("you fixated on the left image :))")
                                disengagement_end_time = libtime.get_time()
                                break
                        break

                    # then wait for fixation on position of image_pair[1], i.e. the opposite
                if neutral_image_index == 1:
                    #area = pygame.Rect(myRect_ontheright)
                    #pygame.draw.rect(face_pair_screen, (100, 200, 70), area)
                    #pygame.display.flip()
                    #if ((start_pos[0]-center_of_screen[0]-300)**2 + (start_pos[1]-center_of_screen[1])**2)**0.5 < 326/2:
                    if AOI_left.contains(start_pos):
                        disengagement_start_time = libtime.get_time()
                        #if (start_pos[0] >= center_of_screen[0]+300 and start_pos[0] <= center_of_screen[0]+300+image_HW and start_pos[1] >= center_of_screen[1] and start_pos[1] <= center_of_screen[1]+image_HW):
                        face_pair_screen.clear()
                        #disengagement_screen.draw_image(image_pair[0], pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
                        #disengagement_screen.draw_image(image_pair[1], pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width
                        disp.fill(disengagement_screen)
                        disp.show()
                        
                        while True:
                            start_pos = tracker.sample()
                            if AOI_right.contains(start_pos):
                                disengagement_end_time = libtime.get_time()
                                print("Total time taken: " + str(disengagement_end_time - disengagement_start_time))
                                break
                        break
        else:
            continue

        if (pressed_key == 'q'):
            break

        # end trial
        trialend = libtime.get_time()
        tracker.stop_recording()
        tracker.log("stop trial %d" % index)


        # log information in the end
        # add a way out (quit if pressing q)
        if keyboard.get_key()[0] == "q":
            break
Code example #12
class InfantTobiiTracker(TobiiProTracker):
    """A customised version of the pygaze TobiiProTracker.class for Tobii Pro EyeTracker objects
        display	--	a pygaze.display.Display instance
        -------------------
        TobiiProTracker docs: https://github.com/esdalmaijer/PyGaze/blob/5fd62ef10b04015552c61297306b6db251235e02/pygaze/_eyetracker/libtobii.py#L18
    """
    def __init__(self,
                 display,
                 logfile,
                 eventdetection=c.EVENTDETECTION,
                 saccade_velocity_threshold=35,
                 saccade_acceleration_threshold=9500,
                 blink_threshold=c.BLINKTHRESH,
                 **args):

        # __init__ is redefined above, so we must explicitly call the superclass' init,
        # passing through the received keyword arguments rather than re-hardcoding them
        TobiiProTracker.__init__(self,
                                 display,
                                 logfile,
                                 eventdetection=eventdetection,
                                 saccade_velocity_threshold=saccade_velocity_threshold,
                                 saccade_acceleration_threshold=saccade_acceleration_threshold,
                                 blink_threshold=blink_threshold,
                                 **args)

        # initialize screens
        self.screen = Screen(dispsize=self.disp.dispsize)
        self.c_screen = Screen(dispsize=self.disp.dispsize)

        self.screen.set_background_colour(colour=(0, 0, 0))

        self.points_to_calibrate = [
            self._norm_2_px(p)
            for p in [(0.5, 0.5), (0.1, 0.9), (0.1, 0.1), (0.9, 0.9), (0.9, 0.1)]
        ]

        self.datafilepath = "{0}_TOBII_output.tsv".format(logfile)
        self.datafile = open(self.datafilepath, 'w')

        # create handle for psychopy window for pre-calibration video
        self.video_win = pygaze.expdisplay
        self.video_win.mouseVisible = False
        self.video_win.size = self.disp.dispsize

    # new method
    def preCalibrate(self):
        """Helps position the infant while playing a video.
        returns
            Boolean indicating whether the positioning is done (True: 'space' has been pressed)
        """
        self._write_enabled = False
        self.start_recording()

        # origin: top-left corner of precalibration box; size: tuple of lengths for box sides
        origin = (int(self.disp.dispsize[0] / 4),
                  int(self.disp.dispsize[1] / 4))
        size = (int(2 * self.disp.dispsize[0] / 4),
                int(2 * self.disp.dispsize[1] / 4))

        # Initialise a PsychoPy MovieStim
        mov = visual.MovieStim3(self.video_win, c.CALIBVIDEO, flipVert=False)

        #        print("------------> Pre-calibration process started.")
        print("\t-> When correctly positioned, press 'space' to start the calibration.")

        while mov.status != visual.FINISHED:
            if not self.gaze:
                continue

            self.screen.clear()

            # Add the MovieStim to a PyGaze Screen instance.
            self.screen.screen.append(mov)

            # self.gaze.append(gaze_data), gaze_data is the data structure provided by Tobii
            gaze_sample = copy.copy(self.gaze[-1])  # latest gazepoint

            validity_colour = (255, 0, 0)

            if gaze_sample['right_gaze_origin_validity'] and gaze_sample[
                    'left_gaze_origin_validity']:
                left_validity = 0.15 < gaze_sample[
                    'left_gaze_origin_in_trackbox_coordinate_system'][2] < 0.85
                right_validity = 0.15 < gaze_sample[
                    'right_gaze_origin_in_trackbox_coordinate_system'][2] < 0.85
                if left_validity and right_validity:
                    validity_colour = (0, 255, 0)

            self.screen.draw_line(colour=validity_colour,
                                  spos=origin,
                                  epos=(origin[0] + size[0], origin[1]),
                                  pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=origin,
                                  epos=(origin[0], origin[1] + size[1]),
                                  pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=(origin[0], origin[1] + size[1]),
                                  epos=(origin[0] + size[0],
                                        origin[1] + size[1]),
                                  pw=1)
            self.screen.draw_line(colour=validity_colour,
                                  spos=(origin[0] + size[0],
                                        origin[1] + size[1]),
                                  epos=(origin[0] + size[0], origin[1]),
                                  pw=1)

            right_eye, left_eye, distance = None, None, []
            if gaze_sample['right_gaze_origin_validity']:
                distance.append(
                    round(
                        gaze_sample[
                            'right_gaze_origin_in_user_coordinate_system'][2] /
                        10, 1))
                right_pos = gaze_sample[
                    'right_gaze_origin_in_trackbox_coordinate_system']
                right_eye = ((1 - right_pos[0]) * size[0] + origin[0],
                             right_pos[1] * size[1] + origin[1])
                self.screen.draw_circle(colour=validity_colour,
                                        pos=right_eye,
                                        r=int(self.disp.dispsize[0] / 100),
                                        pw=5,
                                        fill=True)

            if gaze_sample['left_gaze_origin_validity']:
                distance.append(
                    round(
                        gaze_sample[
                            'left_gaze_origin_in_user_coordinate_system'][2] /
                        10, 1))
                left_pos = gaze_sample[
                    'left_gaze_origin_in_trackbox_coordinate_system']
                left_eye = ((1 - left_pos[0]) * size[0] + origin[0],
                            left_pos[1] * size[1] + origin[1])
                self.screen.draw_circle(colour=validity_colour,
                                        pos=left_eye,
                                        r=int(self.disp.dispsize[0] / 100),
                                        pw=5,
                                        fill=True)

            self.screen.draw_text(
                text="Current distance to the eye tracker: {0} cm.".format(
                    self._mean(distance)),
                pos=(int(self.disp.dispsize[0] / 2),
                     int(self.disp.dispsize[1] * 0.9)),
                colour=(255, 255, 255),
                fontsize=20)

            self.disp.fill(self.screen)
            self.disp.show()

            key = self._getKeyPress()
            if key == "space":
                break

        # the video does not loop, so if it is still playing here the user must
        # have pressed 'space' to leave the loop above
        if mov.status != visual.FINISHED:
            # pause and discard the video so that its audio stops as well
            mov.pause()
            self.screen.screen.remove(mov)
            #video_win.close()
            del mov
            self.screen.clear()
            clock.pause(1000)
            return True
        else:
            return False

    # overridden method
    def calibrate(self, eventlog, calibrate=True):
        """Calibrates the eye tracker with custom child-friendly screens.
        arguments
            eventlog          --    logfile instance
        keyword arguments
            calibrate    --    Boolean indicating if calibration should be
                        performed (default = True).
        returns
            success    --    nothing is returned, but a calibration log is
                        added to the log file and some properties are updated
                        (i.e. the thresholds for the detection algorithms)
        """

        # calculate thresholds (degrees to pixels) -- NOT USED
        self.pxfixtresh = self._deg2pix(self.screendist, self.fixtresh,
                                        self.pixpercm)
        # in pixels per millisecond
        self.pxspdtresh = self._deg2pix(self.screendist,
                                        self.spdtresh / 1000.0, self.pixpercm)
        # in pixels per millisecond**2
        self.pxacctresh = self._deg2pix(self.screendist,
                                        self.accthresh / 1000.0, self.pixpercm)

        # calibration image file
        calibImg = c.CALIBIMG

        # initialize a sound
        snd = sound.Sound(value=c.CALIBSOUNDFILE)
        snd.setVolume(0.5)

        # image scaling range
        bit = 0.02
        scale_range = ([x / 100.0 for x in range(60, 30, -2)] +
                       [x / 100.0 for x in range(30, 60, 2)])

        if calibrate:

            if not self.eyetracker:
                print(
                    "WARNING! libtobii.TobiiProTracker.calibrate: no eye trackers found for the calibration!"
                )
                self.stop_recording()
                return False

            # Tobii calibration object
            calibration = tr.ScreenBasedCalibration(self.eyetracker)

            calibrating = True
            calibration.enter_calibration_mode()

            while calibrating:

                eventlog.write(["Calibration started at ", clock.get_time()])

                # original (normalised) points_to_calibrate = [(0.5, 0.5), (0.9, 0.1), (0.1, 0.1), (0.9, 0.9), (0.1, 0.9)]
                # pixel values are calculated from the normalised points and a (1920, 1200) display (see __init__):
                # self.points_to_calibrate = [(960, 600), (192, 1080), (192, 120), (1728, 1080), (1728, 120)]

                # calibration for all calibration points
                for i in range(0, len(self.points_to_calibrate)):

                    point = self.points_to_calibrate[i]

                    eventlog.write([
                        "\nCalibrating point {0} at: ".format(point),
                        clock.get_time()
                    ])
                    #                    print "----------> Calibrating at point ", point

                    # play the soundfile
                    snd.play()

                    # shrink
                    scale = 1
                    for frameN in range(20):  # 20 frames -> 1/3 sec shrinking (180 to 108)

                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg,
                                                 pos=point,
                                                 scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()
                        scale = scale - bit

                    # grow and shrink until 'space' is pressed
                    s = 0
                    for frameN in range(12000):  # scale down from 108 to 54 (15 frames) and back up, per scale_range

                        s = frameN % 30
                        scale = scale_range[s]
                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg,
                                                 pos=point,
                                                 scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()

                        if self.kb.get_key(keylist=['space'],
                                           timeout=10,
                                           flush=False)[0] == 'space':
                            break

                    # collect results for point (Tobii)
                    normalized_point = self._px_2_norm(point)
                    collect_result = calibration.collect_data(
                        normalized_point[0], normalized_point[1])
                    eventlog.write([
                        "Collecting result for point {0} at: ".format(point),
                        clock.get_time()
                    ])

                    if collect_result != tr.CALIBRATION_STATUS_SUCCESS:
                        eventlog.write([
                            "Recollecting result for point {0} at: ".format(
                                point),
                            clock.get_time()
                        ])
                        # Try again if it didn't go well the first time.
                        # Not all eye tracker models will fail at this point, but instead fail on ComputeAndApply.
                        calibration.collect_data(normalized_point[0],
                                                 normalized_point[1])

                    # grow back to original size
                    up_scale = [
                        x / 100.0 for x in range(int(scale * 100), 100, 2)
                    ]
                    for scale in up_scale:
                        self.c_screen.clear()
                        self.c_screen.draw_image(calibImg,
                                                 pos=point,
                                                 scale=scale)
                        drawCoreImage(self.c_screen, point, i)
                        self.disp.fill(self.c_screen)
                        self.disp.show()

                    # image rolling to next point
                    # pixelised self.points_to_calibrate = [(960, 600), (192, 1080), (192, 120), (1728, 1080), (1728, 120)]
                    if (i < len(self.points_to_calibrate) - 1):
                        """
                        screen ratio: 16/10
                        -> the steps for moving the images should be (16, 10) or (8, 5)
                        """
                        # center -> bottom left / (960, 600) -> (192, 1080) - 48 frames
                        while point[0] >= self.points_to_calibrate[i + 1][0]:
                            self.c_screen.clear()
                            point = (point[0] - 16, point[1] + 10)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()

                        # bottom-left -> top-left / (192, 1080) -> (192, 120)
                        # AND
                        # bottom-right -> top-right / (1728, 1080) -> (1728, 120) - 80 frames
                        while point[1] > self.points_to_calibrate[i + 1][1]:
                            self.c_screen.clear()
                            point = (point[0], point[1] - 12)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()

                        # top-left -> bottom-right / (192, 120) -> (1728, 1080) - 96 frames
                        while (point[0] < self.points_to_calibrate[i + 1][0]
                               and point[1] != self.points_to_calibrate[i + 1][1]):
                            self.c_screen.clear()
                            point = (point[0] + 16, point[1] + 10)
                            self.c_screen.draw_image(calibImg, pos=point)
                            self.disp.fill(self.c_screen)
                            self.disp.show()

                # Tobii
                calibration_result = calibration.compute_and_apply()

                eventlog.write([
                    "\nCompute and apply returned {0} and collected at {1} points.\n"
                    .format(calibration_result.status,
                            len(calibration_result.calibration_points))
                ])
                print("\tCalibration: {0} - collected at {1} points.".format(
                    calibration_result.status,
                    len(calibration_result.calibration_points)))

                # Post-calibration image (while control monitor shows calibration results)
                self.c_screen.clear()
                self.c_screen.draw_image(c.ATT_IMG)
                self.disp.fill(self.c_screen)
                self.disp.show()

                if calibration_result.status != tr.CALIBRATION_STATUS_SUCCESS:
                    eventlog.write([
                        "\n\nWARNING! libtobii.TobiiProTracker.calibrate: Calibration was unsuccessful!\n\n"
                    ])
                    print("""\tCalibration was unsuccessful.\n
                          ->Press 'R' to recalibrate all points
                          ->or 'SPACE' to continue without calibration\n""")
                    key = self.kb.get_key(keylist=['space', 'r'],
                                          timeout=None)[0]
                    if key == 'r':
                        recalibration_points = [0]
                    elif key == 'space':
                        recalibration_points = []

                else:
                    # call showCalibrationResults function to present the results on screen 0. The function returns a list of recalibration points
                    logfile_dir = os.path.dirname(
                        os.path.abspath(self.datafilepath))
                    recalibration_points = showCalibrationResults(
                        logfile_dir, calibration_result)

                # if the list is empty, calibration is finished
                if len(recalibration_points) == 0:
                    eventlog.write(
                        ["\nCalibration finished at ",
                         clock.get_time()])
                    calibrating = False

                # if the list contains only '0', the calibration was unsuccessful; recalibrate all points
                elif (recalibration_points[0] == 0):
                    eventlog.write(["\nRecalibrating all points..."])
                    calibrating = True

                # if the list contains only '1', recalibrate all points despite successful calibration
                elif (recalibration_points[0] == 1):
                    eventlog.write(["\nRecalibrating all points..."])
                    for point in self.points_to_calibrate:
                        # discard_data expects normalised coordinates, as in collect_data above
                        norm_point = self._px_2_norm(point)
                        calibration.discard_data(norm_point[0], norm_point[1])
                    calibrating = True

                # recalibrate the returned points
                else:
                    eventlog.write([
                        "\nRecalibrating {0} points...".format(
                            len(recalibration_points))
                    ])
                    self.points_to_calibrate = [
                        self._norm_2_px(p) for p in recalibration_points
                    ]
                    for point in self.points_to_calibrate:
                        norm_point = self._px_2_norm(point)
                        calibration.discard_data(norm_point[0], norm_point[1])
                    calibrating = True

            # only leave calibration mode if it was entered above
            calibration.leave_calibration_mode()
            eventlog.write([" Leaving calibration mode...", clock.get_time()])
        self.stop_recording()
        self._write_enabled = True
        self.disp.close()  # leaving pygaze display

    def _getKeyPress(self):
        key = self.kb.get_key(keylist=['space', 'escape'], flush=False)[0]
        if key == 'escape':
            self.disp.close()
            self.close()
            sys.exit()
        elif key:
            return key
        else:
            return None
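
# A hypothetical usage sketch for the class above (the logfile name
# 'infant_session' is illustrative; Display and Logfile come from pygaze as in
# the other examples, and their imports are not shown in this excerpt). Note
# that calibrate() closes the pygaze display when it finishes (see
# self.disp.close() above), so the experiment window must be re-created afterwards:
disp = Display(disptype='psychopy')
eventlog = Logfile(filename='infant_session')
tracker = InfantTobiiTracker(disp, 'infant_session')
if tracker.preCalibrate():       # True once 'space' is pressed during the video
    tracker.calibrate(eventlog)  # child-friendly calibration routine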
Code example #13
from constants import *
from pygaze import libtime
from pygaze.libtime import clock
from pygaze.libgazecon import AOI
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard
from pygaze.libinput import Mouse
from pygaze.eyetracker import EyeTracker
from pygaze import liblog
from psychopy import event
import pygame  # needed below for pygame.mixer
# # # # #
# SETUP

# visuals
disp = Display()
scr = Screen()
blnk = Screen()
#audio
pygame.mixer.init(frequency=44100, size=-16, buffer=2048, channels=1)
# input
mouse = Mouse(visible=True)
kb = Keyboard()

#Start tracker and calibrate
eyetracker = EyeTracker(disp)
eyetracker.calibrate()
#set up logging
log = liblog.Logfile()
#########################################################################
#Load image files
#image stimuli.
Code example #14
from pygaze.libinput import Keyboard
from pygaze.eyetracker import EyeTracker
from pygaze.liblog import Logfile
import pygaze.libtime as timer

from constants import *
from client import *
from utilities import *

# # # # #
# SETUP


# visuals

scr = Screen()

# input
kb = Keyboard()

# output
log = Logfile()
log.write(["trialnr", "image", "imgtime"])
# # # # #
# PREPARE

# load instructions from file
instfile = open(INSTFILE)
instructions = instfile.read()
instfile.close()
Code example #15
File: camtest.py  Project: AA33/PyGaze
from pygaze.libinput import Keyboard

# first, we try to import libwebcam from PyGaze
try:
	from pygaze import libwebcam
# if importing from PyGaze fails, we try to import from the current directory
except ImportError:
	import libwebcam


# # # # #
# preparation

# visual
disp = Display()
scr = Screen()

# input
kb = Keyboard()

# webcam
camlist = libwebcam.available_devices()
cam = libwebcam.Camera(dev=camlist[0], devtype=DEVTYPE, resolution=CAMRES, verflip=VFLIP, horflip=HFLIP)


# # # # #
# run camera display

# some variables
stopped = False
Code example #16
from pygaze.libinput import Keyboard
from pygaze.eyetracker import EyeTracker

import random

# # # # #
# prep

# create keyboard object
keyboard = Keyboard()

# display object
disp = Display()

# screen objects
screen = Screen()
blankscreen = Screen()
hitscreen = Screen()
hitscreen.clear(colour=(0,255,0))
misscreen = Screen()
misscreen.clear(colour=(255,0,0))

# create eyelink object
eyetracker = EyeTracker(disp)

# eyelink calibration
eyetracker.calibrate()

# display surface
disp.fill(screen=blankscreen)
disp.show()
Code example #17
our_log = liblog.Logfile()
# write "headlines" to log file
our_log.write(
    ["trialnr", "trialstart", "trialend", "disengagementtime",
     "imagepair"])  # fill in with the necessary headlines

# calibrate the eye-tracker
tracker.calibrate()

# make the sets of images
image_set = generate()
#shuffle our image sets
shuffle(image_set)

# give instructions first
instruction_screen = Screen()
instruction_screen.draw_text(
    text="You will watch a short clip. Afterwards, the trials will begin. \n Press space to continue",
    pos=center_of_screen,
    colour=(255, 255, 255),
    fontsize=22)
while keyboard.get_key()[0] != "space":
    disp.fill(instruction_screen)
    disp.show()

instruction_screen.clear()

#call movie function - will need to switch between neutral and sad
#INSERT CODE HERE
Code example #18
File: annoying_message.py  Project: AA33/PyGaze
# co-workers to REALLY know (and possibly have a seizure)
#
# E.S. Dalmaijer, 2013

# your message
MESSAGE = "AFK; BRB"

# import stuff
import random
from pygaze.defaults import *
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard

# objects
disp = Display()
scr = Screen()
kb = Keyboard(keylist=['space'],timeout=1)

# run annoying message
while kb.get_key()[0] is None:
	# colour
	col = (random.randint(0,255), random.randint(0,255), random.randint(0,255))
	# position
	pos = (random.randint(0,DISPSIZE[0]), random.randint(0,DISPSIZE[1]))
	# text
	scr.draw_text(text=MESSAGE, colour=col, pos=pos, fontsize=84)
	# display
	disp.fill(scr)
	disp.show()
	# reset screen
	scr.clear()
Code example #19
from constants import *
from pygaze import libtime
from pygaze.libscreen import Display, Screen
from pygaze.eyetracker import EyeTracker
from pygaze.libinput import Keyboard
from pygaze.liblog import Logfile
from pygaze.libgazecon import FRL

# timing and initialization
libtime.expstart()

# visuals
disp = Display()
scr = Screen()

# eye tracking
tracker = EyeTracker(disp, trackertype='dummy')
frl = FRL(pos='center', dist=125, size=200)

# input collection and storage
kb = Keyboard(keylist=['escape', 'space'], timeout=None)
log = Logfile()
log.write(["trialnr", "trialstart", "trialend", "duration", "image"])

# run trials
tracker.calibrate()
for trialnr in range(0, len(IMAGES)):
    # blank display
    disp.fill()
    disp.show()
    libtime.pause(1000)
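
    # The listing is cut off here. A minimal sketch of how the trial might
    # continue, assuming the documented pygaze FRL.update(display, stimscreen,
    # fixpos) call, which redraws the screen masked to the region around the
    # current fixation:
    scr.clear()
    scr.draw_image(IMAGES[trialnr])
    key = None
    while key is None:
        gazepos = tracker.sample()
        frl.update(disp, scr, gazepos)
        key, presstime = kb.get_key(timeout=1)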
Code example #20
File: experiment.py  Project: arnaghosh/MITACS
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard
from libmpdev import *
import time
import math
import matplotlib.pyplot as plt


# create a Display to interact with the monitor
disp = Display()
 
# create a Screen to draw text on later
scr = Screen()
 
# create a Keyboard to catch key presses
kb = Keyboard(keylist=['escape'], timeout=1)
 
# create an MP150 object to communicate with a BIOPAC MP150
mp = MP150()

Y = [0]
t1 = time.time()
T = [0]

# set a starting value for key
key = None
# loop until a key is pressed
while key is None:

    # get a new sample from the MP150
    sample = mp.sample()
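
    # The listing is cut off here. A hedged sketch of how such a sampling loop
    # might continue, using only the names defined above (the final plot and
    # mp.close() call are assumptions about the intended ending):
    Y.append(sample)
    T.append(time.time() - t1)
    # poll the keyboard ('escape', 1 ms timeout, as configured above)
    key, presstime = kb.get_key()

# after the loop: close the MP150 link and inspect the recorded trace
mp.close()
plt.plot(T, Y)
plt.show()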
Code example #21
File: menu.py  Project: Nagatoh/projeto-pygame
                self.screen.blit(item.label, item.position)

            # Draw version
            version_x = self.screen.get_rect().width - self.version.get_rect().width
            version_y = self.screen.get_rect().height - self.version.get_rect().height
            self.screen.blit(self.version, (version_x, version_y))

            # Draw languageButton

            self.screen.blit(self.langButton.label, self.langButton.position)

            ############ DISPLAY ##############
            # pygame.display.flip()
            x, y = eyetracker.sample()

            self.canvas.draw_circle(colour=(255, 0, 0),
                                    pos=(x, y),
                                    r=5,
                                    fill=True)
            disp.fill(self.canvas)  # likely intended: fill with the pygaze Screen, not the menu object
            disp.show()


#### Running
if __name__ == "__main__":
    screen = Screen()
    gm = MainMenu(screen)
    gm.run()
Code example #22
File: camtest.py  Project: julian-tejada/PyGaze-1
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard

# first, we try to import libwebcam from PyGaze
try:
    from pygaze import libwebcam
# if importing from PyGaze fails, we try to import from the current directory
except ImportError:
    import libwebcam

# # # # #
# preparation

# visual
disp = Display()
scr = Screen()

# input
kb = Keyboard()

# webcam
camlist = libwebcam.available_devices()
cam = libwebcam.Camera(dev=camlist[0],
                       devtype=DEVTYPE,
                       resolution=CAMRES,
                       verflip=VFLIP,
                       horflip=HFLIP)

# # # # #
# run camera display
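
# The excerpt ends before the loop itself. A minimal sketch, assuming
# libwebcam.Camera exposes a get_image() method (and mirroring the `stopped`
# flag set up in example #15):
stopped = False
while not stopped:
    # grab the newest frame and draw it to the screen
    img = cam.get_image()
    scr.clear()
    scr.draw_image(img)
    disp.fill(scr)
    disp.show()
    # stop the preview on any key press
    key, presstime = kb.get_key(timeout=1)
    if key is not None:
        stopped = True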