def __init__(self,
                 display,
                 logfile,
                 eventdetection=c.EVENTDETECTION,
                 saccade_velocity_threshold=35,
                 saccade_acceleration_threshold=9500,
                 blink_threshold=c.BLINKTHRESH,
                 **args):

        # redefining __init__ above, so we must explicitly call the superclass'
        # init, forwarding our parameters instead of re-hardcoding the defaults
        TobiiProTracker.__init__(self,
                                 display,
                                 logfile,
                                 eventdetection=eventdetection,
                                 saccade_velocity_threshold=saccade_velocity_threshold,
                                 saccade_acceleration_threshold=saccade_acceleration_threshold,
                                 blink_threshold=blink_threshold,
                                 **args)

        # initialize screens
        self.screen = Screen(dispsize=self.disp.dispsize)
        self.c_screen = Screen(dispsize=self.disp.dispsize)

        self.screen.set_background_colour(colour=(0, 0, 0))

        self.points_to_calibrate = [
            self._norm_2_px(p)
            for p in [(0.5, 0.5), (0.1, 0.9), (0.1, 0.1), (0.9, 0.9), (0.9, 0.1)]
        ]

        self.datafilepath = "{0}_TOBII_output.tsv".format(logfile)
        self.datafile = open(self.datafilepath, 'w')

        # create handle for psychopy window for pre-calibration video
        self.video_win = pygaze.expdisplay
        self.video_win.mouseVisible = False
        self.video_win.size = self.disp.dispsize
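
For reference, a minimal sketch of what the `_norm_2_px` helper used above presumably does: mapping normalized (0.0-1.0) Tobii coordinates onto display pixels. This is an assumption about its behavior; the actual method is inherited from TobiiProTracker.

    def _norm_2_px(self, point):
        # assumed behavior: scale a normalized coordinate pair to pixels
        return (round(point[0] * self.disp.dispsize[0], 0),
                round(point[1] * self.disp.dispsize[1], 0))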
Example No. 2
                self.screen.blit(item.label, item.position)

            # Draw version label in the bottom-right corner
            version_x = self.screen.get_rect().width - self.version.get_rect().width
            version_y = self.screen.get_rect().height - self.version.get_rect().height
            self.screen.blit(self.version, (version_x, version_y))

            # Draw language button
            self.screen.blit(self.langButton.label, self.langButton.position)

            ############ DISPLAY ##############
            # draw the current gaze position as a red dot, then flip
            x, y = eyetracker.sample()
            self.canvas.draw_circle(colour=(255, 0, 0),
                                    pos=(x, y),
                                    r=5,
                                    fill=True)
            disp.fill(self.canvas)
            disp.show()


#### Running
if __name__ == "__main__":
    screen = Screen()
    gm = MainMenu(screen)
    gm.run()
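
The `eyetracker.sample()` / `draw_circle` pattern above is the core of a gaze-contingent cursor. A self-contained hedged sketch of the same idea, using PyGaze's dummy tracker so it runs without hardware (the mouse stands in for gaze):

from pygaze.libscreen import Display, Screen
from pygaze.eyetracker import EyeTracker
from pygaze.libinput import Keyboard

disp = Display()
scr = Screen()
tracker = EyeTracker(disp, trackertype='dummy')  # mouse simulates gaze
kb = Keyboard(keylist=['escape'], timeout=1)

tracker.start_recording()
while kb.get_key()[0] is None:
    scr.clear()
    # draw a red dot at the current gaze position
    scr.draw_circle(colour=(255, 0, 0), pos=tracker.sample(), r=5, fill=True)
    disp.fill(scr)
    disp.show()
tracker.stop_recording()
disp.close()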
Example No. 3
from constants import *
from pygaze import libtime
from pygaze.libscreen import Display, Screen
from pygaze.eyetracker import EyeTracker
from pygaze.libinput import Keyboard
from pygaze.liblog import Logfile
from pygaze.libgazecon import FRL

# timing and initialization
libtime.expstart()

# visuals
disp = Display()
scr = Screen()

# eye tracking
tracker = EyeTracker(disp, trackertype='dummy')
frl = FRL(pos='center', dist=125, size=200)

# input collection and storage
kb = Keyboard(keylist=['escape', 'space'], timeout=None)
log = Logfile()
log.write(["trialnr", "trialstart", "trialend", "duration", "image"])

# run trials
tracker.calibrate()
for trialnr in range(len(IMAGES)):
    # blank display for one second before the trial starts
    disp.fill()
    disp.show()
    libtime.pause(1000)
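    # The excerpt ends mid-trial; what follows is a hedged sketch of a
    # continuation that fills the headlines logged above (it assumes IMAGES,
    # from constants, holds image file paths):
    scr.clear()
    scr.draw_image(IMAGES[trialnr])
    trialstart = libtime.get_time()
    tracker.start_recording()
    disp.fill(scr)
    disp.show()

    # wait for a keypress (or a 10 s timeout), then close out the trial
    kb.get_key(keylist=['space'], timeout=10000)
    trialend = libtime.get_time()
    tracker.stop_recording()
    log.write([trialnr, trialstart, trialend,
               trialend - trialstart, IMAGES[trialnr]])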
Example No. 4
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard
from pygaze.eyetracker import EyeTracker

import random

# # # # #
# prep

# create keyboard object
keyboard = Keyboard()

# display object
disp = Display()

# screen objects
screen = Screen()
blankscreen = Screen()
hitscreen = Screen()
hitscreen.clear(colour=(0, 255, 0))
misscreen = Screen()
misscreen.clear(colour=(255, 0, 0))

# create eye-tracker object
eyetracker = EyeTracker(disp)

# calibrate the eye tracker
eyetracker.calibrate()

# display surface
disp.fill(screen=blankscreen)
disp.show()
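
# A hedged sketch of how the hit/miss screens above might be used: take one
# gaze sample and flash green if it lands near a target, red otherwise
# (TARGET and RADIUS are made-up example values, not from this file).
TARGET = (512, 384)
RADIUS = 100
eyetracker.start_recording()
x, y = eyetracker.sample()
eyetracker.stop_recording()
if ((x - TARGET[0]) ** 2 + (y - TARGET[1]) ** 2) ** 0.5 < RADIUS:
    disp.fill(screen=hitscreen)
else:
    disp.fill(screen=misscreen)
disp.show()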
Example No. 5
run = r.RUN

DISPSIZE = (res[0], res[1])

# find the inter-stimulus distance, based on resolution and viewing distance,
# for a 4 deg viewing angle; since PsychoPy positions stimuli on their
# centerpoint, add 128 px (half of the stimulus width)
base_dist = 2 * dist * math.tan(math.radians(4) / 2)
base_dist_half = base_dist / 2
pixpcm = res[0] / res[2]  # horizontal pixels per cm (res[2]: screen width in cm)
base_dist_pix = int(base_dist_half * pixpcm) + 128
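
# The same angle-to-pixels conversion as a reusable helper. This is a sketch
# with a hypothetical name, restating the standard visual-angle formula
# size_cm = 2 * d * tan(theta / 2) used above:
def deg_to_px(angle_deg, view_dist_cm, px_per_cm):
    size_cm = 2 * view_dist_cm * math.tan(math.radians(angle_deg) / 2)
    return int(size_cm * px_per_cm)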

# In[Initiate PyGaze Objects]:
disp = Display(disptype='psychopy')
scr = Screen(disptype='psychopy')

if inScanner or withTracker:
    kb = Keyboard()

if withTracker:
    tracker = EyeTracker(disp)
    
    DISPSIZE = cst.DISPSIZE
    
    fps_str = str(flicker).replace('.','_')
    basename = "{}_{}".format(subid, fps_str)
    
    LOGFILENAME = basename + '_eyetracker'
    directory = os.getcwd()
    LOGFILE = os.path.join(directory, LOGFILENAME)
Example No. 6
our_log = liblog.Logfile()
# write "headlines" to log file
our_log.write(
    ["trialnr", "trialstart", "trialend", "disengagementtime",
     "imagepair"])  # fill in with the neccecary headlines

# calibrate the eye-tracker
tracker.calibrate()

# make the sets of images
image_set = generate()
# shuffle our image sets
shuffle(image_set)

# give instructions first
instruction_screen = Screen()
instruction_screen.draw_text(
    text="You will watch a short clip. After, the trials will begin.\nPress space to continue",
    pos=center_of_screen,
    colour=(255, 255, 255),
    fontsize=22)
while keyboard.get_key()[0] != "space":
    disp.fill(instruction_screen)
    disp.show()

instruction_screen.clear()

# call the movie function here - will need to switch between neutral and sad
#INSERT CODE HERE
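
Purely as a hedged illustration of what could fill that placeholder: a clip can be drawn on the PsychoPy window underlying PyGaze (the same pygaze.expdisplay handle used in the first example above), assuming PsychoPy's MovieStim3 is available; clip_path is a made-up parameter name.

import pygaze
from psychopy import visual
from psychopy.constants import FINISHED

def play_clip(clip_path):
    # draw the movie frame by frame on the PsychoPy window behind PyGaze
    win = pygaze.expdisplay
    movie = visual.MovieStim3(win, clip_path)
    while movie.status != FINISHED:
        movie.draw()
        win.flip()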
Example No. 7
def acclimation(center_of_screen, tracker, disp, keyboard, AOI_left, AOI_right):
    image_set = generate_trial_images()

    #start trials
    for index in range(0,len(image_set)):
        # make trial screens
        fixation_cross_screen = Screen()
        fixation_cross_screen.draw_fixation(fixtype='cross', pos=center_of_screen, colour=(255,255,255), pw=5, diameter=30)
        number_screen = Screen()
        number_screen.draw_text(text=str(np.random.randint(1, 10)), pos=center_of_screen, colour=(255, 255, 255), fontsize=40)
        face_pair_screen = Screen()
        disengagement_screen = Screen()

        # start with a blank screen for 500 ms and start recording
        disp.fill()
        disp.show()
        tracker.start_recording()
        tracker.log("start_trial %d" %index)
        trialstart = libtime.get_time()
        libtime.pause(500)

        # fixation cross screen
        disp.fill(fixation_cross_screen)
        disp.show()
        libtime.pause(500)
        fixation_cross_screen.clear()

        # number screen
        disp.fill(number_screen)
        disp.show()
        libtime.pause(1000)
        number_screen.clear()

        # draw the image pair, 300 px left and right of screen center
        image_pair = image_set[index]
        face_pair_screen.draw_image(image_pair[0], pos=(center_of_screen[0] - 300, center_of_screen[1]), scale=None)
        face_pair_screen.draw_image(image_pair[1], pos=(center_of_screen[0] + 300, center_of_screen[1]), scale=None)
        disp.fill(face_pair_screen)
        disp.show()
        
        neutral_image_index = 0
        if ("NE" in image_pair[1]):
            neutral_image_index = 1
        
        # while loop to capture fixations and accumulate looking time
        start_time_taken = time.time() * 1000
        total_time_taken = 0
        time_neutral = 0
        time_emotional = 0
        last_pass_time_stamp = time.time() * 1000
        last_pass_time_taken = 0

        count_fixation_on_emotional = 0
        last_fixation_on_emotional = False
        while total_time_taken < 3000:
            pressed_key = keyboard.get_key()[0]
            if (pressed_key == 'q'):
                break

            tracker_pos = tracker.sample()
            
            if AOI_right.contains(tracker_pos):
                # gaze on the right image: credit its looking time
                if neutral_image_index == 0:
                    time_emotional = time_emotional + last_pass_time_taken
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True
                else:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
                    
            elif AOI_left.contains(tracker_pos):
                # gaze on the left image: credit its looking time
                if neutral_image_index == 0:
                    time_neutral = time_neutral + last_pass_time_taken
                    last_fixation_on_emotional = False
                else:
                    time_emotional = time_emotional + last_pass_time_taken
                    if not last_fixation_on_emotional:
                        count_fixation_on_emotional = count_fixation_on_emotional + 1
                    last_fixation_on_emotional = True


            last_pass_time_taken = (time.time() * 1000) - last_pass_time_stamp
            last_pass_time_stamp = (time.time() * 1000)
            total_time_taken = (time.time() * 1000) - start_time_taken

        if (pressed_key == 'q'):
            break

        # image_pair[2] tells us whether a circle/square probe trial follows

        if image_pair[2]:
            # pick the probe shape at random
            new_suffix = circle_suffix
            if random.choice([True, False]):
                new_suffix = square_suffix

            # swap the neutral image for its probe version
            image_pair[neutral_image_index] = image_pair[neutral_image_index].replace(regular_suffix, new_suffix)

            disengagement_screen.draw_image(image_pair[0], pos=(center_of_screen[0] - 300, center_of_screen[1]), scale=None)
            disengagement_screen.draw_image(image_pair[1], pos=(center_of_screen[0] + 300, center_of_screen[1]), scale=None)

            while keyboard.get_key()[0] is None:
                start_pos = tracker.sample()
                if neutral_image_index == 0:
                    # wait for a fixation on the right (emotional) image
                    if AOI_right.contains(start_pos):
                        disengagement_start_time = libtime.get_time()

                        # fixation has started here, so draw the probe images
                        face_pair_screen.clear()
                        disp.fill(disengagement_screen)
                        disp.show()

                        # then wait for a fixation on the opposite (left) image
                        while True:
                            start_pos = tracker.sample()
                            if AOI_left.contains(start_pos):
                                print("you fixated on the left image")
                                disengagement_end_time = libtime.get_time()
                                break
                        break
                if neutral_image_index == 1:
                    # wait for a fixation on the left (emotional) image
                    if AOI_left.contains(start_pos):
                        disengagement_start_time = libtime.get_time()

                        # fixation has started here, so draw the probe images
                        face_pair_screen.clear()
                        disp.fill(disengagement_screen)
                        disp.show()

                        # then wait for a fixation on the opposite (right) image
                        while True:
                            start_pos = tracker.sample()
                            if AOI_right.contains(start_pos):
                                disengagement_end_time = libtime.get_time()
                                print("Total time taken: " + str(disengagement_end_time - disengagement_start_time))
                                break
                        break
        else:
            # no probe on this trial; note that this `continue` also skips the
            # stop_recording/logging below
            continue

        if (pressed_key == 'q'):
            break

        # end trial
        trialend = libtime.get_time()
        tracker.stop_recording()
        tracker.log("stop trial %d" % index)


        # log information in the end
        # add a way out (quit if pressing q)
        if keyboard.get_key()[0] == "q":
            break
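
The comment near the end of the loop says information is logged at the end, but no write call survives in this excerpt. A hedged sketch of the per-trial write, matching the headline order used in Example No. 6 (the helper name is hypothetical):

def log_trial(log, index, trialstart, trialend,
              disengagement_start_time, disengagement_end_time, image_pair):
    # column order matches the headlines: trialnr, trialstart, trialend,
    # disengagementtime, imagepair
    log.write([index, trialstart, trialend,
               disengagement_end_time - disengagement_start_time,
               image_pair[:2]])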
Example No. 8
from constants import *
from pygaze import libtime
from pygaze.libtime import clock
from pygaze.libgazecon import AOI
from pygaze.libscreen import Display, Screen
from pygaze.libinput import Keyboard
from pygaze.libinput import Mouse
from pygaze.eyetracker import EyeTracker
from pygaze import liblog
from psychopy import event
import pygame  # needed below for the audio mixer
# # # # #
# SETUP

# visuals
disp = Display()
scr = Screen()
blnk = Screen()
# audio
pygame.mixer.init(frequency=44100, size=-16, buffer=2048, channels=1)
# input
mouse = Mouse(visible=True)
kb = Keyboard()

# start tracker and calibrate
eyetracker = EyeTracker(disp)
eyetracker.calibrate()

# set up logging
log = liblog.Logfile()

#########################################################################
# load image files (image stimuli)
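
This file imports AOI from pygaze.libgazecon, which the acclimation function in Example No. 7 relies on. A minimal sketch of how the two rectangular AOIs could be built around the +/-300 px image positions used there; the 326 px image size and the top-left `pos` convention are assumptions, not taken from this file.

from pygaze.libgazecon import AOI

img_size = 326
offset = 300
AOI_left = AOI('rectangle',
               pos=(DISPSIZE[0] / 2 - offset - img_size / 2,
                    DISPSIZE[1] / 2 - img_size / 2),
               size=(img_size, img_size))
AOI_right = AOI('rectangle',
                pos=(DISPSIZE[0] / 2 + offset - img_size / 2,
                     DISPSIZE[1] / 2 - img_size / 2),
                size=(img_size, img_size))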
Example No. 9
# start timing
libtime.expstart()

# create display object
disp = Display()

# create eyetracker object
tracker = eyetracker.EyeTracker(disp)

# create keyboard object
keyboard = Keyboard(keylist=['space', 'q'], timeout=1)

center_of_screen = (DISPSIZE[0] / 2, DISPSIZE[1] / 2)

# create screen to draw things on
screen1 = Screen()
screen1.draw_fixation(fixtype='cross',
                      pos=center_of_screen,
                      colour=(255, 255, 255),
                      pw=5,
                      diameter=30)
screen2 = Screen()
#screen1.draw_image(base_path, pos=(center_of_screen[0]-300,center_of_screen[1]), scale=None) #need screen width
#screen2.draw_image(base_path1, pos=(center_of_screen[0]+300,center_of_screen[1]), scale=None) #need screen width
screen3 = Screen()

# Create a Screen to draw images on
#screen4 = Screen()

# calibrate eye tracker
tracker.calibrate()
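
With the setup complete, a hedged sketch of how this script might then hand off to the acclimation routine from Example No. 7 (assuming AOI_left/AOI_right built as in the sketch after Example No. 8) and shut down cleanly:

acclimation(center_of_screen, tracker, disp, keyboard, AOI_left, AOI_right)

# clean up
tracker.close()
disp.close()
libtime.expend()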