linewidth=None, stimtypes=stimtypes)
    
    # Update the probe Screen.
    probelw = trial['nstim'] * [STIMLINEWIDTH]
    probelw[probed] = PROBELINEWIDTH
    probeoris = trial['nstim'] * [0]
    probestimtypes = trial['nstim'] * ['noise']
    probescr[trial['nstim']].update(stimlocs, probeoris, \
        linewidth=probelw, stimtypes=probestimtypes)
    

    # RUN
    
    # Show the fixation Screen.
    disp.fill(fixscr)
    fixonset = disp.show()
    timer.pause(random.randint(FIXTIME[0], FIXTIME[1]))
    
    # Show the stimulus Screen.
    disp.fill(stimscr[trial['nstim']])
    stimonset = disp.show()
    timer.pause(STIMTIME)
    
    # Show a blank Screen.
    disp.fill(blankscr)
    maintenanceonset = disp.show()
    timer.pause(MAINTENANCETIME)
    
    # Show the probe Screen.
    disp.fill(probescr[trial['nstim']])
    probeonset = disp.show()
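The excerpt ends at probe onset. A minimal sketch of how the trial could be wrapped up, assuming a Keyboard instance kb and a Logfile instance log exist elsewhere in the script (neither is shown in this excerpt), and using hypothetical response keys 's' (same) and 'd' (different):

    # Wait for a same/different judgement on the probed stimulus.
    response, presstime = kb.get_key(keylist=['s', 'd'], timeout=None, flush=True)
    # Log the trial; response time is measured from probe onset.
    log.write([trial['nstim'], probed, response, presstime - probeonset])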
Example #2
# visuals
disp = Display()
scr = Screen()

# input
tracker = EyeTracker(disp)
kb = Keyboard(keylist=None, timeout=None)

# calibrate
tracker.calibrate()

# starting screen
scr.clear()
scr.draw_text(text="Press Space to start")
disp.fill(scr)
disp.show()
kb.get_key(keylist=['space'], timeout=None, flush=True)


# # # # #
# VALIDATION

# loop through points
for i in range(len(CALIBPOINTS)):
	# get coordinate
	x, y = CALIBPOINTS[i]
	# draw calibration point
	scr.clear()
	scr.draw_fixation(fixtype='dot', pos=(x,y))
	disp.fill(scr)
	# start recording
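The loop body is cut off at the recording step. A hedged sketch of how it might continue, reusing the names from the excerpt and assuming a timer module is available (for example from pygaze import libtime as timer, as in the other excerpts); the 1000 ms sampling pause is an arbitrary illustration value:

	# show the calibration point
	disp.show()
	# record a short stretch of gaze data
	tracker.start_recording()
	timer.pause(1000)
	gazex, gazey = tracker.sample()
	tracker.stop_recording()
	# report the offset between the point and the sampled gaze position
	print("point %d: offset = (%d, %d)" % (i, gazex - x, gazey - y))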
Example #3
from constants import *
from pygaze.display import Display
from pygaze.screen import Screen
from pygaze.eyetracker import EyeTracker

disp = Display()
tracker = EyeTracker(disp)
scr = Screen()

scr.draw_text(text="Resetting connection to %s tracker" % (TRACKERTYPE), \
    fontsize=24)
disp.fill(scr)
disp.show()

try:
    tracker.stop_recording()
except Exception:
    print("Could not stop recording")

tracker.close()
disp.close()
Example #4
        # Now create a PsychoPy ImageStim instance to draw the frame with.
        stim = ImageStim(pygaze.expdisplay, image=frame, size=(width, height))
        # When DISPTYPE='psychopy', a Screen instance's screen property is a list
        # of PsychoPy stimuli. We want to add the ImageStim we just created to
        # that list, and record the index at which it was added.
        # The current length of stimscr's list of stimuli is the index that the
        # new ImageStim will end up at.
        stim_index = len(stimscr.screen)
        # Then we add the ImageStim to the stimscr. Every time you call
        # disp.fill(stimscr) and then disp.show(), all stimuli in stimscr
        # (including the ImageStim) will be drawn.
        stimscr.screen.append(stim)

    # Wait until the participant presses any key to start.
    disp.fill(textscr)
    disp.show()
    kb.get_key(keylist=None, timeout=None, flush=True)

    # Log the start of the trial.
    log.write([time.strftime("%y-%m-%d"), time.strftime("%H-%M-%S"), \
        trialnr, vidname, timer.get_time()])

    # Start eye tracking.
    tracker.start_recording()
    timer.pause(5)
    tracker.log("TRIALSTART")

    # Show a status message on the EyeLink.
    if TRACKERTYPE == 'eyelink':
        tracker.status_msg("Trial %d/%d (%s)" %
                           (trialnr, len(VIDEOS), vidname))
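The excerpt only sets up the ImageStim; the playback loop itself is not shown. A hedged sketch of how the frames could then be presented, assuming an iterable frames of images (not part of the excerpt) and reusing stim_index as recorded above:

    # Present the movie frame by frame.
    for frame in frames:
        # Swap the image of the ImageStim stored at stim_index in stimscr.
        stimscr.screen[stim_index].image = frame
        disp.fill(stimscr)
        disp.show()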
Example #5
# RUN

# run until a minute has passed
t0 = timer.get_time()
t1 = timer.get_time()
text = "Test the joystick!"
while t1 - t0 < 60000:
	# get joystick input
	event, value, t1 = js.get_joyinput(timeout=10)
	# update text
	if event is not None:
		text = "%s: %s" % (event, value)
		if event == 'joyaxismotion' and RUMBLE:
			set_vibration(0, max(0, value[2]), max(0, -value[2]))
	# display on screen
	scr.clear()
	scr.draw_text(text="%s\n\n(%.2f)" % (text, float(t1-t0)/1000.0), fontsize=24)
	# show text
	disp.fill(scr)
	disp.show()


# # # # #
# CLOSE

# reset rumble to 0
if RUMBLE:
	set_vibration(0, 0, 0)
# close the Display
disp.close()
Example #6
from scansync.mri import MRITriggerBox


##############
# INITIALISE #
##############

# Initialise a new Display instance.
disp = Display()

# Present a start-up screen.
scr = Screen()
scr.draw_text("Loading, please wait...", fontsize=24)
disp.fill(scr)
disp.show()

# Open a new log file and write its header.
log = Logfile()
log.write(["trialnr", "block", "run", "stim", "keypress", "go_nogo", \
    "face_onset", "signal_onset", "resp_onset", "RT", "accuracy", \
    "respmap", "block_type"])

# Open a new log file to log events.
event_log = Logfile(filename=EVENT_LOG)
event_log.write(["time", "event"])

# Initialise the eye tracker.
tracker = EyeTracker(disp)

# Create a new Keyboard instance to process key presses.
kb = Keyboard(keylist=None, timeout=5000)
Example #7
# create a new logfile
log = Logfile(filename="test")
log.write(["x_pos","y_pos", "time"])

# # # # #
# test gaze contingency

# UI test
scr.clear()
scr.draw_image(image_file)
x = (DISPSIZE[0] - IMGSIZE[0]) / 2 # centre minus half of the image width
y = (DISPSIZE[1] - IMGSIZE[1]) / 2 # centre minus half of the image height
aoi = AOI('rectangle', (x,y), IMGSIZE)
disp.fill(scr)
t1 = disp.show()
key = None
tracker.start_recording()
while key != 'space':
    # check for key input
    key, presstime = kb.get_key(keylist=['space'], timeout=1)
    # get gaze position
    gazepos = tracker.sample()
    gazetime = clock.get_time() - t1
    # log the sample if it falls within the area of interest
    if aoi.contains(gazepos):
        print(gazepos)
        print(gazetime)
        log.write([gazepos[0], gazepos[1], gazetime])
tracker.stop_recording()
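The excerpt stops after recording ends. A minimal wrap-up sketch, using only objects created above:

# close the logfile, the tracker connection, and the display
log.close()
tracker.close()
disp.close()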
Example #8
from libmeg import *

#%%

# # # # #
# INITIALISE

# Initialise a new Display instance.
disp = Display()

# Present a start-up screen.
scr = Screen()
scr.draw_text("Loading, please wait...", fontsize=MAIN_FONTSIZE)
disp.fill(scr)
disp.show()



# Open new log files and write their headers.
log = Logfile(filename=LOGFILE)
log_det = Logfile(filename=DETAILED_LOGFILE)
log_events = Logfile(filename=EVENT_LOGFILE)
log.write(["trialnr", "left_ang", "right_ang", "cue_dir", "targ", \
    "targ_ang", "resp_ang", "perc_diff", "resp_onset", "resp_duration", \
    "iti", "iti_onset", "stim_onset", "delay_onset", "cue_onset", \
    "postcue_onset", "probe_onset", "prac"])
log_det.write(["trialnr", "timestamp", "angle", "event", "targ_ang", "cue_dir"])
log_events.write(["Trigger", "Timestamp"])
# Initialise the eye tracker.
tracker = EyeTracker(disp)

# Create a new Keyboard instance to process key presses.
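The excerpt is cut off after this last comment. Judging from the equivalent line in the other examples, the missing statement would plausibly be:

kb = Keyboard(keylist=None, timeout=None)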
Example #9
                        'tarside': tarside, \
                         'target': tar, \
                         'soa': soa}

                #alltrials.append(trial)  # Add the 'trial' dict to the 'alltrials' list.
                alltrials.extend(TRIALREPEATS *
                                 [trial])  # Repeat the trial TRIALREPEATS times.

random.shuffle(alltrials)  # Randomise the order of the trials.
'''
Present the instructions;
the participant must press the space bar to continue.
'''

disp.fill(instscr)
disp.show()
kb.get_key(keylist=['space'], timeout=None)
#timer.pause(20000)
'''
Loop to run the task over the N trials.
'''

for trial in alltrials:
    '''
    Present the fixation screen.
    '''
    disp.fill(fixscr)
    fixonset = disp.show()

    timer.pause(FIXTIME)
    '''
Example #10
    "correct"])

# Create the feedback sounds: white noise for incorrect responses, and a
# quieter 440 Hz sine tone for correct responses.
bad_sound = Sound(osc="whitenoise", length=200)
bad_sound.set_volume(1)
good_sound = Sound(osc="sine", freq=440, length=200)
good_sound.set_volume(0.5)

# Create a new Screen instance.
scr = Screen()
scr.draw_text("Welcome!", fontsize=100, \
    colour=(255,100,100))

# Pass the screen to the display.
disp.fill(scr)
disp.show()
timer.pause(3000)

# Create a list of all trials.
trials = []
# Add all the words.
for word in WORDS:
    t = {}
    t["type"] = "word"
    t["stimulus"] = word
    trials.append(t)
# Add all the nonwords.
for word in NONWORDS:
    t = {}
    t["type"] = "nonword"
    t["stimulus"] = word