"Keep your eyes closed for 20 seconds, Open them when you hear a long beep/tone. Close them and press the spacebar to begin.", pos=(0, -3), ) instr3 = visual.TextStim(mywin, text="Keep your eyes closed at this time.", pos=(0, -3)) instr4 = visual.TextStim( mywin, text="You have finished the experiment! Press the spacebar to exit.", pos=(0, -3), ) # setup the fixation fixation = visual.GratingStim(win=mywin, size=0.1, pos=[0, 0], sf=0, rgb=[1, 1, 1]) core.wait(2) if cond_order == 1: timestamp = local_clock() outlet.push_sample([1], timestamp) core.wait(1) # display instructions for the first eyes-open block instr1.setAutoDraw(True) fixation.setAutoDraw(True) mywin.flip() event.waitKeys()
#!/usr/bin/env python """ This demo shows how you can make standard opengl calls within a psychopy script, allowing you to draw anything that OpenGL can draw (ie anything) """ from psychopy import visual, core, event from pyglet.gl import * myWin = visual.Window([600,600], units='norm',monitor='testMonitor') a_blob = visual.GratingStim(myWin, pos = [0.5,0],mask='gauss', sf=3) xx = visual.GratingStim(myWin, texRes=4) def drawStuff(): glBegin(GL_TRIANGLES) glColor3f(1.0, 0.0, 1) glVertex3f(0.0, 0.5, 1) glColor3f(0.0, 1.0, 0.0) glVertex3f(-0.5, -0.5, 1) glColor3f(0.0, 0.0, 1.0) glVertex3f(0.5, -0.5, -1) glEnd() a_blob.draw() myWin.flip() drawStuff() core.wait(2) core.quit()
# Adaptive staircase: 1-up / 3-down rule converges on roughly the 80%-correct
# threshold. NOTE(review): `data`, `visual`, `core` and `expInfo` come from
# earlier in this file (outside this excerpt).
staircase = data.StairHandler(
    startVal=20.0,
    stepType='db',  # step sizes expressed in decibels
    stepSizes=[8, 4, 4, 2, 2, 1, 1],  # steps shrink as the run converges
    nUp=1,
    nDown=3,  # will home in on the 80% threshold
    nTrials=1)

# create window and stimuli
win = visual.Window([800, 600], allowGUI=True, monitor='testMonitor', units='deg')
# Foil and target start at the same reference orientation; presumably the
# target is offset by the staircase value each trial -- confirm in trial loop.
foil = visual.GratingStim(win, sf=1, size=4, mask='gauss', ori=expInfo['refOrientation'])
target = visual.GratingStim(win, sf=1, size=4, mask='gauss', ori=expInfo['refOrientation'])
# Small black fixation disc (tex=None -> flat patch, color=-1 -> black).
fixation = visual.GratingStim(win, color=-1, colorSpace='rgb', tex=None, mask='circle', size=0.2)

# and some handy clocks to keep track of time
globalClock = core.Clock()
instrText = visual.TextStim(win=win, ori=0, name='instrText', text="OK. Ready?\n\nRemember:\r\n1) Stay fixated on the central white dot.\r\n2) Ignore the word itself; press:\n\t- Left for red LETTERS\n\t- Down for green LETTERS\n\t- Right for blue LETTERS\n\t- (Esc will quit)\n3) To toggle gaze position visibility, press 'g'.\r\n\nPress any key to continue", font='Arial', units='pix', pos=[0, 0], height=50, wrapWidth=800, color=[1, 1, 1], colorSpace='rgb', opacity=1, depth=0.0) # Initialize components for Routine "trial" trialClock = core.Clock() word = visual.TextStim(win=win, ori=0, name='word', text='nonsense', font=u'Arial', pos=[0, 0], height=100, wrapWidth=None, color=1.0, colorSpace=u'rgb', opacity=1, depth=0.0) fixation = visual.GratingStim(win=win, name='fixation',units='pix', tex='sin', mask='circle', ori=0, pos=[0, 0], size=[16,16], sf=40, phase=0.0, color=[1,1,1], colorSpace='rgb', opacity=.7, texRes=128, interpolate=True, depth=-1.0) gaze_cursor = visual.GratingStim(win=win, name='gaze_cursor',units='pix', tex='sin', mask='circle', ori=0, pos=[0,0], size=[20,20], sf=40, phase=0.0, color=[0.004,0.004,1.000], colorSpace='rgb', opacity=1.0, texRes=128, interpolate=True, depth=-2.0) maintain_fix_pix_boundary=66.0 eyetracker =False#will change if we get one! if expInfo['Eye Tracker']: from psychopy.iohub import EventConstants,ioHubConnection,load,Loader from psychopy.data import getDateStr # Load the specified iohub configuration file converting it to a python dict. io_config=load(file(expInfo['Eye Tracker'],'r'), Loader=Loader)
# Connect to a CRS Bits# box attached to this window; 'bits++' mode drives
# the box's high-bit-depth LUT. Quit cleanly if the box is not reachable.
bits = crs.BitsSharp(win=win, mode='bits++')
print(bits.info)
if not bits.OK:
    print('failed to connect to Bits box')
    core.quit()
core.wait(0.1)

# Now, you can change modes using
bits.mode = 'mono++'  # 'color++', 'mono++', 'bits++', 'auto++' or 'status'

# Create a stimulus and draw as normal
stim = visual.GratingStim(win, tex='sin', units='pix', size=400, sf=0.01,
                          mask='gauss', autoLog=False)
globalClock = core.Clock()
# Animate the grating's phase for 3 seconds.
while globalClock.getTime() < 3:
    t = globalClock.getTime()
    stim.phase = t * 3  # drift at 3Hz
    stim.draw()
    win.flip()

# You can test pixel values (going to the box) using getVideoLine();
# this requires 'status' mode and that takes a few moments to set up
bits.mode = 'status'
core.wait(3)
pixels = bits.getVideoLine(lineN=1, nPixels=5)
fullscr=True, screen=0, allowGUI=False, allowStencil=False, monitor='testMonitor', color=[-1, -1, -1], colorSpace='rgb', blendMode='avg', useFBO=True, ) #CREATE NONTEXT OBJECTS fixation = visual.GratingStim(win=win, mask='cross', size=0.25, pos=[0, 0], sf=0.1) sync = visual.ShapeStim(win, units='', lineWidth=1.5, lineColor='white', lineColorSpace='rgb', fillColor='white', fillColorSpace='rgb', vertices=((0.8, 0.9), (0.9, 0.9), (0.9, 0.8), (0.8, 0.8)), closeShape=True, pos=(0, 0), size=1,
psychopy_mon_name = 'testMonitor' exp_code = 'io_stroop' io = launchHubServer(psychopy_monitor_name=psychopy_mon_name, experiment_code=exp_code) io.sendMessageEvent(category='EXP', text='Experiment Started') kb = io.devices.keyboard mouse = io.devices.mouse win = visual.Window(allowGUI=False, fullscr=True) gabor = visual.GratingStim(win, tex="sin", mask="gauss", texRes=256, size=[200.0, 200.0], sf=[4, 0], ori=0, name='gabor1') letter = visual.TextStim(win, pos=(0.0, 0.0), text='X') retrace_count = 0 def loggedFlip(letter_char, letter_color): global retrace_count gabor.draw() letter.setText(letter_char) letter.setColor(letter_color) letter.draw() flip_time = win.flip()
TRIAL_LIST_RAND = TRIAL_LIST random.shuffle(TRIAL_LIST_RAND) # header for data log data = np.hstack(("Subject", "Run", "Type", "Category", "key_pressed", "RESP", "Accuracy", "RT")) for i in range( 1, 4 ): # runs 3 runs. Change the second number here to increase/decrease number of runs. for index in range(len(TRIAL_LIST_RAND)): #draw so we are ready to flip stim = visual.GratingStim(win, tex='sin', mask='gauss', sf=TRIAL_LIST_RAND[index]['ii_freq'], size=11, ori=TRIAL_LIST_RAND[index]['ii_or'], units='deg', autoLog=True) stim.draw() win.flip() t0 = globalClock.getTime() while globalClock.getTime() - t0 <= 5: #abort if esc was pressed KEY = get_keypress() if KEY != None: break # map keypress to meaningful response type if KEY != None: resp = KEY[0][0] RT = KEY[0][1] - t0
max_contr = .98 # create a default keyboard (e.g. to check for escape) defaultKeyboard = keyboard.Keyboard() # Initialize components for Routine "trial" pr_grating = visual.GratingStim(win=win, name='grating_murray', units='deg', tex='sin', mask='gauss', ori=0, pos=(0, 0), size=(10, 10), sf=1.2, phase=0, color=[max_contr, max_contr, max_contr], colorSpace='rgb', opacity=1, blendmode='avg', texRes=128, interpolate=True, depth=0.0) clock = core.Clock() max_resp_secs = 5 # Run-time loop for 1 Hz grating, max contrast .8 keep_going = True
font='Arial', pos=[0, 0], height=100, wrapWidth=None, color=1.0, colorSpace='rgb', opacity=1, depth=0.0) fixation = visual.GratingStim(win=win, name='fixation', units=u'pix', tex=None, mask=u'circle', ori=0, pos=[0, 0], size=16, sf=32, phase=0.0, color=[1, 1, 1], colorSpace=u'rgb', opacity=.7, texRes=128, interpolate=True, depth=-1.0) # Initialize components for Routine "thanks" thanksClock = core.Clock() thanksText = visual.TextStim( win=win, ori=0, name='thanksText', text='This is the end of the experiment.\n\nThanks!',
# Guard: __file__ is undefined inside Jupyter/REPL sessions, where opening a
# windowed script makes no sense -- abort early in that case.
assert '__file__' in locals()  # to make sure to not run this inside Jupyter
from psychopy import visual, event
from time import sleep

# BUG FIX: [255, 255, 255] is far outside PsychoPy's default 'rgb' color
# space (range -1..1); declare 'rgb255' explicitly so the intended white
# background is produced without clipping warnings.
mywin = visual.Window(size=[800, 600], monitor="testMonitor", units="norm",
                      color=[255, 255, 255], colorSpace='rgb255')

# Tiny black fixation dot (sf=0 -> flat patch; color=-1 -> black in 'rgb').
fixation = visual.GratingStim(win=mywin, size=0.015, pos=[0, 0], sf=0, color=-1)
# Circular grating patch on the left side of the screen.
grating = visual.GratingStim(win=mywin, mask="circle", size=0.2, pos=[-0.8, 0], sf=3)

# without these, the stimulus is not drawn
fixation.draw()
grating.draw()

# we draw onto the back buffer, and have to *flip* front and back buffer
# for the stimuli to be shown
mywin.flip()
sleep(5)  # keep the window on screen for 5 seconds
print mon.currentCalib['notes'] win = visual.Window( size = [1920, 1200], monitor = mon, screen = 0, fullscr = True, colorSpace = 'rgb255', color = 128, units = 'pix') noise_tex = np.random.rand(1920, 1200) * 2 - 1 noise_tex = np.random.binomial(2, 0.5, (1920, 1200)) - 1 noise = visual.GratingStim( win = win, tex = noise_tex, interpolate = False, size = [1920, 1200]) reference = visual.GratingStim( win = win, tex = None, colorSpace = 'rgb255', color = [1,1,1], size = [512, 256]) target = visual.GratingStim( win = win, tex = None, colorSpace = 'rgb255', color = [0,0,0],
languageStyle='LTR', depth=-2.0); proceed = keyboard.Keyboard() # Initialize components for Routine "experiment" experimentClock = core.Clock() Fixation = visual.TextStim(win=win, name='Fixation', text='+', font='Arial', pos=(0, 0), height=0.3, wrapWidth=None, ori=0, color='white', colorSpace='rgb', opacity=1, languageStyle='LTR', depth=0.0); target = visual.GratingStim( win=win, name='target', tex='sin', mask='circle', ori=1.0, pos=(0.5, 0), size=(0.5, 0.5), sf=[4,5], phase=0.0, color=[1,1,1], colorSpace='rgb', opacity=1,blendmode='avg', texRes=128, interpolate=True, depth=-1.0) original = visual.GratingStim( win=win, name='original', tex='sin', mask='circle', ori=1.0, pos=(-0.5, 0), size=(0.5, 0.5), sf=[4,5], phase=0.0, color=[1,1,1], colorSpace='rgb', opacity=1,blendmode='avg', texRes=128, interpolate=True, depth=-2.0) instruction_1 = visual.TextStim(win=win, name='instruction_1', text="Press 'up' if target is the same as original", font='Arial', pos=(0, 0.1), height=0.1, wrapWidth=None, ori=0, color='white', colorSpace='rgb', opacity=1, languageStyle='LTR', depth=-3.0);
# Create psychopy window my_monitor = monitors.Monitor( 'testMonitor', width=MON_WIDTH, distance=MON_DISTANCE ) # Create monitor object from the variables above. This is needed to control size of stimuli in degrees. my_monitor.setSizePix(MON_SIZE) win = visual.Window( monitor=my_monitor, units='deg', fullscr=True, allowGUI=False, color='black' ) # Initiate psychopy Window as the object "win", using the myMon object from last line. Use degree as units! # Stimuli. stim_gabor = visual.GratingStim( win, mask='gauss', sf=GABOR_SF, size=GABOR_SIZE) # A gabor patch. Again, units are inherited. stim_fix = visual.TextStim( win, '+', height=FIX_HEIGHT ) # Fixation cross is just the character "+". Units are inherited from Window when not explicitly specified. stim_text = visual.TextStim( win, pos=MESSAGE_POS, height=MESSAGE_HEIGHT, wrapWidth=999 ) # Message / question stimulus. Will be used to display instructions and questions. sound_success = sound.Sound( 'C', secs=0.1, octave=6 ) # Obs, ppc.Sound() is much more accurate, but only works on windows. sound_fail = sound.Sound('C', secs=0.4, octave=4) """ FUNCTIONS """
# %% """STIMULI""" # INITIALISE SOME STIMULI movRTP = visual.GratingStim(myWin, tex=noiseTexture, mask='none', pos=(0.0, 0.0), size=(fieldSizeinPix, fieldSizeinPix), sf=None, ori=0.0, phase=(0.0, 0.0), color=(1.0, 1.0, 1.0), colorSpace='rgb', contrast=1.0, opacity=1.0, depth=0, rgbPedestal=(0.0, 0.0, 0.0), interpolate=False, name='movingRTP', autoLog=None, autoDraw=False, maskParams=None) # fixation dot dotFix = visual.Circle( myWin, autoLog=False, name='dotFix',
def present(duration=30, eeg=None, save_fn=None):
    """Run an SSVEP experiment: flicker pattern-reversal gratings and, when an
    ``eeg`` device is supplied, stream event markers alongside the recording.

    Parameters
    ----------
    duration : number
        Recording duration in seconds.
    eeg : object or None
        EEG device wrapper providing ``start``/``stop``/``push_sample``.
    save_fn : str or None
        Output path for the EEG data; auto-generated when None.
    """
    # Experiment parameters
    n_trials = 2010
    iti = 0.5      # inter-trial interval (s)
    soa = 3.0      # stimulus duration per trial (s)
    jitter = 0.2   # random jitter added to each ITI (s)
    record_duration = np.float32(duration)
    markernames = [1, 2]

    # Setup trial list: one randomly chosen stimulation frequency per trial
    stim_freq = np.random.binomial(1, 0.5, n_trials)
    trials = DataFrame(dict(stim_freq=stim_freq, timestamp=np.zeros(n_trials)))

    # Set up graphics: two gratings in opposite phase (pattern reversal) plus
    # a red fixation point that stays drawn on every flip.
    mywin = visual.Window([1600, 900], monitor="testMonitor", units="deg", fullscr=True)
    grating = visual.GratingStim(win=mywin, mask="circle", size=80, sf=0.2)
    grating_neg = visual.GratingStim(win=mywin, mask="circle", size=80, sf=0.2, phase=0.5)
    fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0.2,
                                  color=[1, 0, 0], autoDraw=True)

    # Generate the possible ssvep frequencies based on monitor refresh rate
    def get_possible_ssvep_freqs(frame_rate, stim_type="single"):
        """Get possible SSVEP stimulation frequencies.

        Returns a dict mapping each achievable stimulation frequency (Hz) to
        a list of (on, off) period tuples producing it, given the screen
        refresh rate. See Zhu et al., "A Survey of Stimulation Methods Used
        in SSVEP-Based BCIs", Comput. Intell. Neurosci., 2010.
        """
        max_period_nb = int(frame_rate / 6)
        periods = np.arange(max_period_nb) + 1

        if stim_type == "single":
            freqs = dict()
            for p1 in periods:
                for p2 in periods:
                    f = frame_rate / (p1 + p2)
                    try:
                        freqs[f].append((p1, p2))
                    except KeyError:  # BUG FIX: was a bare `except:`, which
                        # would also swallow KeyboardInterrupt etc.
                        freqs[f] = [(p1, p2)]
        elif stim_type == "reversal":
            freqs = {frame_rate / p: [(p, p)] for p in periods[::-1]}

        return freqs

    def init_flicker_stim(frame_rate, cycle, soa):
        """Initialize flickering stimulus parameters.

        ``cycle`` as a tuple means (on, off) periods of a "single graphic"
        stimulus; as an int it is the total period count of a "pattern
        reversal" stimulus. Returns a dict with 'cycle', 'freq' and
        'n_cycles' for one trial of duration ``soa`` seconds.
        """
        if isinstance(cycle, tuple):
            stim_freq = frame_rate / sum(cycle)
            n_cycles = int(soa * stim_freq)
        else:
            stim_freq = frame_rate / cycle
            cycle = (cycle, cycle)
            n_cycles = int(soa * stim_freq) / 2

        return {"cycle": cycle, "freq": stim_freq, "n_cycles": n_cycles}

    # Set up stimuli
    frame_rate = np.round(mywin.getActualFrameRate())  # Frame rate, in Hz
    # NOTE(review): `freqs` is computed but unused below; the two patterns are
    # both built from a hard-coded 144-period cycle.
    freqs = get_possible_ssvep_freqs(frame_rate, stim_type="reversal")
    stim_patterns = [
        init_flicker_stim(frame_rate, 144, soa),
        init_flicker_stim(frame_rate, 144, soa),
    ]

    print(("Flickering frequencies (Hz): {}\n".format(
        [stim_patterns[0]["freq"], stim_patterns[1]["freq"]])))

    # Show the instructions screen
    show_instructions(duration)

    # start the EEG stream, will delay 5 seconds to let signal settle
    if eeg:
        if save_fn is None:  # If no save_fn passed, generate a new unnamed save file
            save_fn = generate_save_fn(eeg.device_name, "visual_ssvep", "unnamed")
            print(
                f"No path for a save file was passed to the experiment. Saving data to {save_fn}"
            )
        eeg.start(save_fn, duration=record_duration)

    # Iterate through trials
    start = time()
    for ii, trial in trials.iterrows():
        # Intertrial interval
        core.wait(iti + np.random.rand() * jitter)

        # Select stimulus frequency
        ind = trials["stim_freq"].iloc[ii]

        # Push sample
        if eeg:
            timestamp = time()
            if eeg.backend == "muselsl":
                marker = [markernames[ind]]
            else:
                marker = markernames[ind]
            eeg.push_sample(marker=marker, timestamp=timestamp)

        # Present flickering stim: alternate the two phase-opposed gratings
        # for the configured number of on/off frames per cycle.
        for _ in range(int(stim_patterns[ind]["n_cycles"])):
            grating.setAutoDraw(True)
            for _ in range(int(stim_patterns[ind]["cycle"][0])):
                mywin.flip()
            grating.setAutoDraw(False)
            grating_neg.setAutoDraw(True)
            # CONSISTENCY FIX: cast to int() like the "on" loop above (the
            # cycle entries are numpy integers; range() needs Python ints).
            for _ in range(int(stim_patterns[ind]["cycle"][1])):
                mywin.flip()
            grating_neg.setAutoDraw(False)

        # offset
        mywin.flip()
        if len(event.getKeys()) > 0 or (time() - start) > record_duration:
            break
        event.clearEvents()

    # Cleanup
    if eeg:
        eeg.stop()
    mywin.close()
##################### # Initialize if useDB: sessionID = startExp(expName, createTableStatement, dbConf) else: sessionID = 1 # dim: 1680x1050 window = visual.Window(units="pix", size=(1024, 768), color=[-1, -1, -1], fullscr=True) grating1 = visual.GratingStim(win=window, size=(2000, 2000), sf=0.01, ori=45, contrast=.2, opacity=1) grating2 = visual.GratingStim(win=window, size=(2000, 2000), sf=0.01, ori=135, contrast=.2, opacity=.5) noise1 = visual.NoiseStim(win=window, units='pix', noiseType='white', size=(1650, 1050), opacity=.3) mouse = event.Mouse(visible=False) timer = core.Clock()
# Full-screen window matching the tracker display's resolution and coordinate
# system (both queried from the ioHub `display` device).
win = visual.Window(display.getPixelResolution(),
                    units=display.getCoordinateType(),
                    fullscr=True,
                    allowGUI=False)
win.setMouseVisible(True)

# Circular region used to decide whether the current gaze is "in region".
gaze_ok_region = visual.Circle(win, lineColor='black', radius=300, units='pix')
# Soft-edged green blob marking the live gaze position.
gaze_dot = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0),
                              size=(40, 40), color='green', units='pix')

# %-style templates filled in later with (x, y, in_region) or shown verbatim
# when gaze data is missing.
text_stim_str = 'Eye Position: %.2f, %.2f. In Region: %s\n'
text_stim_str += 'Press space key to start next trial.'
missing_gpos_str = 'Eye Position: MISSING. In Region: No\n'
missing_gpos_str += 'Press space key to start next trial.'
text_stim = visual.TextStim(win,
                            text=text_stim_str,
                            pos=[0, 0],
                            height=24,
                            color='black',
                            units='pix',
                            wrapWidth=win.size[0] * .9)
"""Flash a 2x2 checkerboard by pattern reversal at a target frequency.

The script measures the real frame rate, computes how many frames each
reversal should last, and reverses the texture until Escape is pressed.
"""
from psychopy import visual, core, event
import numpy as np
import time

tex = np.array([[1, -1], [-1, 1]])  # 2x2 checkerboard texture
cycles = 7
size = 512
frequency = 5  # desired reversal frequency, Hz

win = visual.Window([1024, 700], units='pix')
stim = visual.GratingStim(win, tex=tex, size=size, units='pix',
                          sf=cycles / size, interpolate=False)

# Convert the requested frequency into a whole number of frames per reversal.
frame_rate = win.getActualFrameRate()
frame_interval = 1 / frame_rate
interval = 1 / frequency
stim_frames = np.round(interval / frame_interval)

print("Frame rate is {0}. Actual Flashing Frequency will be {1}".format(
    frame_rate, str(1 / (stim_frames * frame_interval))))
win.flip()

n = 0
while not event.getKeys('escape'):
    # Reverse the pattern every `stim_frames` frames.
    if n % stim_frames == 0:
        stim.tex = -stim.tex
    stim.draw()
    win.flip()
    # BUG FIX: `n` was never incremented, so the reversal condition was true
    # on every frame and the stimulus flashed at the refresh rate instead of
    # the computed frequency.
    n += 1
# Initialize components for Routine "instruction" instructionClock = core.Clock() instr = visual.TextStim(win=win, name='instr', text='Drücken Sie die Leertaste wenn das angezeigte Kreuz rot ist.\nDrücken Sie keine Taste falls das angezeigte Kreuz NICHT rot ist.\n\nDrücken Sie die Leertaste zum starten!', font='Arial', pos=(0, 0), height=0.05, wrapWidth=None, ori=0, color='white', colorSpace='rgb', opacity=1, languageStyle='LTR', depth=0.0); # Initialize components for Routine "trial" trialClock = core.Clock() fixation = visual.GratingStim( win=win, name='fixation', tex='sin', mask='gauss', ori=0, pos=(0, 0), size=(0.1, 0.1), sf=0, phase=0.0, color=[-1,-1,-1], colorSpace='rgb', opacity=1.0,blendmode='avg', texRes=128, interpolate=True, depth=0.0) go_item = visual.ShapeStim( win=win, name='go_item', vertices='cross', size=(0.3, 0.3), ori=0, pos=[0,0], lineWidth=1, lineColor=[-1,-1,-1], lineColorSpace='rgb', fillColor=1.0, fillColorSpace='rgb', opacity=1.0, depth=-1.0, interpolate=True) #Dies ist ein Kommentar #Wir müssen am Anfang des Experiments das random Modul (von python) #importieren um Zugriff auf einen Zufallsgenerator zu haben import random # Initialize components for Routine "feedback"
def setupMonitor(self):
    """Create the PsychoPy monitor, window and all stimuli for this session.

    Reads geometry and stimulus parameters from ``self.config`` and populates:
    ``self.mon``, ``self.win``, ``self.background``, ``self.referenceCircles``,
    ``self.stim``, ``self.fixationStim``, ``self.fixationAid`` and, depending
    on configuration, ``self.annuli``, ``self.masks`` and the gaze tracker.
    """
    physicalSize = monitorTools.getPhysicalSize()
    resolution = monitorTools.getResolution()

    self.mon = monitors.Monitor('testMonitor')
    self.mon.setDistance(
        self.config['Display settings']
        ['monitor_distance'])  # Measure first to ensure this is correct
    self.mon.setWidth(physicalSize[0] / 10)  # physical size is in mm; monitor wants cm
    self.mon.setSizePix(resolution)
    self.mon.save()
    self.win = visual.Window(size=resolution,
                             fullscr=True,
                             monitor='testMonitor',
                             allowGUI=False,
                             units='deg')
    # Oversized rectangle guarantees the configured background colour covers
    # the whole screen regardless of window units.
    self.background = visual.Rect(
        self.win,
        size=[dim * 2 for dim in resolution],
        units='pix',
        color=self.config['Display settings']['background_color'])
    self.flipBuffer()
    # Two concentric outline circles drawn around the stimulus patch.
    self.referenceCircles = [
        visual.Circle(
            self.win,
            radius=self.config['Stimuli settings']['stimulus_size'] * .5,
            lineColor=-1,
            lineWidth=5,
            name='Circle surrounding patch'),
        visual.Circle(
            self.win,
            radius=self.config['Stimuli settings']['stimulus_size'] * .6,
            lineColor=-1,
            lineWidth=5,
            name='Circle surrounding patch'),
    ]
    # The gaussian-windowed grating that is the actual discrimination stimulus.
    self.stim = visual.GratingStim(
        self.win,
        contrast=self.config['Stimuli settings']['stimulus_contrast'],
        sf=self.config['Stimuli settings']['stimulus_frequency'],
        size=self.config['Stimuli settings']['stimulus_size'],
        mask='gauss')
    # Open "+" drawn as two strokes (closeShape=False keeps them disjoint).
    fixationVertices = (
        (0, -0.5),
        (0, 0.5),
        (0, 0),
        (-0.5, 0),
        (0.5, 0),
    )
    self.fixationStim = visual.ShapeStim(
        self.win,
        vertices=fixationVertices,
        lineColor=-1,
        closeShape=False,
        size=self.config['Display settings']['fixation_size'] / 60.0)
    # Outer ring + inner dot aiding fixation within the allowed gaze offset.
    self.fixationAid = [
        visual.Circle(
            self.win,
            radius=self.config['Gaze tracking']['gaze_offset_max'] * .5,
            lineColor=self.config['Display settings']['fixation_color'],
            fillColor=None,
        ),
        visual.Circle(
            self.win,
            radius=self.config['Gaze tracking']['gaze_offset_max'] * .05,
            fillColor=self.config['Display settings']['fixation_color'],
            lineColor=None,
        )
    ]
    if self.config['Display settings']['show_annuli']:
        # One outline circle per (eccentricity, angle) stimulus location.
        self.annuli = {}
        for eccentricity in self.config['Stimuli settings'][
                'eccentricities']:
            self.annuli[eccentricity] = []
            for angle in self.config['Stimuli settings'][
                    'stimulus_position_angles']:
                pos = [
                    numpy.cos(angle * numpy.pi / 180.0) * eccentricity,
                    numpy.sin(angle * numpy.pi / 180.0) * eccentricity,
                ]
                self.annuli[eccentricity].append(
                    visual.Circle(self.win,
                                  pos=pos,
                                  radius=.5 * monitorTools.scaleSizeByEccentricity(
                                      self.config['Stimuli settings']
                                      ['stimulus_size'], eccentricity),
                                  lineColor=self.config['Display settings']
                                  ['annuli_color'],
                                  fillColor=None,
                                  units='deg'))
    if self.config['Stimuli settings']['mask_time'] > 0:
        # Pre-build a gaussian-masked image per location for post-stimulus masking.
        self.masks = {}
        size = self.config['Stimuli settings']['stimulus_size']
        maskImagePath = assets.getFilePath(
            os.path.join('assets', 'PyOrientationDiscrimination',
                         'mask.png'))
        for eccentricity in self.config['Stimuli settings'][
                'eccentricities']:
            self.masks[eccentricity] = []
            for angle in self.config['Stimuli settings'][
                    'stimulus_position_angles']:
                pos = [
                    numpy.cos(angle * numpy.pi / 180.0) * eccentricity,
                    numpy.sin(angle * numpy.pi / 180.0) * eccentricity,
                ]
                self.masks[eccentricity].append(
                    visual.ImageStim(
                        self.win,
                        image=maskImagePath,
                        pos=pos,
                        size=monitorTools.scaleSizeByEccentricity(
                            size, eccentricity),
                        mask='gauss',
                    ))
    if self.config['Gaze tracking']['wait_for_fixation'] or self.config[
            'Gaze tracking']['render_at_gaze']:
        # Gaze-contingent mode: start the pupil tracker and its screen markers.
        self.screenMarkers = PyPupilGazeTracker.PsychoPyVisuals.ScreenMarkers(
            self.win)
        self.gazeTracker = PyPupilGazeTracker.GazeTracker.GazeTracker(
            smoother=PyPupilGazeTracker.smoothing.SimpleDecay(),
            screenSize=resolution)
        self.gazeTracker.start(closeShutter=False)
        self.gazeMarker = PyPupilGazeTracker.PsychoPyVisuals.FixationStim(
            self.win,
            size=self.config['Gaze tracking']['gaze_offset_max'],
            units='deg',
            autoDraw=False)
    else:
        self.gazeTracker = None
    self.cobreCommander = ShutterController()
    self.trial = None
# Set the trial clock to 0. # This clock will start counting from the wait screen, so includes that time.. clockTrial.reset() # If testing, show the blank. # if config.TESTING: # blankStimulus = config.PROCESSOR(config.BLANK_IMAGE, config.GRID) # rendered = config.GRID.render(blankStimulus.vector) # imageStimulus = visual.ImageStim(testWin, image=rendered, size=(2,2)) # imageStimulus.draw(); testWin.flip() # Show a prompt on grey background at the beginning of the trial and wait for a keypress. bg = visual.GratingStim(win, tex=None, mask=None, size=2, units='norm', color=0) prompt = visual.TextStim(win, text=config.PROMPT_TEXT.format( trial * 100 // config.NTRIALS)) # prompt = visual.TextStim(win, text='HELLO', font='Consolas') bg.draw() prompt.draw() win.flip() event.waitKeys(clearEvents=True) # bg.draw(); win.flip(); event.waitKeys(clearEvents=True) # prompt.draw(); win.flip(); event.waitKeys(clearEvents=True) previousDigit = False
logging.log(level=logging.EXP, msg=msg) print(msg) ########################################################### # Prepare window and stimuli ########################################################### win = visual.Window([1920, 1080], pos=[0, 0], fullscr=True, autoLog=False, monitor="DellLaptop") fixation = visual.GratingStim(win, tex=None, mask='gauss', sf=0, size=0.02, name='fixation', autoLog=False) num_images = 10 images = [ visual.ImageStim(win, image='images/img%d.jpg' % image_list[idx_image]) for idx_image in range(num_images) ] open_sound = sound.Sound(value='C', secs=0.5, octave=5, sampleRate=44100, bits=16,
globalClock = core.Clock() # for luminance modulated noise noiseMatrix = num.random.randint(0, 2, [pixels, pixels]) # * noiseContrast noiseMatrix = noiseMatrix * 2.0 - 1 # into range -1: 1 stimFrames = [] lumGratings = [] # create the 4 frames of the sequence (luminance and contrast modulated noise in quadrature) lumGratings.append(filters.makeGrating(pixels, 0, cyclesSpace, phase=0)) stimFrames.append( visual.GratingStim(win, texRes=pixels, mask='circle', size=pixels * 2, sf=1.0 / pixels, ori=90, tex=(noiseMatrix * info['lumModNoise'] + lumGratings[0] * info['lumModLum']))) lumGratings.append( filters.makeGrating(pixels, 0, cyclesSpace, phase=90) / 2.0 + 0.5) stimFrames.append( visual.GratingStim(win, texRes=pixels, mask='circle', size=pixels * 2, sf=1.0 / pixels, ori=90, tex=(noiseMatrix * info['contrastModNoise'] * lumGratings[1]))) lumGratings.append(filters.makeGrating(pixels, 0, cyclesSpace, phase=180))
# Monitor object: 35 cm wide display viewed from 65 cm.
customMon = monitors.Monitor('demoMon', width=35, distance=65)
customMon.setSizePix((SCN_WIDTH, SCN_HEIGHT))

# Open a window (allowStencil enables aperture/stencil-based drawing)
win = visual.Window((SCN_WIDTH, SCN_HEIGHT), fullscr=False, monitor=customMon,
                    units='pix', allowStencil=True)

# Request Pylink to use the PsychoPy window for calibration
graphics = EyeLinkCoreGraphicsPsychoPy(tk, win)
pylink.openGraphicsEx(graphics)

# Step 5: prepare the pursuit target, the clock and the movement parameters
target = visual.GratingStim(win, tex=None, mask='circle', size=25)
pursuitClock = core.Clock()

# Parameters for the sinusoidal movement pattern, one list per trial:
# [amp_x, amp_y, phase_x, phase_y, freq_x, freq_y]
mov_pars = [[300, 300, pi * 3 / 2, pi * 2, 1.0, 1.0],
            [300, 300, pi * 3 / 2, pi, 1.0, 1.0]]

# Step 6: show some instructions and calibrate the tracker.
calib_prompt = 'Press Enter twice to calibrate the tracker'
calib_msg = visual.TextStim(win, text=calib_prompt, color='white', units='pix')
calib_msg.draw()
win.flip()
event.waitKeys()  # wait for a keypress before starting calibration

# Calibrate the tracker
def initializePatchesExample(win, exampleText, positionPatch1, positionPatch2):
    """Build the stimuli for the Gabor-patch introduction example.

    Input:
        win: window object
        exampleText: text displayed alongside the example patches
        positionPatch1: position of the first patch
        positionPatch2: position of the second patch

    Return:
        tuple of (clock for example timing, first patch stimulus,
        second patch stimulus, caption text stimulus)
    """
    exampleClock = core.Clock()

    # Settings shared by both demo patches; only name, position and opacity differ.
    sharedPatchSettings = dict(
        win=win, units='cm', tex=u'sin', mask=u'raisedCos', ori=0, size=10,
        sf=0.4, phase=0.0, color=[1, 1, 1], colorSpace='rgb', texRes=512,
        interpolate=True, depth=-1.0)
    firstPatch = visual.GratingStim(
        name='examplePatch1', pos=positionPatch1, opacity=0.7,
        **sharedPatchSettings)
    secondPatch = visual.GratingStim(
        name='examplePatch2', pos=positionPatch2, opacity=0.5,
        **sharedPatchSettings)

    # Caption shown above the patches (norm units, top of the screen).
    captionStim = visual.TextStim(
        win=win, ori=0, name='mainText', text=exampleText, font=u'Arial',
        pos=[0, 0.6], height=0.08, wrapWidth=1.5, color=u'white',
        colorSpace='rgb', opacity=1, depth=-4.0, units="norm")

    return (exampleClock, firstPatch, secondPatch, captionStim)
def present(record_duration=120, stim_types=None, itis=None, additional_labels=None,
            secs=0.07, volume=0.8, eeg=None, save_fn=None):
    """Run an auditory ERP experiment: play one of two tones per trial and,
    when an ``eeg`` device is supplied, push a marker per stimulus.

    Parameters
    ----------
    record_duration : number
        Total recording duration in seconds.
    stim_types : sequence
        Per-trial tone index (0 or 1).
    itis : sequence
        Per-trial inter-trial interval, in seconds.
    additional_labels : dict or None
        Extra per-trial columns; their values are also pushed as markers.
    secs, volume : number
        Tone duration (s) and playback volume.
    eeg : object or None
        EEG device wrapper providing ``start``/``push_sample``.
    save_fn : str or None
        Output path for the EEG data; auto-generated when None.

    Returns
    -------
    DataFrame of the trials that were configured.
    """
    # BUG FIX: `additional_labels` used a mutable default argument ({});
    # use None and create a fresh dict per call instead.
    if additional_labels is None:
        additional_labels = {}

    markernames = [1, 2]
    start = time.time()

    # Set up trial parameters
    if eeg:
        if save_fn is None:  # If no save_fn passed, generate a new unnamed save file
            save_fn = generate_save_fn(eeg.device_name, 'auditory_erp_arrayin',
                                       'unnamed')
            print(f'No path for a save file was passed to the experiment. Saving data to {save_fn}')
        eeg.start(save_fn, duration=record_duration)

    # Initialize stimuli: two pure tones (440 Hz and 528 Hz)
    aud1 = sound.Sound(440, secs=secs)
    aud1.setVolume(volume)
    aud2 = sound.Sound(528, secs=secs)
    aud2.setVolume(volume)
    auds = [aud1, aud2]

    # Setup trial list, one row per stimulus, plus any extra label columns
    trials = DataFrame(dict(sound_ind=stim_types, iti=itis))
    for col_name, col_vec in additional_labels.items():
        trials[col_name] = col_vec

    # Setup graphics: red fixation point that stays drawn throughout
    mywin = visual.Window([1920, 1080], monitor='testMonitor', units='deg',
                          fullscr=True)
    fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0,
                                  rgb=[1, 0, 0])
    fixation.setAutoDraw(True)
    mywin.flip()

    trial_counter = 0  # number of trials actually presented
    for ii, trial in trials.iterrows():
        trial_counter = trial_counter + 1

        # Intertrial interval
        time.sleep(trial['iti'])

        # Select and play sound
        ind = int(trial['sound_ind'])
        auds[ind].stop()
        auds[ind].play()

        # Collect the extra label values for this trial
        additional_stamps = []
        for k in additional_labels.keys():
            additional_stamps += [trial[k]]

        # Send marker
        timestamp = time.time()
        if eeg:
            if eeg.backend == 'muselsl':
                marker = list(map(int, additional_stamps))
            else:
                marker = additional_stamps
            eeg.push_sample(marker=marker, timestamp=timestamp)

        # Stop early on any keypress or once the recording window is over
        if len(event.getKeys()) > 0 or time.time() - start > record_duration:
            print("breaking")
            print("time.time() - start/duration:")
            print(time.time() - start)
            print(trial_counter)
            break
        event.clearEvents()

    # If every configured trial was played before the recording window ended,
    # stall by replaying a tone every 25 s until the window elapses.
    if (time.time() - start < record_duration
            and trial_counter == len(stim_types)):
        print("ran out of sounds to play!, time to stall")
        while (time.time() - start < record_duration):
            time.sleep(25)
            ind = 1
            auds[ind].stop()
            auds[ind].play()

    print("done")
    mywin.close()
    return trials
if expInfo['frameRate'] != None: frameDur = 1.0 / round(expInfo['frameRate']) else: frameDur = 1.0 / 60.0 # couldn't get a reliable measure so guess # Initialize components for Routine "trial" trialClock = core.Clock() ISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI') grating1 = visual.GratingStim(win=win, name='grating1', tex=u'sqrXsqr', mask=None, ori=0, pos=[0, 0], size=[0.7, 0.7], sf=6, phase=0.0, color=[1, 1, 1], colorSpace=u'rgb', opacity=1, texRes=128, interpolate=True, depth=-1.0) grating2 = visual.GratingStim(win=win, name='grating2', tex=u'sqrXsqr', mask=None, ori=0, pos=[0, 0], size=[0.7, 0.7], sf=6,
def run(self, *args):
    """Run the gaze-contingent image-viewing demo session.

    This is the experiment's main logic: calibrates the eye tracker,
    then for each row of ``trial_conditions.xlsx`` shows an image while
    drawing a green gaze cursor at the latest gaze sample, until the
    space key ends the trial. All timing/bookkeeping is sent to the
    ioHub DataStore as experiment messages.

    Parameters
    ----------
    *args
        ``args[0]`` is the name of the selected eye tracker (logged in
        the session messages).
    """
    exp_conditions = importConditions('trial_conditions.xlsx')
    trials = TrialHandler(exp_conditions, 1)

    # Inform the ioDataStore that the experiment is using a TrialHandler,
    # so it creates a table recording the trial variable values (DV/IV)
    # in the order run / collected.
    self.hub.createTrialHandlerRecordTable(trials)

    selected_eyetracker_name = args[0]

    # Short-cuts to the ioHub devices used in this experiment.
    tracker = self.hub.devices.tracker
    display = self.hub.devices.display
    kb = self.hub.devices.keyboard

    # Start by running the eye tracker's default setup procedure
    # (calibration etc.).
    tracker.runSetupProcedure()

    # Create a PsychoPy window: full screen resolution, full screen mode.
    res = display.getPixelResolution()
    window = visual.Window(res,
                           monitor=display.getPsychopyMonitorName(),
                           units=display.getCoordinateType(),
                           fullscr=True,
                           allowGUI=False,
                           screen=display.getIndex())

    # Cache an ImageStim per trial image, plus a gaze blob and an
    # instruction text stim, all in the display's coordinate space.
    display_coord_type = display.getCoordinateType()
    image_cache = dict()
    image_names = ['canal.jpg', 'fall.jpg', 'party.jpg', 'swimming.jpg', 'lake.jpg']
    for iname in image_names:
        image_cache[iname] = visual.ImageStim(window,
                                              image=os.path.join('./images/', iname),
                                              name=iname, units=display_coord_type)
    gaze_dot = visual.GratingStim(window, tex=None, mask="gauss",
                                  pos=(0, 0), size=(66, 66), color='green',
                                  units=display_coord_type)
    instructions_text_stim = visual.TextStim(window, text='', pos=[0, 0],
                                             height=24, color=[-1, -1, -1],
                                             colorSpace='rgb',
                                             alignHoriz='center',
                                             alignVert='center',
                                             wrapWidth=window.size[0] * .9)

    # Show the start screen and log its display time to the DataStore.
    instuction_text = "Press Any Key to Start Experiment."
    instructions_text_stim.setText(instuction_text)
    instructions_text_stim.draw()
    flip_time = window.flip()
    self.hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)

    # Wait until a key event occurs after the instructions are displayed.
    self.hub.clearEvents('all')
    kb.waitForPresses()

    # Log session information, including the eye tracker in use.
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
    self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
    self.hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID, self.hub.experimentSessionID))
    self.hub.sendMessageEvent(text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getIndex(), display.getPixelResolution(), display.getCoordinateType()))
    self.hub.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
    self.hub.sendMessageEvent(text="Eye Tracker being Used: {0}".format(selected_eyetracker_name))
    self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")

    self.hub.clearEvents('all')
    t = 0
    for trial in trials:
        # Per-trial "press space to start" screen.
        instuction_text = "Press Space Key To Start Trial %d" % t
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time = window.flip()
        # NOTE(review): this reuses the "EXPERIMENT_START" label for the
        # trial-start screen (likely copy-paste); kept as-is since analysis
        # scripts may depend on the existing message text.
        self.hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)

        # Wait for a space key press before starting the trial.
        kb.waitForPresses(keys=' ')

        # Trial start: clear the screen, record bookkeeping in the trial
        # record, start eye data recording, and drop any stale events.
        flip_time = window.flip()
        trial['session_id'] = self.hub.getSessionID()
        trial['trial_id'] = t + 1
        trial['TRIAL_START'] = flip_time
        self.hub.sendMessageEvent(text="TRIAL_START", sec_time=flip_time)
        self.hub.clearEvents('all')
        tracker.setRecordingState(True)

        # Look up the image stim for this trial.
        image_name = trial['IMAGE_NAME']
        imageStim = image_cache[image_name]

        # Draw the image each frame — with the gaze cursor when the tracker
        # reports a valid gaze position — until space is pressed.
        run_trial = True
        while run_trial is True:
            # Latest gaze position in display coordinate space; a non-tuple
            # return means no valid sample is available.
            gpos = tracker.getLastGazePosition()
            if isinstance(gpos, (tuple, list)):
                gaze_dot.setPos(gpos)
                imageStim.draw()
                gaze_dot.draw()
            else:
                imageStim.draw()

            # Flip video buffers, updating the display.
            flip_time = window.flip()

            # BUG FIX: these messages previously interpolated `iname`, the
            # leftover loop variable from building image_cache (always the
            # last cached image), instead of the current trial's image name.
            if isinstance(gpos, (tuple, list)):
                self.hub.sendMessageEvent("IMAGE_UPDATE %s %.3f %.3f" % (image_name, gpos[0], gpos[1]), sec_time=flip_time)
            else:
                self.hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]" % (image_name), sec_time=flip_time)

            # A space press ends the trial.
            if ' ' in kb.getPresses():
                run_trial = False

        # Trial end: log the end time, stop recording (eye data between
        # trials is not needed), and save the condition-variable row.
        flip_time = window.flip()
        trial['TRIAL_END'] = flip_time
        self.hub.sendMessageEvent(text="TRIAL_END %d" % t, sec_time=flip_time)
        tracker.setRecordingState(False)
        self.hub.addTrialHandlerRecord(trial)
        self.hub.clearEvents('all')
        t += 1

    # All trials have been run: disconnect the eye tracking device.
    tracker.setConnectionState(False)

    # Show the 'experiment done' screen and wait for any key to exit.
    instuction_text = "Press Any Key to Exit Demo"
    instructions_text_stim.setText(instuction_text)
    instructions_text_stim.draw()
    flip_time = window.flip()
    self.hub.sendMessageEvent(text="SHOW_DONE_TEXT", sec_time=flip_time)
    kb.waitForPresses()

    self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE')
# to draw calibration graphics (target, camera image, etc.) genv = EyeLinkCoreGraphicsPsychoPy(tk, win) pylink.openGraphicsEx(genv) # Calibrate the tracker calib_msg = visual.TextStim(win, text='Press ENTER twice to calibrate') calib_msg.draw() win.flip() tk.doTrackerSetup() # Run 3 trials in a for-loop # in each trial, first show a fixation dot, wait for the participant # to gaze at the fixation dot, then present an image for 2 secs for i in range(3): # Prepare the fixation dot in memory fix = visual.GratingStim(win, tex='None', mask='circle', size=30.0) # Load the image img = visual.ImageStim(win, image='woods.jpg', size=(SCN_W, SCN_H)) # Put tracker in Offline mode before we start recording tk.setOfflineMode() # Start recording tk.startRecording(1, 1, 1, 1) # Cache some samples pylink.msecDelay(100) # Show the fixation dot fix.draw()