def printText(screen, theText, fontSize, theColor):
    """Word-wrap the lines of theText and build left-aligned Text stimuli.

    Rows are laid out downwards from near the top of the screen with a
    1/12-screen-width margin on each side.  A bare "\n" entry in theText
    inserts a blank row.  Returns (list of Text stimuli, Viewport).

    NOTE(review): theText is iterated element by element, so it is
    presumably a sequence of line strings rather than one big string --
    confirm against callers.
    """
    instructions = []
    textColor = theColor  # unused; kept as-is
    screensize = screen.size
    spacebuff = Point()
    # Margins: 1/12 of the width on the left; start 1/12 down from the top.
    spacebuff.x = screensize[0] / 12
    spacebuff.y = screensize[1] - screensize[1] / 12
    myFont = pygame.font.Font(None, fontSize)
    for line in theText:
        if line == "\n":
            # Blank line: skip down two character heights.
            spacebuff.y = spacebuff.y - myFont.size('A')[1] * 2
        else:
            myLine = line.replace("\n", "")
            myWords = myLine.split(' ')
            rangeCount = 1   # 1-based count of words tried on the current row
            doesItFit = ""   # candidate row including the word under test
            n = len(myWords)
            while n >= 0:
                itFits = doesItFit  # last candidate known to fit
                doesItFit = doesItFit + myWords[rangeCount - 1] + (" ")
                fitVal = screensize[0] - spacebuff.x * 2  # usable row width
                if myFont.size(doesItFit)[0] < fitVal:
                    if n == rangeCount:
                        # All remaining words fit on this row: emit and stop.
                        spacebuff.y = spacebuff.y - myFont.size('A')[1]
                        instructions.append(
                            Text(text=doesItFit,
                                 position=(spacebuff.x, spacebuff.y),
                                 color=theColor,
                                 font_size=fontSize))
                        n = 0
                        doesItFit = ""
                        break
                    else:
                        rangeCount = rangeCount + 1
                        itFits = doesItFit
                else:
                    # Overflow: emit the words that fit, drop them from the
                    # word list and start a new row with the remainder.
                    spacebuff.y = spacebuff.y - myFont.size('A')[1]
                    instructions.append(
                        Text(text=itFits,
                             position=(spacebuff.x, spacebuff.y),
                             color=theColor,
                             font_size=fontSize))
                    doesItFit = ""
                    n = n - rangeCount + 1
                    del myWords[0:rangeCount - 1]
                    rangeCount = 1
    viewport = Viewport(screen=screen, stimuli=instructions)
    return instructions, viewport
def __init__(self, screen, tracker):
    """Set up VisionEgg-based custom calibration graphics for an EyeLink.

    screen  -- a VisionEgg screen; its background is forced to white.
    tracker -- the pylink EyeLink connection object.

    Builds two viewports: self.cal_vp (calibration/drift-correct target)
    and self.image_vp (camera image plus an "Eye Label" caption).
    Calibration beeps are best-effort: if the mixer or the wav file is
    unavailable they are left as None.

    NOTE(review): `script_home` is a module-level name defined outside
    this chunk -- presumably the script's directory.
    """
    self.size = screen.size
    self.tracker = tracker
    # EyeLink needs to know host byte order when exchanging image data.
    if sys.byteorder == 'little':
        self.byteorder = 1
    else:
        self.byteorder = 0
    pylink.EyeLinkCustomDisplay.__init__(self)
    try:
        pygame.mixer.init()
        self.__target_beep__ = pygame.mixer.Sound(
            os.path.join(script_home, "caltargetbeep.wav"))
        self.__target_beep__done__ = pygame.mixer.Sound(
            os.path.join(script_home, "caltargetbeep.wav"))
        self.__target_beep__error__ = pygame.mixer.Sound(
            os.path.join(script_home, "caltargetbeep.wav"))
    except Exception:  # was a bare except; audio stays best-effort
        self.__target_beep__ = None
        self.__target_beep__done__ = None
        self.__target_beep__error__ = None
    self.imagebuffer = array.array('L')
    self.pal = None
    # Create viewport for calibration / DC
    cal_screen = screen
    cal_screen.parameters.bgcolor = (1.0, 1.0, 1.0, 1.0)
    target = Target2D(
        size=(10.0, 10.0),
        color=(0.0, 0.0, 0.0, 1.0),  # Set the target color (RGBA) black
        orientation=0.0)
    self.cal_vp = Viewport(screen=cal_screen, stimuli=[target])
    # Create viewport for camera image screen
    text = Text(
        text="Eye Label",
        color=(0.0, 0.0, 0.0),  # alpha is ignored (set with max_alpha_param)
        position=(cal_screen.size[0] / 2, int(screen.size[1] * 0.1)),
        font_size=20,
        anchor='center')
    img = Image.new(
        "RGBX", (int(screen.size[0] * 0.75), int(screen.size[1] * 0.75)))
    image = TextureStimulus(mipmaps_enabled=0,
                            texture=Texture(img),
                            size=(int(screen.size[0] * 0.75),
                                  int(screen.size[1] * 0.75)),
                            texture_min_filter=gl.GL_LINEAR,
                            position=(cal_screen.size[0] / 2.0,
                                      cal_screen.size[1] / 2.0),
                            anchor='center')
    self.image_vp = Viewport(screen=cal_screen, stimuli=[text, image])
    self.width = cal_screen.size[0]
    self.height = cal_screen.size[1]
def text(self, tx, p=None):
    """Build a Text stimulus for string tx.

    With p=None the text is centred at (self.mid_x, self.mid_y);
    otherwise it is left-anchored at a row derived from p (larger p
    means lower on the screen).  Font size is self.font_s - 10.
    """
    if p is None:  # was `p == None`; identity test is the correct idiom
        a, pos = 'center', (self.mid_x, self.mid_y)
    else:
        a, pos = 'left', (0.2 * self.mid_x, (1.2 - 0.2 * p) * self.mid_y)
    return Text(anchor=a, position=pos, font_size=self.font_s - 10,
                color=self.black, text=tx)
def printWord(screen, theText, fontSize, theColor):
    """Render theText centred on the screen.

    Returns (Text stimulus, Viewport containing it).
    """
    face = pygame.font.Font(None, fontSize)
    text_w, text_h = face.size(theText)
    # Centre: half the screen minus half the rendered text extent.
    xpos = screen.get_size()[0] / 2 - text_w / 2
    ypos = screen.get_size()[1] / 2 - text_h / 2
    word = Text(text=theText,
                position=(xpos, ypos),
                color=theColor,
                font_size=fontSize)
    viewport = Viewport(screen=screen, stimuli=[word])
    return word, viewport
def printText(screen, theText, fontSize, theColor):
    """Word-wrap theText to the screen width and centre the block.

    theText is a single string; embedded '\n' characters force line
    breaks.  Wrapped lines are centred horizontally and the whole block
    is centred vertically.  Returns (list of Text stimuli, Viewport).
    """
    instructions = []
    screensize = screen.get_size()
    spacebuff = Point()
    spacebuff.x = screensize[0] / 12
    spacebuff.y = screensize[1] - (screensize[1] / 12)
    myFont = pygame.font.Font(None, fontSize)
    fitVal = screensize[0] - spacebuff.x * 2  # usable line width in pixels
    fittedText = []
    for line in theText.split('\n'):
        current = ""
        for word in line.split(' '):
            candidate = current + word + (" ")
            if myFont.size(candidate)[0] < fitVal:
                current = candidate
            elif current:
                # Row is full: flush it and start a new row with this word.
                # (The original appended the overflowing candidate and then
                # also appended the stale short row, duplicating text.)
                fittedText.append(current)
                current = word + (" ")
            else:
                # Single word wider than the row: emit it on its own row.
                fittedText.append(candidate)
                current = ""
        fittedText.append(current)
    xpos = screen.size[0] / 2
    ypos = screen.size[1] / 2 + len(fittedText) / 2 * myFont.size(
        fittedText[0])[1]
    # enumerate() instead of fittedText.index(line): .index returns the
    # FIRST occurrence, so duplicate lines used to be stacked on one row.
    for i, line in enumerate(fittedText):
        instructions.append(
            Text(text=line,
                 anchor='center',
                 position=(xpos, ypos - i * myFont.size(line)[1]),
                 color=theColor,
                 font_size=fontSize))
    viewport = Viewport(screen=screen, stimuli=instructions)
    return instructions, viewport
def printWord(screen, theText, fontSize, theColor):
    """Build a single centred Text stimulus for theText.

    Returns (list holding the Text stimulus, Viewport containing it).
    """
    instructions = []
    screensize = screen.size
    spacebuff = Point()
    myFont = pygame.font.Font(None, fontSize)
    fontX = myFont.size(theText)[0]
    fontY = myFont.size(theText)[1]
    # Centre the rendered text on the screen.
    spacebuff.x = (screensize[0] / 2) - (fontX / 2)
    spacebuff.y = (screensize[1] / 2) - (fontY / 2)
    # (Removed a leftover debug `print fontX, fontY` and the unused
    # textColor local.)
    instructions.append(
        Text(text=theText,
             position=(spacebuff.x, spacebuff.y),
             color=theColor,
             font_size=fontSize))
    viewport = Viewport(screen=screen, stimuli=instructions)
    return instructions, viewport
scale = min(scale_x, scale_y) # maintain aspect ratio movie_texture = MovieTexture(movie=movie) stimulus = TextureStimulus( texture=movie_texture, position=(screen.size[0] / 2.0, screen.size[1] / 2.0), anchor='center', mipmaps_enabled=False, # can't do mipmaps with QuickTime movies shrink_texture_ok=True, size=(width * scale, height * scale), ) text = Text( text="Vision Egg QuickTime movie demo - Press any key to quit", position=(screen.size[0] / 2, screen.size[1]), anchor='top', color=(1.0, 1.0, 1.0), ) viewport = Viewport(screen=screen, stimuli=[stimulus, text]) movie.StartMovie() frame_timer = FrameTimer() quit_now = 0 while not quit_now: for event in pygame.event.get(): if event.type in (QUIT, KEYDOWN, MOUSEBUTTONDOWN): quit_now = 1 movie.MoviesTask(0) screen.clear() viewport.draw()
################################# # Initialize OpenGL objects # ################################# # Initialize OpenGL graphics screen. #screen = get_default_screen() screen = VisionEgg.Core.Screen( fullscreen=1, size=(1024, 768)) # get_default_screen crashes my laptop screen.parameters.bgcolor = (0.0, 0.0, 0.0, 0.0) screen_half_x = screen.size[0] / 2 screen_half_y = screen.size[1] / 2 str_instruct_1 = Text(text='Experiment', position=(screen_half_x, screen_half_y), font_size=40, anchor='center') str_instruct_2 = WrappedText(text='placeholder', position=(150, screen.size[1] - 100), font_size=40, size=(800, 500)) fixation = TextureStimulus(texture=normalFix, internal_format=gl.GL_RGBA, max_alpha=1.0, size=(150, 150), position=(screen_half_x, screen_half_y), anchor='center') sampleFix = TextureStimulus(texture=Texture('images/allFix.png'),
################################# # Initialize OpenGL graphics screen. screen = get_default_screen() # Set the background color to white (RGBA). screen.parameters.bgcolor = (1.0, 1.0, 1.0, 1.0) # Create an instance of the Target2D class with appropriate parameters. target = Target2D(size=(25.0, 10.0), anchor='center', color=(0.0, 0.0, 0.0, 1.0)) # Set the target color (RGBA) black text = Text(text="Press Esc to quit, arrow keys to change size of target.", position=(screen.size[0] / 2.0, 5), anchor='bottom', color=(0.0, 0.0, 0.0, 1.0)) # Create a Viewport instance viewport = Viewport(screen=screen, stimuli=[target, text]) ################ # Math stuff # ################ def cross_product(b, c): """Cross product between vectors, represented as tuples of length 3.""" det_i = b[1] * c[2] - b[2] * c[1] det_j = b[0] * c[2] - b[2] * c[0] det_k = b[0] * c[1] - b[1] * c[0]
def text(self, tx, pos):
    """Return a centred, black, size-70 Text stimulus at *pos*."""
    label = Text(text=tx,
                 position=pos,
                 anchor='center',
                 color=(0, 0, 0),
                 font_size=70)
    return label
def doSim(self, trial, road, duration, tau, doEyetrack):
    """Run one steering-simulation trial.

    trial      -- trial number, used for EDF/output file names.
    road       -- road index; road<road>.txt supplies the road vertices.
    duration   -- go-loop length in seconds.
    tau        -- steering delay in seconds; converted to a ring-buffer
                  length using the measured refresh rate.
    doEyetrack -- when true, configure/record from an EyeLink tracker.

    Builds the 3D road + sky scene, shows a "press a key" splash, runs
    the simulation presentation (self.update drives each frame), and
    writes per-frame data to steersim-<trial>-<road>-out.txt.
    """
    # Measure sample rate in order to calculate delay buffer
    sample_rate = self.screen.measure_refresh_rate(2.0)
    print "Sample rate: " + str(sample_rate)
    #sample_rate = 60
    self.doEyetrack = doEyetrack
    # Delay buffer: tau seconds worth of frames (plus one slot).
    self.pos_ring = RingBuffer(self.center,
                               int(math.floor(tau * sample_rate)) + 1)
    print("Ring Buffer:: size: " + str(self.pos_ring.size))
    if doEyetrack:
        import pylink
        from EyeLinkCoreGraphicsVE import EyeLinkCoreGraphicsVE
        self.tracker = pylink.EyeLink()
        if self.tracker == None:
            print "Error: Eyelink is not connected"
            sys.exit()
        genv = EyeLinkCoreGraphicsVE(self.screen, self.tracker)
        pylink.openGraphicsEx(genv)
        #Opens the EDF file.
        edfFileName = "TRIAL" + str(trial) + ".EDF"
        self.tracker.openDataFile(edfFileName)
        pylink.flushGetkeyQueue()
        self.tracker.sendCommand(
            "screen_pixel_coords = 0 0 %d %d" %
            (VisionEgg.config.VISIONEGG_SCREEN_W,
             VisionEgg.config.VISIONEGG_SCREEN_H))
        # Work out the tracker software version so we can request the
        # richer sample data (HTARGET) only where it is supported.
        tracker_software_ver = 0
        eyelink_ver = self.tracker.getTrackerVersion()
        if eyelink_ver == 3:
            tvstr = self.tracker.getTrackerVersionString()
            vindex = tvstr.find("EYELINK CL")
            tracker_software_ver = int(
                float(tvstr[(vindex + len("EYELINK CL")):].strip()))
        if eyelink_ver >= 2:
            self.tracker.sendCommand("select_parser_configuration 0")
            if eyelink_ver == 2:  #turn off scenelink camera stuff
                self.tracker.sendCommand("scene_camera_gazemap = NO")
        else:
            self.tracker.sendCommand("saccade_velocity_threshold = 35")
            self.tracker.sendCommand(
                "saccade_acceleration_threshold = 9500")
        # set EDF file contents
        self.tracker.sendCommand(
            "file_event_filter = LEFT,RIGHT,FIXATION,SACCADE,BLINK,MESSAGE,BUTTON"
        )
        if tracker_software_ver >= 4:
            self.tracker.sendCommand(
                "file_sample_data = LEFT,RIGHT,GAZE,AREA,GAZERES,STATUS,HTARGET"
            )
        else:
            self.tracker.sendCommand(
                "file_sample_data = LEFT,RIGHT,GAZE,AREA,GAZERES,STATUS")
        # set link data (used for gaze cursor)
        self.tracker.sendCommand(
            "link_event_filter = LEFT,RIGHT,FIXATION,SACCADE,BLINK,BUTTON")
        if tracker_software_ver >= 4:
            self.tracker.sendCommand(
                "link_sample_data = LEFT,RIGHT,GAZE,GAZERES,AREA,STATUS,HTARGET"
            )
        else:
            self.tracker.sendCommand(
                "link_sample_data = LEFT,RIGHT,GAZE,GAZERES,AREA,STATUS")
        # Full setup only once per session; afterwards just drift-correct.
        if not self.doneSetup:
            self.tracker.doTrackerSetup()
            self.doneSetup = True
        else:
            while 1:
                try:
                    error = self.tracker.doDriftCorrect(
                        self.screen.size[0] / 2, self.screen.size[1] / 2,
                        1, 1)
                    if error != 27:  # ?? from example
                        break
                    else:
                        self.tracker.doTrackerSetup()
                except:  # NOTE(review): bare except kept as-is; swallows
                         # any drift-correct failure and carries on
                    break
    self.screen.parameters.bgcolor = 106.0 / 255.0, 147.0 / 255.0, 0.0
    # Load road data from file and create an image
    roadArray = numpy.loadtxt('road' + str(road) + '.txt')
    # Convert to a Path
    roadPath = ImagePath.Path(
        map(lambda xy: (xy[0], xy[1]), roadArray.tolist()))
    # Use Path to create a plot of the road
    im = Image.new("RGB", (2000, 100), (50, 50, 50))
    draw = ImageDraw.Draw(im)
    # draw each side of the road separately
    draw.line(roadPath[:4000], fill=(200, 200, 200))
    draw.line(roadPath[4000:], fill=(200, 200, 200))
    del draw
    # Lay out a road texture in the x-z plane
    roadTexture = Texture(im)
    del im
    eye_height = 2.5
    vertices = [(-10, -eye_height, 0), (-10, -eye_height, -1000),
                (10, -eye_height, 0), (10, -eye_height, -1000)]
    rect = TextureStimulus3D(texture=roadTexture,
                             lowerleft=vertices[0],
                             lowerright=vertices[1],
                             upperleft=vertices[2],
                             upperright=vertices[3])
    # We will use these later for our camera transforms
    self.camera_matrix = ModelView()
    self.frame_timer = FrameTimer()
    self.outf = open(
        'steersim-' + str(trial) + '-' + str(road) + '-out.txt', 'wb')
    # Vewport for the road
    viewport3D = Viewport(
        screen=self.screen,
        projection=SimplePerspectiveProjection(fov_x=75.2),
        camera_matrix=self.camera_matrix,
        stimuli=[rect])
    # Construct a sky covering the top half of the screen
    sky_l = 0
    sky_r = self.screen.size[0]
    sky_t = self.screen.size[1]
    sky_b = self.screen.size[1] / 2
    sky_vertices = [(sky_l, sky_t, 0), (sky_r, sky_t, 0),
                    (sky_r, sky_b, 0), (sky_l, sky_b, 0)]
    sky = Rectangle3D(color=(144.0 / 255.0, 190.0 / 255.0, 1.0),
                      vertex1=sky_vertices[0],
                      vertex2=sky_vertices[1],
                      vertex3=sky_vertices[2],
                      vertex4=sky_vertices[3])
    wheelTexture = Texture('wheel.png')
    self.wheel = TextureStimulus(texture=wheelTexture,
                                 internal_format=gl.GL_RGBA,
                                 position=(self.center, -75),
                                 anchor='center')
    # display the sky in its own viewport
    viewport2D = Viewport(screen=self.screen)
    viewport2D.parameters.stimuli = [sky, self.wheel]
    self.init_state()
    # "Press a key" splash shown until wait_for_key flips enter_go_loop.
    askText = Text(text='Press a key to start',
                   anchor='center',
                   position=(self.center, self.screen.size[1] / 2))
    splash = Viewport(screen=self.screen)
    splash.parameters.stimuli = [askText]
    self.askForNext = Presentation(go_duration=(0.5, 'seconds'),
                                   viewports=[splash])
    self.askForNext.add_controller(
        None, None, FunctionController(during_go_func=self.wait_for_key))
    self.askForNext.parameters.enter_go_loop = True
    self.askForNext.run_forever()
    # The actual simulation presentation.
    self.simPres = Presentation(go_duration=(duration, 'seconds'),
                                viewports=[viewport3D, viewport2D],
                                handle_event_callbacks=[
                                    (pygame.KEYDOWN, self.check_keypress)
                                ])
    self.simPres.add_controller(
        None, None, FunctionController(during_go_func=self.update))
    if doEyetrack:
        startTime = pylink.currentTime()
        self.tracker.sendMessage("SYNCTIME %d" %
                                 (pylink.currentTime() - startTime))
        error = self.tracker.startRecording(1, 1, 1, 1)
        self.tracker.sendMessage("PRES %d START" % (trial))
    self.simPres.go()
    if doEyetrack:
        self.tracker.sendMessage("PRES %d END" % (trial))
        self.tracker.stopRecording()
        # File transfer and cleanup!
        self.tracker.setOfflineMode()
        pylink.msecDelay(500)
        #Close the file and transfer it to Display PC
        self.tracker.closeDataFile()
        self.tracker.receiveDataFile(edfFileName, edfFileName)
    self.outf.close()
    if self.quit:
        raise SystemExit
def showRSVP(viewport):
    """Run RSVP image blocks until the session is ended.

    Repeatedly samples a block of images (from disk or XML depending on
    the module-level `input_from`), shows a fixation cross, plays the
    block through the shared Presentation `p` (every_frame_func swaps
    textures each frame), then shows feedback.  Communicates with the
    rest of the program almost entirely through module-level globals.

    NOTE(review): the loop exits when the global doingTraining is set to
    2 elsewhere (e.g. by a key handler), or on a configuration error.
    """
    global imageCounter
    global previous_t
    global numImsPerBlock
    global texture_object
    global curImList
    global numTargets
    global numNontargets
    global presentationFreq
    global doingTraining
    global ctrl_key
    doingTraining = 0
    #
    # Load the parameters for the presentation
    #
    presentationParams = getPracticeSessionParams()
    SDL_Maximize()
    presentationFreq = int(presentationParams[0])
    block_idx = 0
    ctrl_key = 0
    while (1):
        internalState = 0  # here we are the block state
        if (doingTraining == 1):
            # Training was requested meanwhile: reload parameters.
            presentationParams = getPracticeSessionParams()
            presentationFreq = int(presentationParams[0])
            SDL_Maximize()
            doingTraining = 0
        elif (doingTraining == 2):
            break
        imageCounter = 0
        internalState = 1  # About to enter the presentation mode.
        previous_t = 0;
        # Load training block
        if (input_from == 1):
            curList = sampleTrainImages(trainPath, numTargets,
                                        numNontargets, alreadySampled,
                                        withreplacement)
        elif (input_from == 2):
            print "loading XML"
            curList = sampleTrainImagesXML(inputXMLfile, numTargets,
                                           numNontargets)
        else:
            msgbox('The input type specified is invalid, check the value of input_from parameter in configuration.ini file.\n Terminating RSVP session.')
            my_logger.error('The input type specified is invalid, check the value of input_from parameter in configuration.ini file.')
            break;
        # Unzip the (image, is-target, path, name) tuples into columns.
        curImList = map(operator.itemgetter(0), curList)
        curTargList = map(operator.itemgetter(1), curList)
        curImPath = map(operator.itemgetter(2), curList)
        curImName = map(operator.itemgetter(3), curList)
        numImsPerBlock = len(curImList)
        # fixation cross
        p.parameters.go_duration = (1, 'seconds')
        viewport.parameters.stimuli = [fixPt]
        p.go()
        imageStim = TextureStimulus(
            texture=imageTex,
            position=(screen.size[0] / 2.0, screen.size[1] / 2.0),
            anchor='center',
            size=(width * scale, height * scale),
            mipmaps_enabled=0,
            texture_min_filter=gl.GL_LINEAR)
        texture_object = imageStim.parameters.texture.get_texture_object()
        texture_object.put_sub_image(curImList[0])
        p.add_controller(None, None,
                         FunctionController(during_go_func=every_frame_func))
        #p.parameters.go_duration = ((numImsPerBlock+1)/float(presentationFreq),'seconds')
        p.parameters.go_duration = (
            2 * (numImsPerBlock + 1) / float(presentationFreq), 'seconds')
        viewport.parameters.stimuli = [imageStim]
        p.go();
        block_idx = block_idx + 1
        internalState = 0  # here we are the block state
        # fixation cross
        p.parameters.go_duration = (1, 'seconds')
        p.controllers = []
        viewport.parameters.stimuli = [fixPt]
        p.go()
        # please wait (while we get results)
        p.parameters.go_duration = (1, 'frames')
        p.controllers = []
        viewport.parameters.stimuli = [pleaseWait]
        p.go()
        totElements = len(curImName)
        curImNamePrefixed = [
            ('_' + str(eventList[curTargList[i]]) + '_' + curImName[i])
            for i in range(totElements)
        ]
        # get the data strucute to be able to show potential feedback during evaluation methodology.
        dataStruct = simulateCBCIoutput(block_idx, curTargList, curImPath,
                                        curImName)
        # Call to the external feedback screen
        status = show_feedback(screen, viewport, dataStruct, curTargList)
        if (status == 1):
            #
            # Show custom feedback screen
            #
            p.parameters.go_duration = ('forever',)
            p.go()
        else:
            #
            # Show default feedback.
            #
            #print reslist
            sttext = "Blocks shown: " + str(block_idx)
            statisticsText = Text(
                text=sttext,
                position=(screen.size[0] / 2.0, screen.size[1] / 2),
                font_name=fname, font_size=fsize,
                anchor='center',
                color=(0.0, 0.0, 0.0, 1.0))
            p.parameters.go_duration = ('forever',)
            viewport.parameters.stimuli = [statisticsText]
            p.go()
#################################################### # Initialize OpenGL graphics screen. screen = get_default_screen() # Set the background color to white (RGBA). #screen.parameters.bgcolor = (1.0,1.0,1.0,1.0) screen.parameters.bgcolor = (bg_values[0],bg_values[1],bg_values[2],bg_values[3]) # make Fixation Cross texture fixPt = FixationCross(position= (screen.size[0]/2,screen.size[1]/2), size=(25,25), texture_size=(30,30)) # Intro instruction screen introTexts = [Text( text = "You will be shown a block of images; look for target objects.", font_name=fname,font_size = fsize, position = (screen.size[0]/2.0,screen.size[1]/2), anchor = 'center', color = (0.0,0.0,0.0,1.0))] introTexts.append(Text( text = "At the end of each block, press the space bar to continue.", font_name=fname,font_size = fsize, position = (screen.size[0]/2.0,screen.size[1]/2), anchor = 'center', color = (0.0,0.0,0.0,1.0))) introTexts.append(Text( text = " When instructed, press 't' to finish training.", position = (screen.size[0]/2.0,screen.size[1]/2), font_name=fname,font_size = fsize, anchor = 'center', color = (0.0,0.0,0.0,1.0))) introTexts.append(Text( text = "Please press the space bar to start.", position = (screen.size[0]/2.0,screen.size[1]/2), font_name=fname,font_size = fsize, anchor = 'center', color = (0.0,0.0,0.0,1.0))) # Please wait screen pleaseWait = Text( text = "Please Wait.", position = (screen.size[0]/2.0,screen.size[1]/2), font_name=fname,font_size = fsize, anchor = 'center', color = (0.0,0.0,0.0,1.0))
def quit(dummy_arg=None):
    # End the presentation go loop immediately (dummy_arg lets this be
    # used directly as an event callback).
    p.parameters.go_duration = (0, 'frames')


def keydown(event):
    # Escape quits the demo.
    if event.key == pygame.locals.K_ESCAPE:
        quit()


if not filename:
    # No movie path on the command line: show an error screen instead.
    text = Text(
        text="Error: Use MPEG file as command line argument - Press Esc to quit",
        position=(screen.size[0] / 2, screen.size[1]),
        anchor='top',
        font_size=24,
        color=(1.0, 0.0, 0.0))
    text2 = Text(
        text="(If you have a free MPEG to contribute, it could go here.)",
        position=(screen.size[0] / 2, screen.size[1] / 2),
        anchor='center',
        font_size=20,
        color=(1.0, 1.0, 1.0))
    viewport = Viewport(screen=screen, stimuli=[text, text2])
    # NOTE(review): this call continues past the end of this chunk.
    p = Presentation(
        go_duration=('forever', ),
        viewports=[viewport],
        handle_event_callbacks=[(pygame.locals.QUIT, quit),
def __init__(self):
    """Parse command-line options and build all stimuli/state for one run.

    Sets timing constants (longer big-fixation for version 1), creates
    the VisionEgg screen, text/fixation/feedback stimuli and viewports,
    builds the trial order, and opens the CSV data file with its header
    row.
    """
    #assign parsed options
    self.options = self.parse()
    self.subjID = self.options.subjID
    self.number = self.options.number
    self.feedback = self.options.feedback
    self.version = int(self.options.version)
    self.counter = int(self.options.counter)
    #experiment constants (all in seconds)
    self.init_fix_time = .500
    self.flash_time = 1.000
    self.choice_time = 3.000
    self.end_fix_time = 0.500
    self.feedback_time = 0.250
    self.feedback_fix_time = 0.250
    if self.version == 1:
        self.big_fix_time = 20.000
    else:
        self.big_fix_time = 16.000
    #colors
    self.black = (0.0, 0.0, 0.0)
    self.white = (1.0, 1.0, 1.0)
    self.blue = (0.0, 0.0, 1.0)
    #screen/VisionEgg setup
    self.screen = Screen(bgcolor=(1.0, 1.0, 1.0), fullscreen=True)
    self.midx = self.screen.size[0] / 2.0
    self.midy = self.screen.size[1] / 2.0
    self.start = Text(anchor='center',
                      position=(self.midx, self.midy),
                      text="Wait for trigger +",
                      font_size=70,
                      color=self.black)
    self.fix = FixationCross(position=(self.midx, self.midy),
                             size=(50, 50))
    # Feedback images (yes/no), both centred.
    self.yes = TextureStimulus(texture=Texture('yes.png'),
                               color=self.white,
                               anchor='center',
                               position=(self.midx, self.midy),
                               size=(250, 250))
    self.no = TextureStimulus(texture=Texture('no.png'),
                              anchor='center',
                              position=(self.midx, self.midy),
                              size=(200, 200))
    self.view_start = Viewport(screen=self.screen, stimuli=[self.start])
    self.view_present = Viewport(screen=self.screen)
    self.p = Presentation()
    self.order = self.make_order(self.version, self.counter)
    # Per-run bookkeeping and per-trial response state.
    self.L = []
    self.R = []
    self.i = 0
    self.j = 0
    self.last_start = 0
    self.ideal = 0
    self.trial_n = 0
    self.RT = None
    self.given_response = None
    self.correct = None
    self.condition = None
    self.choice_onset = None
    self.acc = None
    #data file setup
    self.data_file = self.make_file()
    self.wr = csv.writer(self.data_file, quoting=csv.QUOTE_MINIMAL)
    self.wr.writerow([
        'SubjID', 'RunNumber', 'Feedback', 'Version', 'Counterbalance',
        'TrialNumber', 'TrialOnset', 'ChoiceOnset', 'Condition',
        'LeftGrid', 'RightGrid', 'CorrectAnswer', 'Response', 'Accuracy',
        'RT'
    ])
# NOTE(review): these first three statements are most likely the tail of a
# resolution-check `if` begun before this chunk -- confirm against the
# full file.
print "Horizontal screen resolution needs to be at least 1024."
raw_input("Press enter to exit")
sys.exit()
# Full-screen window at the current desktop resolution (Windows-only:
# GetSystemMetrics(0/1) are screen width/height).
screen = VisionEgg.Core.Screen(size=(user32.GetSystemMetrics(0),
                                     user32.GetSystemMetrics(1)),
                               fullscreen=True)
#screen=VisionEgg.Core.Screen(size=(1024,768),fullscreen=False)
screen.parameters.bgcolor = (0.0, 0.0, 0.0, 0.0)
d_screen_half_x = screen.size[0] / 2
d_screen_half_y = screen.size[1] / 2

# Vision Egg objects
title = Text(text='Please choose the option you prefer in each case',
             color=(1.0, 1.0, 1.0),
             position=(d_screen_half_x, d_screen_half_y + 160),
             font_size=fontsize,
             anchor='center')
title2 = Text(text=' ',
              color=(1.0, 1.0, 1.0),
              position=(d_screen_half_x, d_screen_half_y - 160),
              font_size=30,
              anchor='center')
newset = Text(text='Next delay: %s' % (delays[currentdelay]),
              color=(1.0, 1.0, 1.0),
              position=(d_screen_half_x, d_screen_half_y + 160),
              font_size=fontsize,
              anchor='center')
# NOTE(review): these keyword arguments continue a stimulus constructor
# opened before this chunk.
size=(800, 800),
spatial_freq=10.0 / screen.size[0],  # units of cycles/pixel
temporal_freq_hz=1.0,
num_samples=1024,
orientation=45.0)
text_color = (0.0, 0.0, 1.0)  # RGB ( blue)
xpos = 10.0
yspace = 5
text_params = {'anchor': 'lowerleft', 'color': text_color, 'font_size': 20}
text_stimuli = []
ypos = 0
# Build on-screen help lines, stacking upward from the bottom-left corner.
text_stimuli.append(
    Text(text="Numeric keypad changes grating orientation.",
         position=(xpos, ypos), **text_params))
ypos += text_stimuli[-1].parameters.size[1] + yspace
# This line is updated elsewhere when the temporal frequency changes.
tf_text = Text(text="'t/T' changes TF (now %.2f hz)" %
               (grating_stimulus.parameters.temporal_freq_hz),
               position=(xpos, ypos), **text_params)
text_stimuli.append(tf_text)
ypos += text_stimuli[-1].parameters.size[1] + yspace
text_stimuli.append(
    Text(text="'-' shrinks window, '+' grows window (slow)",
         position=(xpos, ypos), **text_params))
ypos += text_stimuli[-1].parameters.size[1] + yspace
window_coords = viewport3D.eye_2_window( eye_coord_vertex) # eye to window text.parameters.text = '<- %.1f, %.1f' % (window_coords[0], window_coords[1]) text.parameters.position = window_coords[0], window_coords[1] viewport3D = Viewport(screen=screen, projection=SimplePerspectiveProjection(fov_x=90.0), camera_matrix=camera_matrix, stimuli=[rect]) vertex_labels = [] for vertex in vertices: vertex_labels.append(Text( text='temporary text', anchor='left', )) other_text = [] other_text.append( Text( text='Pixel positions (x,y) calculated from 3D coordinates', position=(screen.size[0] / 2, screen.size[1]), anchor='top', )) other_text.append(Text( text='----> x', position=(10, 10), anchor='left', ))
mask = SphereWindow(
    radius=1.0 * 0.90,  # make sure window is inside sphere with grating
    window_shape_radius_parameter=40.0,
    slices=50,
    stacks=50)
text_color = (0.0, 0.0, 1.0)  # RGB ( blue)
xpos = 10.0
yspace = 5
text_params = {'anchor': 'lowerleft', 'color': text_color, 'font_size': 20}
text_stimuli = []
ypos = 0
# Build on-screen help lines, stacking upward from the bottom-left corner.
text_stimuli.append(
    Text(
        text="(Hold mouse button to prevent re-orienting stimulus with mask.)",
        position=(xpos, ypos),
        **text_params))
ypos += text_stimuli[-1].parameters.size[1] + yspace
text_stimuli.append(
    Text(text="Numeric keypad changes grating orientation.",
         position=(xpos, ypos), **text_params))
ypos += text_stimuli[-1].parameters.size[1] + yspace
# Placeholder; set_filter_and_text() fills in the real message.
filter_text = Text(text="temporary text",
                   position=(xpos, ypos),
                   **text_params)
set_filter_and_text()
text_stimuli.append(filter_text)
ypos += text_stimuli[-1].parameters.size[1] + yspace
# NOTE(review): this call continues past the end of this chunk.
sf_cutoff_text = Text(
bpps = [32, 24, 16, 0] sizes = [(640, 480), (800, 600), (1024, 768), (1280, 1024)] for bpp in bpps: success = False for size in sizes: print 'trying to initialize window %d x %d, %d bpp' % (size[0], size[1], bpp) try: screen = VisionEgg.Core.Screen( size=size, fullscreen=False, preferred_bpp=bpp, maxpriority=False, hide_mouse=True, sync_swap=True, ) success = True except: pass if success: break # we don't need to try other resolutions if success: break stims = [] for i in range(2000): print i t = Text() stims.append(t)
def showTargets(viewport):
    """Familiarise the subject with the target image set.

    Shows an instruction screen, then repeatedly plays a slow block of
    20 target images (1 per second) through the shared Presentation `p`,
    asking afterwards whether the subject wants to see them again.
    Restores the global presentationFreq before returning.
    """
    global imageCounter
    global previous_t
    global numImsPerBlock
    global texture_object
    global curImList
    global presentationFreq  # use/update the global value for presetnation frequency
    global doingTraining
    global ctrl_key
    ctrl_key = 0
    doingTraining = 0
    Pfreq_default = presentationFreq  # remembered so we can restore it
    SDL_Maximize()
    #
    # Show instructures
    #
    sttext = "You will be shown examples of target images... press space bar"
    statisticsText = Text(
        text=sttext,
        position=(screen.size[0] / 2.0, screen.size[1] / 2),
        font_name=fname, font_size=fsize,
        anchor='center',
        color=(0.0, 0.0, 0.0, 1.0))
    p.parameters.go_duration = ('forever',)
    viewport.parameters.stimuli = [statisticsText]
    p.go()
    while (1):
        imageCounter = 0
        previous_t = 0;
        block_idx = 0
        numTargets = 20
        numNontargets = 0
        presentationFreq = 5  # show 1 target every second to familiarize the user with the target set.
        # Load training block
        # curList = sampleTrainImages(trainPath,numTargets,numNontargets,alreadySampled,withreplacement)
        if (input_from == 1):
            curList = sampleTrainImages(trainPath, numTargets,
                                        numNontargets, alreadySampled,
                                        withreplacement)
        elif (input_from == 2):
            curList = sampleTrainImagesXML(inputXMLfile, numTargets,
                                           numNontargets)
        else:
            msgbox('The input type specified is invalid, check the value of input_from parameter in configuration.ini file.\n Terminating RSVP session.')
            my_logger.error('The input type specified is invalid, check the value of input_from parameter in configuration.ini file.')
            break;
        # Unzip the (image, is-target, path, name) tuples into columns.
        curImList = map(operator.itemgetter(0), curList)
        curTargList = map(operator.itemgetter(1), curList)
        curImPath = map(operator.itemgetter(2), curList)
        curImName = map(operator.itemgetter(3), curList)
        numImsPerBlock = len(curImList)
        # fixation cross
        p.parameters.go_duration = (1, 'seconds')
        viewport.parameters.stimuli = [fixPt]
        p.go()
        imageStim = TextureStimulus(
            texture=imageTex,
            position=(screen.size[0] / 2.0, screen.size[1] / 2.0),
            anchor='center',
            size=(width * scale, height * scale),
            mipmaps_enabled=0,
            texture_min_filter=gl.GL_LINEAR)
        texture_object = imageStim.parameters.texture.get_texture_object()
        texture_object.put_sub_image(curImList[0])
        p.add_controller(None, None,
                         FunctionController(during_go_func=every_frame_func))
        p.parameters.go_duration = (
            2 * (numImsPerBlock + 1) / float(presentationFreq), 'seconds')
        viewport.parameters.stimuli = [imageStim]
        p.go();
        block_idx = block_idx + 1
        # fixation cross
        p.parameters.go_duration = (2, 'seconds')
        p.controllers = []
        viewport.parameters.stimuli = [fixPt]
        p.go()
        #
        # Show instructures
        #
        response = ynbox(msg='Would you like to see these targets again?',
                         title=' ',
                         choices=('No', 'Yes'),
                         image=None)  # default No
        SDL_Maximize()
        if (response == 1):
            # Done: restore the caller's presentation frequency.
            presentationFreq = Pfreq_default
            break
#Make Rectangles and numbers x = screen.size[0] / 4 y = screen.size[1] / 2 problem = "%s or %s" % (n1, n2) box1 = TextureStimulus(color=[1, 1, 1], anchor='center', position=[x, y], size=[boxsize, boxsize]) box2 = TextureStimulus(color=[1, 1, 1], anchor='center', position=[x * 3, y], size=[boxsize, boxsize]) ns1 = Text(text=str(n1), anchor='center', position=[x, y], color=[0, 0, 0], font_size=fontsize) ns2 = Text(text=str(n2), anchor='center', position=[x * 3, y], color=[0, 0, 0], font_size=fontsize) expPort = Viewport(screen=screen, stimuli=[box1, box2, ns1, ns2]) subject.inputData(trial, "n1", n1) subject.inputData(trial, "n2", n2) subject.inputData(trial, "problem", problem) #format problem
# NOTE(review): these first four statements are most likely the tail of a
# resolution-check `if` begun before this chunk -- confirm against the
# full file.
print " "
print "Horizontal screen resolution needs to be at least 1024."
raw_input("Press enter to exit")
sys.exit()
# Full-screen window at the current desktop resolution (Windows-only:
# GetSystemMetrics(0/1) are screen width/height).
screen = VisionEgg.Core.Screen(size=(user32.GetSystemMetrics(0),
                                     user32.GetSystemMetrics(1)),
                               fullscreen=True)
screen.parameters.bgcolor = (0.0, 0.0, 0.0, 0.0)
d_screen_half_x = screen.size[0] / 2
d_screen_half_y = screen.size[1] / 2

# Vision Egg objects
title = Text(
    text='Please choose the amount and delay combination you prefer between',
    color=(1.0, 1.0, 1.0),
    position=(d_screen_half_x, d_screen_half_y + 120),
    font_size=40,
    anchor='center')
title2 = Text(text='each pair of options. Press 5 to continue.',
              color=(1.0, 1.0, 1.0),
              position=(d_screen_half_x, d_screen_half_y + 80),
              font_size=40,
              anchor='center')
newset = Text(text="Next delay: %s" % (delays[currentdelay]),
              color=(1.0, 1.0, 1.0),
              position=(d_screen_half_x, d_screen_half_y + 120),
              font_size=60,
              anchor='center')
# write the experiment plan to disk so we can compare it to behavioral data planfile = open(file_name_plan, 'a') plan_writer = csv.writer(planfile) plan_writer.writerows(exp_plan) planfile.close() print "expected experiment time: " + str(len(exp_plan) * trial_dur) print "ips: " + str(len(exp_plan) * trial_dur / tr) # wait for trigger instructions = Text( text="Waiting for trigger", font_size=32, color=(1, 1, 1), anchor='center', position=(screen.size[0] / 2, screen.size[1] / 2), ) instructions_attendFACE = Text( text="Attend to FACES", font_size=32, color=(1, 1, 1), anchor='center', position=(screen.size[0] / 2, screen.size[1] / 2), ) instructions_attendSCENE = Text( text="Attend to SCENES", font_size=32,
def __init__(self, screen, tracker):
    """Set up VisionEgg-based calibration graphics for an EyeLink tracker.

    screen  -- a VisionEgg screen; its background is forced to white.
    tracker -- the pylink EyeLink connection object.

    Builds self.cal_vp (calibration/drift-correct target: a small black
    dot inside a white disc) and self.image_vp (camera image area plus
    an "Eye Label" caption).
    """
    self.size = screen.size
    self.tracker = tracker
    pylink.EyeLinkCustomDisplay.__init__(self)
    #display.init()
    pygame.mixer.init()
    #display.set_mode((1280, 1024), FULLSCREEN |DOUBLEBUF |RLEACCEL |DOUBLEBUF ,16)
    #self.__target_beep__ = pygame.mixer.Sound(join(PATH,"caltargetbeep.wav"))
    #self.__target_beep__done__ = pygame.mixer.Sound(join(PATH,"caltargetbeep.wav"))
    #self.__target_beep__error__ = pygame.mixer.Sound(join(PATH,"caltargetbeep.wav"))
    self.imagebuffer = array.array('l')
    self.pal = None  # filled in later (image palette, presumably)
    # Create viewport for calibration / DC
    cal_screen = screen
    cal_screen.parameters.bgcolor = (1.0, 1.0, 1.0, 1.0)
    # Calibration target: black dot (r=2) drawn over a white disc (r=7).
    innertarget = FilledCircle(radius=2,
                               anchor='center',
                               color=(0, 0, 0),
                               num_triangles=201)
    outertarget = FilledCircle(radius=7,
                               anchor='center',
                               color=(1, 1, 1),
                               num_triangles=401)
    ## target = Target2D(size = (10.0,10.0),
    ##                  color = (0.0,0.0,0.0,1.0), # Set the target color (RGBA) black
    ##                  orientation = 0.0)
    self.cal_vp = Viewport(screen=cal_screen,
                           stimuli=[outertarget, innertarget])
    # Create viewport for camera image screen
    text = Text(
        text="Eye Label",
        color=(0.0, 0.0, 0.0),  # alpha is ignored (set with max_alpha_param)
        position=(cal_screen.size[0] / 2, int(screen.size[1] * 0.1)),
        font_size=50,
        anchor='center')
    img = Image.new(
        "RGBX", (int(screen.size[0] * 0.75), int(screen.size[1] * 0.75)))
    image = TextureStimulus(mipmaps_enabled=0,
                            texture=Texture(img),
                            size=(int(screen.size[0] * 0.75),
                                  int(screen.size[1] * 0.75)),
                            texture_min_filter=gl.GL_LINEAR,
                            position=(cal_screen.size[0] / 2.0,
                                      cal_screen.size[1] / 2.0),
                            anchor='center')
    #image = TextureStimulus(mipmaps_enabled=0,
    #                        texture=None,
    #                        size=(640,480),
    #                        texture_min_filter=gl.GL_LINEAR,
    #                        position=(cal_screen.size[0]/2.0,cal_screen.size[1]/2.0),
    #                        anchor='center')
    self.image_vp = Viewport(screen=cal_screen, stimuli=[text, image])
    self.width = cal_screen.size[0]
    self.height = cal_screen.size[1]
y = screen.size[1] / 2
# Assign the two numbers to left/right slots depending on flag b.
if b == "l":
    my_n1 = numbers[1]
    my_n2 = numbers[0]
else:
    my_n1 = numbers[0]
    my_n2 = numbers[1]
problemString = "%s | %s" % (my_n1, my_n2)
# Flag s selects which of the pair gets the rotation angle `a`.
if s == "l":
    ns1 = Text(text=str(my_n1),
               angle=a,
               anchor='center',
               position=[x * 3, y],
               color=[255, 255, 255],
               font_size=fontsize)
    ns2 = Text(text=str(my_n2),
               angle=0,
               anchor='center',
               position=[x * 4, y],
               color=[255, 255, 255],
               font_size=fontsize)
else:
    # NOTE(review): this call continues past the end of this chunk.
    ns1 = Text(text=str(my_n1),
               angle=0,
               anchor='center',
               position=[x * 3, y],
# Random-dot movie demo: draws a DotArea2D plus a caption for a few frames
# at a faked 60 Hz frame-locked clock, grabbing each rendered frame from the
# front buffer so it can be written out as a numbered JPEG.
# NOTE(review): chunk is truncated — the statement that saves `im` to
# `filename` presumably follows but is outside this view.
from VisionEgg.Dots import *

screen = get_default_screen()
screen.parameters.bgcolor = (0.0, 0.0, 0.0, 0.0)  # black (RGBA)

# Random-dot kinematogram: 10% coherent motion leftward (180 deg).
dots = DotArea2D(position=(screen.size[0] / 2.0, screen.size[1] / 2.0),
                 size=(300.0, 300.0),
                 signal_fraction=0.1,
                 signal_direction_deg=180.0,
                 velocity_pixels_per_sec=10.0,
                 dot_lifespan_sec=5.0,
                 dot_size=3.0,
                 num_dots=100)
text = Text(text="Vision Egg makeMovie2 demo.",
            position=(screen.size[0] / 2, 2),
            anchor='bottom',
            color=(1.0, 1.0, 1.0))
viewport = Viewport(screen=screen, stimuli=[dots, text])

VisionEgg.config.VISIONEGG_MONITOR_REFRESH_HZ = 60.0  # fake framerate
VisionEgg.set_time_func_to_frame_locked(
)  # force VisionEgg to fake this framerate

num_frames = 5
for i in range(num_frames):
    screen.clear()
    viewport.draw()
    swap_buffers()
    # Read back the frame just displayed (front buffer) as an RGB image.
    im = screen.get_framebuffer_as_image(buffer='front', format=gl.GL_RGB)
    filename = "movie_%02d.jpg" % (i + 1)
# NOTE(review): fragment — these are the trailing keyword arguments of a
# stimulus constructor (its opening "Name(" lies outside this chunk),
# followed by two complete stimulus definitions.
on = 0,
anchor = 'center',
# Place the stimulus on a circle of radius r around the screen center,
# at polar angle ang (degrees; 3.14159/180 converts to radians).
position = (screen_half_x + r*cos(ang*3.14159/180.0), \
            screen_half_y + r*sin(ang*3.14159/180.0)),
size = (tach_len, tach_width),
#color = (1.0, 0.0, 0.0, 1.0),
color = (1.0, 1.0, 1.0, 1.0), # Draw it in white (RGBA)
orientation = ang
)
# Task fixation marker: a "+" just below screen center, initially hidden
# (on=0).
taskSize = 60
taskStimulus = Text(
    text="+",
    on=0,
    color=(1.0, 1.0, 1.0),
    position=(screen_half_x, screen_half_y - (taskSize + 20)),
    font_size=taskSize,
    font_name=None,
    #font_name=pygame.font.match_font(fonts[122]),
    anchor='center')
arrow_unit = 0.65 * screen.size[1] / 10
shift_unit = 20.0
L = r - 2.0 * shift_unit
# Vertical bar ("arrow") rising from screen center, initially hidden.
arrowStimulus = Target2D(
    on=0,
    anchor='center',
    position=(screen_half_x, screen_half_y + L / 2.0),
    size=(L, 2 * tach_width),
    color=(1.0, 1.0, 1.0, 1.0),  # Draw it in white (RGBA)
    orientation=90.0)
################################# ## DISPLAY ## ################################# # Initialize OpenGL graphics screen. screen = get_default_screen() # Set the background color to white (RGBA). screen.parameters.bgcolor = (bg_values[0],bg_values[1],bg_values[2],bg_values[3]) # make Fixation Cross texture fixPt = FixationCross(position= (screen.size[0]/2,screen.size[1]/2), size=(25,25), texture_size=(30,30)) # Intro instruction screen introTexts = [Text( text = "In this experiment, you will see " + str(nBlocks) + " blocks of images", position = (screen.size[0]/2.0,screen.size[1]/2 + fsize), font_name=fname,font_size=fsize, anchor = 'center', color = (0.0,0.0,0.0,1.0))] introTexts_bottomline = [Text( text = "with short breaks in between.", position = (screen.size[0]/2.0,screen.size[1]/2 - fsize), font_name=fname,font_size=fsize, anchor = 'center', color = (0.0,0.0,0.0,1.0))] introTexts.append(Text( text = "During one of these breaks, there will be a TARGET SWITCH!", position = (screen.size[0]/2.0,screen.size[1]/2 + fsize), font_name=fname,font_size=fsize, anchor = 'center', color = (0.0,0.0,0.0,1.0))) introTexts_bottomline.append(Text( text = "and you'll be asked to look for a new category.", position = (screen.size[0]/2.0,screen.size[1]/2 - fsize), font_name=fname,font_size=fsize, anchor = 'center', color = (0.0,0.0,0.0,1.0))) introTexts.append(Text( text = "You don't need to press any buttons.", font_name=fname,font_size=fsize, position = (screen.size[0]/2.0,screen.size[1]/2 + fsize), anchor = 'center', color = (0.0,0.0,0.0,1.0))) introTexts_bottomline.append(Text( text = "Blocks will load automatically.", font_name=fname,font_size=fsize, position = (screen.size[0]/2.0,screen.size[1]/2 - fsize), anchor = 'center', color = (0.0,0.0,0.0,1.0))) introTexts.append(Text( text = "The first block is about to start.", font_name=fname,font_size=fsize, position = (screen.size[0]/2.0,screen.size[1]/2 + fsize), anchor = 'center', color = (0.0,0.0,0.0,1.0)))
def show_feedback(screen, viewport, classifierOutput, curTargList):
    """Populate *viewport* with feedback graphics for a two-target RSVP block.

    The display shows the two target images twice (top row in presentation
    order, bottom row in EEG-classifier order), two outlined boxes that act
    as rank scales with a red tick for target 1 and a green tick for
    target 2, and three lines of summary text.

    Parameters:
        screen           -- VisionEgg screen (used only for its pixel size;
                            screen origin is the lower-left corner).
        viewport         -- VisionEgg viewport whose ``parameters.stimuli``
                            list is REPLACED in place with the feedback
                            stimuli.
        classifierOutput -- per-image records of the form
                            (directory, image_name, result_string) where
                            result_string splits as
                            "block presented_rank ... resorted_rank ...".
        curTargList      -- per-image flags aligned with classifierOutput;
                            exactly two entries must equal 1 (the targets).

    Returns:
        1 on completion.  (The original code had an unreachable
        ``return viewport`` after ``return 1``; it has been removed — the
        viewport is mutated in place, so callers never needed it.)

    Raises:
        IndexError / NameError if fewer than two entries of curTargList
        are flagged — callers must guarantee exactly two targets.
    """
    fname = "freesansbold.ttf"
    fsize = 20

    # Collect (presented-rank, reranked-rank) and the record index of each
    # flagged target; the block id comes from the same result string.
    rlist = []
    spots = []
    for idx, flag in enumerate(curTargList):
        if flag == 1:
            res = classifierOutput[idx][2].rsplit()
            rlist.append((res[1], res[3]))
            spots.append(idx)       # indices of the targets
            block_idx = res[0]      # block number
    # Initial (presentation) and final (classifier) rank of each target.
    initial_order = [float(rlist[0][0]), float(rlist[1][0])]
    resort_order = [float(rlist[0][1]), float(rlist[1][1])]

    # Resolve the target image paths.  In training mode the image names
    # carry a leading 5-character prefix (e.g. "_160_") that is stripped.
    imagename1 = classifierOutput[spots[0]][1]
    imagename2 = classifierOutput[spots[1]][1]
    if imagename1[0] == '_':
        imagename1 = imagename1[5:]
        imagename2 = imagename2[5:]
    filename1 = os.path.join(classifierOutput[spots[0]][0], imagename1)
    filename2 = os.path.join(classifierOutput[spots[1]][0], imagename2)
    # Upper thumbnails (1 & 2) keep presentation order; lower thumbnails
    # (3 & 4) are ordered by the classifier's re-ranking.
    if resort_order[0] < resort_order[1]:
        filename3, filename4 = filename1, filename2
    else:
        filename3, filename4 = filename2, filename1

    # --- Summary text: block id (left edge) plus both orderings ---------
    text_offset = .02 * screen.size[1]
    sttext = "Blocks shown: " + block_idx
    statisticsText = Text(text=sttext,
                          position=(5.0, screen.size[1] / 2),
                          font_name=fname, font_size=fsize,
                          anchor='left', color=(0.0, 0.0, 0.0, 1.0))
    strtext2 = ("Presentation Order: " + str(rlist[0][0]) + " " +
                str(rlist[1][0]))
    statisticsText2 = Text(text=strtext2,
                           position=(screen.size[0] / 2.0,
                                     screen.size[1] / 2 + text_offset),
                           font_name=fname, font_size=fsize,
                           anchor='center', color=(0.0, 0.0, 0.0, 1.0))
    strtext3 = ("EEG Classifier Order: " + str(rlist[0][1]) + " " +
                str(rlist[1][1]))
    statisticsText3 = Text(text=strtext3,
                           position=(screen.size[0] / 2.0,
                                     screen.size[1] / 2 - text_offset),
                           font_name=fname, font_size=fsize,
                           anchor='center', color=(0.0, 0.0, 0.0, 1.0))

    # --- Geometry --------------------------------------------------------
    # Rank-scale boxes.
    rectangle_ht = 0.1 * screen.size[1]
    rectangle_wd = 0.5 * screen.size[0]
    box_thickness = 5.0
    box_up_y = (0.5 * screen.size[1] + 0.5 * rectangle_ht +
                0.06 * screen.size[1])
    box_down_y = (0.5 * screen.size[1] - 0.5 * rectangle_ht -
                  0.06 * screen.size[1])
    # Tick marks inside the boxes; a rank is mapped as a percentage of the
    # box width from its left edge.
    display_line_ht = 0.8 * rectangle_ht
    display_line_wd = 0.65 * 0.01 * rectangle_wd
    tick_y_up = box_up_y - 0.5 * rectangle_ht + 0.5 * display_line_ht
    tick_y_down = box_down_y - 0.5 * rectangle_ht + 0.5 * display_line_ht
    box_left = 0.5 * screen.size[0] - 0.5 * rectangle_wd
    tick_x_top1 = box_left + initial_order[0] * .01 * rectangle_wd
    tick_x_top2 = box_left + initial_order[1] * .01 * rectangle_wd
    tick_x_bttm1 = box_left + resort_order[0] * .01 * rectangle_wd
    tick_x_bttm2 = box_left + resort_order[1] * .01 * rectangle_wd
    # Thumbnail size and [x, y] center positions.
    image_ht = 0.25 * screen.size[1]
    image_wd = image_ht
    upper_y = (0.5 * screen.size[1] + 2 * .05 * screen.size[0] +
               rectangle_ht + 0.5 * image_ht)
    lower_y = (0.5 * screen.size[1] - 2 * .05 * screen.size[0] -
               rectangle_ht - 0.5 * image_ht)
    left_x = 0.5 * screen.size[0] - 0.2 * screen.size[0]
    right_x = 0.5 * screen.size[0] + 0.2 * screen.size[0]
    upper_lt = [left_x, upper_y]
    upper_rt = [right_x, upper_y]
    lower_lt = [left_x, lower_y]
    lower_rt = [right_x, lower_y]

    # --- Stimuli ---------------------------------------------------------
    def _box(center_y, grow, rgba):
        # One filled rectangle; the black outer box is slightly larger than
        # the white inner box so their overlap reads as an outline.
        return Target2D(size=(rectangle_wd + grow, rectangle_ht + grow),
                        color=rgba,
                        orientation=0.0,
                        position=(screen.size[0] / 2.0, center_y))

    box_up_out = _box(box_up_y, 2 * box_thickness, (0.0, 0.0, 0.0, 1.0))
    box_up_in = _box(box_up_y, 0, (1.0, 1.0, 1.0, 1.0))
    box_dwn_out = _box(box_down_y, 2 * box_thickness, (0.0, 0.0, 0.0, 1.0))
    box_dwn_in = _box(box_down_y, 0, (1.0, 1.0, 1.0, 1.0))

    def _tick(x, y, rgba):
        # Vertical tick mark inside a rank box (red = target 1, green = 2).
        return Target2D(size=(display_line_wd, display_line_ht),
                        color=rgba,
                        orientation=0.0,
                        position=(x, y))

    vert_line1_up = _tick(tick_x_top1, tick_y_up, (1.0, 0.0, 0.0, 1.0))
    vert_line2_up = _tick(tick_x_top2, tick_y_up, (0.0, 1.0, 0.0, 1.0))
    vert_line1_down = _tick(tick_x_bttm1, tick_y_down, (1.0, 0.0, 0.0, 1.0))
    vert_line2_down = _tick(tick_x_bttm2, tick_y_down, (0.0, 1.0, 0.0, 1.0))

    def _thumbnail(filename, center):
        # Thumbnail of one target image, centered at *center*.
        return TextureStimulus(texture=Texture(filename),
                               position=center,
                               anchor='center',
                               size=(image_wd, image_ht),
                               mipmaps_enabled=0,
                               texture_min_filter=gl.GL_LINEAR,
                               shrink_texture_ok=1)

    imagestim1 = _thumbnail(filename1, upper_lt)
    imagestim2 = _thumbnail(filename2, upper_rt)
    imagestim3 = _thumbnail(filename3, lower_lt)
    imagestim4 = _thumbnail(filename4, lower_rt)

    # Hand everything to the caller's viewport in back-to-front draw order.
    viewport.parameters.stimuli = [
        box_up_out, box_up_in, box_dwn_out, box_dwn_in,                 # boxes
        vert_line1_up, vert_line2_up, vert_line1_down, vert_line2_down,  # ticks
        imagestim1, imagestim2, imagestim3, imagestim4,                 # thumbnails
        statisticsText, statisticsText2, statisticsText3,               # text
    ]
    return 1