def pause_frame(window: visual.window.Window, duration_time: float):
    """
    Show a pause screen for ``duration_time`` seconds.

    Parameters
    ----------
    window : visual.window.Window
        Psychopy window.
    duration_time : float
        Time of duration of the pause in seconds.

    Returns
    -------
    None.
    """
    # Centered message telling the participant how long the pause lasts.
    pause_message = visual.TextBox(
        window,
        text='This pause will last for ' + str(duration_time) + ' seconds',
        font_name='Courier New',
        font_size=40,
        font_color=[1, 1, 1],
        color_space='rgb',
        size=(1800, 100),
        pos=(0, 0),
        align_horz='center',
        align_vert='center',
        units='pix')
    pause_message.draw()
    window.flip()
    # Block until the pause has elapsed.
    core.wait(duration_time)
def text_and_dots(window: visual.window.Window, my_text: str, x_pos_text: float, x_pos_dot: float, y_pos_dot_1: float, y_pos_dot_2: float):
    """
    Draw a text field and two small dots, then flip the window.

    Parameters
    ----------
    window : visual.window.Window
        Psychopy window.
    my_text : str
        Text to insert into the text field.
    x_pos_text : float
        x-position of the text in pixels.
    x_pos_dot : float
        x-position of the dot in pixels.
    y_pos_dot_1 : float
        y-position of the dot 1 in pixels.
    y_pos_dot_2 : float
        y-position of the dot 2 in pixels.

    Returns
    -------
    None.
    """
    label = visual.TextBox(
        window,
        text=my_text,
        font_name='Courier New',
        font_size=22,
        font_color=[1, 1, 1],
        color_space='rgb',
        size=(2000, 100),
        pos=(x_pos_text, 0),
        align_horz='left',
        align_vert='bottom',
        units='pix')
    # Both dots share every setting except their vertical position.
    dots = [
        visual.Circle(window,
                      lineColor="white",
                      fillColor="white",
                      radius=2,
                      edges=128,
                      pos=(x_pos_dot, y_dot),
                      units='pix')
        for y_dot in (y_pos_dot_1, y_pos_dot_2)
    ]
    label.draw()
    for dot in dots:
        dot.draw()
    window.flip()
def __init__(self, window=None, text='Hello world', font_name=None, font_size=32, position=(10,10), font_color=(1, 1, 1, 0), anchor='lower left', on=True, smooth=True, color=(1, 1, 1)):
    """
    Create a text stimulus rendered through a psychopy TextBox that fills
    the renderer's screen.

    Parameters
    ----------
    window : psychopy window, optional
        Window to draw into; defaults to the BCI renderer's screen.
    text : str
        Initial text to display.
    font_name : str, optional
        Accepted for API compatibility; the TextBox default font is used.
    font_size : int
        Font size in points.
    position : tuple
        Position in BCI screen coordinates; mapped via Map2PsypyCoords.
    font_color : tuple
        Accepted for API compatibility; the displayed color is fixed to
        white ([1, 1, 1]) below, as in the original implementation.
    anchor : str
        Alignment keywords ('top'/'center'/'bottom' plus
        'left'/'center'/'right'), e.g. 'lower left'.
    on, smooth : bool
        Accepted for API compatibility with other stimulus classes.
    color : tuple
        BCI color; converted with _colorFromBCItoPsyPy.

    Raises
    ------
    Exception
        If the renderer screen has not been initialized yet.
    """
    if BciGenericRenderer.subclass().screen is None:
        raise Exception('Stimuli can only be created after the screen is initialized!')
    import psychopy.visual as psypy
    if window is None:
        window = BciGenericRenderer.subclass().screen
    col, op = _colorFromBCItoPsyPy(color)
    # BUG FIX: the horizontal default used to be assigned to a misspelled
    # name ('align_horiz'), so any anchor without a horizontal keyword
    # (e.g. 'top') raised NameError when align_horz was read below.
    align_vert = 'top'
    align_horz = 'left'
    # 'center' applies to both axes; explicit keywords override it.
    if 'center' in anchor:
        align_vert = 'center'
        align_horz = 'center'
    if 'top' in anchor:
        align_vert = 'top'
    if 'bottom' in anchor:
        align_vert = 'bottom'
    if 'left' in anchor:
        align_horz = 'left'
    if 'right' in anchor:
        align_horz = 'right'
    self._text = text
    self._window = window
    self._font_size = font_size
    # NOTE(review): the font_color argument is ignored; white is
    # hard-coded exactly as in the original -- confirm this is intended.
    self._font_color = [1, 1, 1]
    self._size = tuple(BciGenericRenderer.subclass().screen.size)
    self._pos = Map2PsypyCoords(BciGenericRenderer.subclass().screen, position)
    self._align_vert = align_vert
    self._align_horz = align_horz
    self._units = 'pix'
    self._txtbox = psypy.TextBox(self._window, text=self._text,
                                 font_size=self._font_size,
                                 font_color=self._font_color,
                                 size=self._size,
                                 pos=self._pos,
                                 align_vert=self._align_vert,
                                 align_horz=self._align_horz,
                                 units=self._units, opacity=1.0)
def __init__(self, win, dline_num, state_high_img, state_low_img, size=None, pos=(0, 0), title='DIN ?', initial_state=False):
    """
    Build the widgets for one digital input line: an image for the high
    state, an image for the low state, and a title label below them.
    """
    x_pos, y_pos = pos
    self.state = initial_state
    self.line_number = dline_num
    # The two state images share every setting except the bitmap itself.
    shared = dict(size=size, units='pix', pos=pos, autoLog=False, name=title)
    self.on_button = visual.ImageStim(win, image=state_high_img, **shared)
    self.off_button = visual.ImageStim(win, image=state_low_img, **shared)
    # Title strip slightly wider than the image, 25 px below the images.
    self.title = visual.TextBox(
        window=win,
        text=title,
        bold=False,
        italic=False,
        font_size=12,
        font_color=[-1, -1, -1],
        size=(self.on_button.size[0] * 1.05, 20),
        pos=(x_pos, y_pos - 25),
        units='pix',
        align_vert='top',
        grid_horz_justification='center',
        grid_vert_justification='center',
    )
keystext = "PRESS 'q' or 'escape' to Quit.\n" keystext += "# 's': Stop/restart Movie.\n" keystext += "# 'p': Pause/Unpause Movie.\n" keystext += "# '>': Seek Forward 1 Second.\n" keystext += "# '<': Seek Backward 1 Second.\n" keystext += "# '-': Decrease Movie Volume.\n" keystext += "# '+': Increase Movie Volume." text = visual.TextBox(win, keystext, font_name=None, bold=False, italic=False, font_size=21, font_color=[-1, -1, -1, 1], textgrid_shape=(36, 7), pos=(0, -350), units='pix', grid_vert_justification='center', grid_horz_justification='left', align_horz='center', align_vert='bottom', autoLog=False, interpolate=True) # Start the movie stim by preparing it to play and then calling flip() shouldflip = mov.play() while mov.status != visual.FINISHED: # if only a movie stim is being shown on the window, only flip when a new # frame should be displayed. On a 60 Hz monitor playing a 30 Hz video, this # cuts CPU usage of the psychopy app. by almost 50%. if shouldflip:
def __init__(self, win, dial_color=[1, 1, 1], arrow_color=[-0.8, -0.8, -0.8], size=0.25, pos=(-0.5, 0.0), title='Analog Gauge'):
    """
    Assemble the gauge widgets: a semicircular dial face, a needle, a
    value readout and a title label, all positioned in pixel units.
    """
    self.w, self.h = win.size[0], win.size[1]
    # Convert the normalized pos into pixel coordinates.
    px = self.w / 2 * pos[0]
    py = self.h / 2 * pos[1]
    # Upper half-disc forming the dial face.
    self.dial_bkgd_inner = visual.RadialStim(
        win=win, tex='None', units='pix', pos=(px, py), color=dial_color,
        size=self.w * size, angularRes=360, visibleWedge=[0, 180],
        interpolate=True, ori=-90, autoLog=False)
    # Needle geometry: a thin four-vertex kite anchored at the pivot.
    needle_y_offset = self.h * .5 * (size * .08)
    half_width = (self.w / 2.0)
    half_height = (self.h / 2.0)
    stroke = (size / 18.0)
    tip = [0.0, (size * 0.9) * half_width]
    left_base = [-stroke * half_height, stroke * half_width]
    pivot = [0.0, 0.0]
    right_base = [stroke * half_height, stroke * half_width]
    self.handVerts = np.array([tip, left_base, pivot, right_base])
    self.arrow = visual.ShapeStim(
        win, units='pix', vertices=self.handVerts, lineColor=[-1, -1, -1],
        fillColor=arrow_color, lineWidth=2, opacity=0.60,
        pos=(px, needle_y_offset + py), autoLog=False)
    # Numeric readout placed inside the upper part of the dial.
    self.text_value = visual.TextBox(
        window=win,
        text=' ',
        bold=False,
        italic=False,
        font_size=18,
        font_color=[1, -1, -1, 1],
        size=(self.w * size, 40),
        pos=(px, py + (self.w * size) / 4.0 - 20),
        units='pix',
        grid_horz_justification='center',
        grid_vert_justification='center',
    )
    # Gauge title just below the dial center.
    self.title = visual.TextBox(
        window=win,
        text=title,
        bold=False,
        italic=False,
        font_size=13,
        font_color=[-1, -1, -1],
        size=(self.w * size, 25),
        pos=(px, py),
        units='pix',
        align_vert='top',
        grid_horz_justification='center',
        grid_vert_justification='center',
    )
/ MovSinGrat_AmpFactor) / MovSinGrat_GammaFactor) pixVal = 2 * (texdata1DTmp / 255) - 1 #converting the pixel values from 0:255 to -1:1 #setting up the grating DrawTexture = visual.GratingStim(win=win, size=[winWidth, winHeight], units='pix', tex=pixVal) #display current lumninance value lum_text = visual.TextBox(window=win, text=(str(lum_val)), font_size=fontSize, font_color=fontClr, pos=(-2690, 475), size=(300, 37), units='pix', grid_horz_justification='center', grid_vert_justification='center') #draw grating and lum val DrawTexture.draw() lum_text.draw() #flip window and display gratting and lum val win.flip() mouse = event.Mouse(visible=True, win=win) #display grating; and allow user to modify luminance of the grating according to right and left clic
def drumgrating(SpatFreqDeg, TempFreq, t_before, t_During, t_after, Synch, Motionmode):
    """
    Present a drifting/oscillating "drum" grating stimulus, optionally
    synchronized over UDP with a remote control computer.

    Parameters
    ----------
    SpatFreqDeg : float
        Spatial frequency of the grating (cycles per degree).
    TempFreq : float
        Temporal frequency (Hz); FR / TempFreq gives frames per cycle.
    t_before, t_During, t_after : float
        Pre-stimulus, stimulus and post-stimulus durations in milliseconds.
    Synch : bool
        When True, handshake every trial with the control computer over a
        UDP socket (Local_IP/Local_Port <-> Remote_IP/Remote_Port).
    Motionmode : int
        1 -> linear phase drift; 0 -> sinusoidal phase oscillation.

    Returns
    -------
    None.  Returns early on socket timeouts, 'ESC' messages, a middle
    mouse click, or unsupported settings (drumgrating_Ori != 1).

    NOTE(review): indentation below was reconstructed from a
    whitespace-collapsed source -- confirm block nesting against the
    original file.  String payloads handed to sock.sendto()/compared with
    recv() results suggest Python 2 (str vs bytes) -- confirm before
    running on Python 3.
    """
    # Any parameter potentially changed by the user lives in front.py /
    # init_para; imports are local so current values are read per call.
    from psychopy import visual
    from psychopy import event
    from psychopy import clock
    from win32api import GetSystemMetrics
    from init_para import (drumgrating_addblank, drumgrating_Amp_sinu,
                           drumgrating_controlmod, drumgrating_dirindex,
                           drumgrating_Ori, drumgrating_parasize,
                           drumgrating_t_triginit, drumgrating_GammaFactor,
                           drumgrating_AmpFactor, drumgrating_contrast,
                           drumgrating_MeanLum, winWidth, winHeight, ScrnNum,
                           PixelSize, winWidthofEachDisp, DisplayFrameWidth,
                           FR, square1, square2, fontSize, fontClr, win,
                           Local_IP, Local_Port, Remote_IP, Remote_Port)
    import socket
    import numpy as np
    import conv
    # The mouse is the abort control throughout (middle button = quit).
    mouse = event.Mouse(visible=True, win=win)
    if Synch:
        # UDP socket for the control-computer handshake.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Bind the local IP address and local port.
        sock.bind((Local_IP, Local_Port))
        # Tell the experimenter this VS computer is waiting for UDP input.
        standBy = visual.TextBox(window=win,
                                 text=("Waiting for starting the control computer."),
                                 font_size=fontSize, font_color=fontClr,
                                 pos=(-2690, 475), size=(300, 37), units='pix',
                                 grid_horz_justification='center',
                                 grid_vert_justification='center')
        standBy.draw()
        square1.draw()
        square2.draw()
        win.flip()
        try:
            # Wait for the 'gammacorrection' command.
            info = sock.recv(1024)
        except Exception:
            sock.close()
            print("Did not recieve info, connection timeout.")
            return
        # Reply with this display's gamma/amplitude calibration factors.
        sock.sendto(("gamma" + str(drumgrating_GammaFactor) + "amp" + str(drumgrating_AmpFactor)), (Remote_IP, Remote_Port))
        # Status message: the control computer answered.
        standBy = visual.TextBox(window=win,
                                 text=("Control Computer is Ready."),
                                 font_size=fontSize, font_color=fontClr,
                                 pos=(-2690, 475), size=(300, 37), units='pix',
                                 grid_horz_justification='center',
                                 grid_vert_justification='center')
        standBy.draw()
        try:
            # Wait for the control mode ('autoVS' or 'manualVS').
            drumgrating_controlmod = sock.recv(1024)
        except Exception:
            sock.close()
            print("Did not recieve drumgrating_controlmod, connection timeout.")
            return
        sock.sendto("Wait for parameters", (Remote_IP, Remote_Port))
        if drumgrating_controlmod == 'autoVS':
            try:
                drumgrating_parasize = sock.recv(1024)
            except Exception:
                sock.close()
                print("Did not recieve parasize, connection timeout.")
                return
            # Acknowledge receipt of the parameter-block size.
            sock.sendto("read parasize", (Remote_IP, Remote_Port))
            # Convert the received string into the stimulation count.
            drumgrating_parasize = conv.deleteParasize(drumgrating_parasize)
            # One row of 9 parameters per stimulation.
            paras = np.empty(shape=[drumgrating_parasize, 9])
            # Receive the 9 parameters for every stimulation.
            for i in range(drumgrating_parasize):
                temp = sock.recv(1024)
                temp = conv.convStr2Dig(temp)
                paras[i, :] = temp
                sock.sendto("Para DONE", (Remote_IP, Remote_Port))
            try:
                # Orientation (1 vertical / 0 horizontal), motion mode,
                # oscillation amplitude and blank flag for all trials.
                paratemp = sock.recv(1024)
            except Exception:
                sock.close()
                print("Did not recieve message, connection timeout.")
                return
            paratemp = conv.convStr2Dig(paratemp)
            drumgrating_Ori = int(paratemp[0])
            Motionmode = int(paratemp[1])
            drumgrating_Amp_sinu = paratemp[2]
            drumgrating_addblank = paratemp[3]
            sock.sendto("Para DONE", (Remote_IP, Remote_Port))
        elif drumgrating_controlmod == 'manualVS':
            return
    # With Synch False, replicate the function arguments into the same
    # paras/paratemp layout the Synch branch produces.
    else:
        paras = np.empty(shape=[drumgrating_parasize, 9])
        for i in range(drumgrating_parasize):
            paras[i, :] = [SpatFreqDeg, TempFreq, drumgrating_contrast,
                           drumgrating_MeanLum, drumgrating_dirindex,
                           t_before, t_During, t_after, drumgrating_t_triginit]
        paratemp = [drumgrating_Ori, Motionmode, drumgrating_Amp_sinu, drumgrating_addblank]
        drumgrating_Ori = int(paratemp[0])
        Motionmode = int(paratemp[1])
        drumgrating_Amp_sinu = paratemp[2]
        drumgrating_addblank = paratemp[3]
    if Synch:
        # Get the data file name from the control computer.
        while True:
            try:
                info = sock.recv(1024)
            except:
                pass
            if info.strip():
                print (info)
                sock.sendto(("nex"), (Remote_IP, Remote_Port))
                break
            if mouse.getPressed()[1]:
                sock.close()
                return
        # Wait for the "STR" (start) command.
        while True:
            try:
                info = sock.recv(1024)
            except:
                pass
            if info == "STR":
                sock.sendto(("VS is running"), (Remote_IP, Remote_Port))
                break
            if mouse.getPressed()[1]:
                sock.close()
                return
    # Pixel angles relative to the viewer, per stimulus orientation.
    if drumgrating_Ori == 1:
        # 2-D placeholder: one angle per horizontal pixel across screens.
        pixelangle = np.empty(shape=[1, winWidth])
        temp = np.array(range(winWidthofEachDisp))
        temp.reshape(1, winWidthofEachDisp)  # the temp must be 2D
        # Angle of each pixel on the first monitor.
        tempPixelAngle = np.degrees(np.arctan((temp - (winWidthofEachDisp/2.0))*PixelSize*(2.0/DisplayFrameWidth))) + 45
        for i in range(ScrnNum):
            # Each successive screen spans a further 90 degrees.
            pixelangle[:, i*winWidthofEachDisp: (i + 1)*winWidthofEachDisp] = tempPixelAngle + 90*i
    else:
        # Horizontal orientation is not implemented here.
        return
    for m in range(drumgrating_parasize):
        tic = clock.getTime()
        if m == 0:
            # First trial: always compute the texture frames.
            SpatFreqDeg = paras[m, 0]
            TempFreq = paras[m, 1]
            drumgrating_contrast = paras[m, 2]
            drumgrating_MeanLum = paras[m, 3]
            drumgrating_dirindex = paras[m, 4]
            t_before = paras[m, 5]
            t_During = paras[m, 6]
            t_after = paras[m, 7]
            drumgrating_t_triginit = paras[m, 8]
            # Pixel value in [-1, 1] that reproduces the mean luminance.
            pixelformeanlum = 2*(np.exp(np.log(drumgrating_MeanLum/drumgrating_AmpFactor)/drumgrating_GammaFactor)/255.0) - 1
            drumgrating_gray = drumgrating_MeanLum
            inc = drumgrating_gray*drumgrating_contrast
            # Frames to be calculated per temporal period.
            frames = round(FR/TempFreq)
            phase = np.array(range(int(frames)))
            if Motionmode == 1:
                # Linear drift: phase advances evenly over one period.
                phase = (phase/float(round(frames)))*(2.0*np.pi)
            elif Motionmode == 0:
                # Oscillation: phase follows a sinusoid of given amplitude.
                phase = drumgrating_Amp_sinu*np.sin((phase/frames)*2*np.pi)*SpatFreqDeg*2*np.pi
            # Generate the per-frame pixel values for this orientation.
            if drumgrating_Ori == 1:
                # List holding all frames of one cycle.
                texdata1D = []
                for i in range(int(frames)):
                    # Gamma-corrected luminance profile for frame i.
                    texdata1DTmp = np.exp(np.log((drumgrating_gray + inc*np.sin(pixelangle*SpatFreqDeg*2*np.pi + phase[i]))/drumgrating_AmpFactor)/drumgrating_GammaFactor)
                    pixVal = 2*(texdata1DTmp/255) - 1  # map 0:255 -> -1:1
                    texdata1D.append(pixVal)
            else:
                return
        else:
            # Later trials: only rebuild frames when parameters changed.
            if sum(abs(paras[m, :] - paras[m-1, :])) > 1e-7:
                SpatFreqDeg = paras[m, 0]
                TempFreq = paras[m, 1]
                drumgrating_contrast = paras[m, 2]
                drumgrating_MeanLum = paras[m, 3]
                drumgrating_dirindex = paras[m, 4]
                t_before = paras[m, 5]
                t_During = paras[m, 6]
                # NOTE(review): assigned to 't_afterVal' here but 't_after'
                # in the m == 0 branch -- possible typo; confirm intent.
                t_afterVal = paras[m, 7]
                drumgrating_t_triginit = paras[m, 8]
                pixelformeanlum = 2*(np.exp(np.log(drumgrating_MeanLum/drumgrating_AmpFactor)/drumgrating_GammaFactor)/255.0) - 1
                drumgrating_gray = drumgrating_MeanLum
                inc = drumgrating_gray*drumgrating_contrast
                frames = round(FR/TempFreq)
                phase = np.array(range(int(frames)))
                if Motionmode == 1:
                    phase = (phase/float(round(frames)))*(2.0*np.pi)
                elif Motionmode == 0:
                    phase = drumgrating_Amp_sinu*np.sin((phase/frames)*2*np.pi)*SpatFreqDeg*2*np.pi
                if drumgrating_Ori == 1:
                    texdata1D = []
                    for i in range(int(frames)):
                        texdata1DTmp = np.exp(np.log((drumgrating_gray + inc*np.sin(pixelangle*SpatFreqDeg*2*np.pi + phase[i]))/drumgrating_AmpFactor)/drumgrating_GammaFactor)
                        pixVal = 2*(texdata1DTmp/255) - 1
                        texdata1D.append(pixVal)
                else:
                    return
        # Frame counter: reset per trial for addblank 0/1, but kept running
        # across trials for addblank == 2 (reset only on the first trial).
        if drumgrating_addblank == 0 or drumgrating_addblank == 1:
            frmNum = 0  # frame number within one cycle
        elif drumgrating_addblank == 2 and m == 0:
            frmNum = 0  # frame number within one cycle
        # Grating stimulus seeded with the first frame.
        DrawTexture = visual.GratingStim(win=win, size=[winWidth, winHeight], units='pix', tex=texdata1D[0])
        if Synch:
            # Wait for "TRLstart"; acknowledge with the 1-based trial index.
            sock.settimeout(0.5)
            comm = [""]
            while True:
                try:
                    comm = sock.recvfrom(1024)
                except Exception:
                    pass
                if comm[0] == "TRLstart":
                    sock.sendto(("TRLstart " + str(m +1)), (Remote_IP, Remote_Port))
                    break
                elif comm[0] == "ESC1":
                    # 'ESC1' aborts: return to front.
                    sock.close()
                    return
                if mouse.getPressed()[1]:
                    sock.close()
                    print("Exit at ESC1")
                    return
        # Draw the pre-stimulus display once.
        if drumgrating_addblank == 1.0:
            win.color = pixelformeanlum        # blank at mean luminance
        elif drumgrating_addblank == 0.0:
            DrawTexture.draw()                 # static first frame
        elif drumgrating_addblank == 2.0:
            DrawTexture.tex = texdata1D[frmNum]  # keep the drift running
            DrawTexture.draw()
            frmNum = frmNum + 1
            if frmNum >= len(texdata1D):
                frmNum = 0
        square1.draw()
        square2.draw()
        win.flip()
        # Pre-stimulus period: wait t_before ms (redraw only when drifting).
        toc = clock.getTime() - tic
        while toc < (t_before/1000.0):
            toc = clock.getTime() - tic
            if drumgrating_addblank == 2:
                # Assign the texture using the corresponding frame.
                DrawTexture.tex = texdata1D[frmNum]
                # Middle mouse button exits the stimulation.
                if mouse.getPressed()[1]:
                    if Synch:
                        sock.close()
                    return
                frmNum = frmNum + 1
                if frmNum >= len(texdata1D):
                    frmNum = 0
                DrawTexture.draw()
                square1.draw()
                square2.draw()
                win.flip()
        # Camera-trigger period: flash square1 white for the first 3 frames.
        for i in range(int(FR*drumgrating_t_triginit/1000.0)):
            if i < 3:
                square1.fillColor = [1,1,1]
                square2.fillColor = [-1,-1,-1]
            else:
                square1.fillColor = [-1,-1,-1]
                square2.fillColor = [-1,-1,-1]
            if drumgrating_addblank == 1.0:
                win.color = pixelformeanlum
            elif drumgrating_addblank == 0.0:
                DrawTexture.draw()
            elif drumgrating_addblank == 2.0:
                # Assign the texture using the corresponding frame.
                DrawTexture.tex = texdata1D[frmNum]
                frmNum = frmNum + 1
                if frmNum >= len(texdata1D):
                    frmNum = 0
                DrawTexture.draw()
            if mouse.getPressed()[1]:
                if Synch:
                    sock.close()
                return
            square1.draw()
            square2.draw()
            win.flip()
        # Photodiode squares: square2 white marks the stimulation period.
        square1.fillColor = [-1,-1,-1]
        square2.fillColor = [1,1,1]
        # Stimulation period: draw t_During ms worth of grating frames.
        for frm in range(int(FR*t_During/1000.0)):
            # Assign the texture using the corresponding frame.
            DrawTexture.tex = texdata1D[frmNum]
            # Middle mouse button exits the stimulation.
            if mouse.getPressed()[1]:
                if Synch:
                    sock.close()
                return
            frmNum = frmNum + 1
            if frmNum >= len(texdata1D):
                frmNum = 0
            DrawTexture.draw()
            square1.draw()
            square2.draw()
            win.flip()
        if Synch:
            sock.sendto(("TRLdone " + str(m +1)), (Remote_IP, Remote_Port))
        # Both squares dark again after the stimulation.
        square1.fillColor = [-1,-1,-1]
        square2.fillColor = [-1,-1,-1]
        # Post-stimulus period of t_after ms.
        for toc in range(int(t_after*FR/1000.0)):
            if drumgrating_addblank == 1.0:
                win.color = pixelformeanlum
            elif drumgrating_addblank == 0.0:
                DrawTexture.draw()
            elif drumgrating_addblank == 2:
                # Assign the texture using the corresponding frame.
                DrawTexture.tex = texdata1D[frmNum]
                frmNum = frmNum + 1
                if frmNum >= len(texdata1D):
                    frmNum = 0
                DrawTexture.draw()
            square1.draw()
            square2.draw()
            win.flip()
    if Synch:
        # All trials done: wait for the stop/continue command.
        while True:
            try:
                comm = sock.recvfrom(1024)
            except:
                pass
            if comm[0] == "ESC1":
                sock.close()
                return
            elif comm[0] == "ESC0":
                break
            elif mouse.getPressed()[1]:
                sock.close()
                print("Exit at ESC2")
                return
texRes=128, interpolate=True, depth=-1.0) from psychopy.hardware import joystick from psychopy import visual joystick.backend='pyglet' # must match the Window nJoys = joystick.getNumJoysticks() # to check if we have any id = 0 joy = joystick.Joystick(id) # id must be <= nJoys - 1 nAxes = joy.getNumAxes() # for interest # Create some text box objects for debugging purposes shouldBeReinforced_txt = visual.TextBox(window=win, text=' ', font_size=20, font_color=[-1, -1, 1], size=(1.9, .3), pos=(-0.7, 0.4), grid_horz_justification='center', units='norm') reward_txt = visual.TextBox(window=win, text='reward!', font_size=30, font_color=[1, 1, 1], size=(1.9, .3), pos=(0.0, 0.6), grid_horz_justification='center', units='norm') def show_debugging_stuff(): ''' set text for stims to show every frame ''' shouldBeReinforced_txt.setText('Reinforce?: \n %s' %str(shouldBeReinforced)) #reward_txt.setText('reward!')# %str(reward)) #isReinforced_txt.setText('isReinforced: %s' %str(isReinforced)) # Draw stim on screen shouldBeReinforced_txt.draw()
) myMouse = event.Mouse() # Create two textBox stim, each using different parameters supported by # Textbox. Note that since no font_name is provided when creating the # textbox stim, a default font is selected by TextBox stim automatically. # sometext=u'PRESS ANY KEY TO QUIT DEMO.' textbox=visual.TextBox(window=window, text=sometext, bold=False, italic=False, font_size=21, font_color=[-1,-1,1], size=(1.9,.3), grid_color=[-1,1,-1,1], grid_stroke_width=1, pos=(0.0,0.5), units='norm', grid_horz_justification='center', grid_vert_justification='center', ) if textbox.getDisplayedText()!=textbox.getText(): print '**Note: Text provided to TextBox does not fit within the TextBox bounds.' #print textbox.getTextGridCellPlacement() print 'Char Index 0 glyph box:',textbox.getGlyphPositionForTextIndex(0) print 'Char Index 7 glyph box:',textbox.getGlyphPositionForTextIndex(7) disp_txt_len=len(textbox.getDisplayedText())-1 print 'Char Index %d glyph box:'%(disp_txt_len),textbox.getGlyphPositionForTextIndex(disp_txt_len)
def calibration(win):
    '''
    Display a static stimulus whose luminance steps through a log-spaced
    list of values, for calibrating an eye-tracking system.

    input: win (a psychopy window)
    output: visual stimulus with a luminance value changing on a log scale

    Left click  : step to the next luminance value (wraps around).
    Right click : step to the previous luminance value.
    Middle click: exit.

    NOTE: the relationship between pupil size and luminance is
    logarithmic; therefore the steps by which the luminance is increased
    follow a log scale.

    NOTE(review): the 'win' parameter is immediately shadowed by the
    'win' imported from init_para below (which is even listed twice in
    that import) -- confirm which window is intended.  Indentation was
    reconstructed from a whitespace-collapsed source.
    '''
    import numpy as np
    from psychopy import event, visual, core
    from init_para import (MovSinGrat_Amp_sinu, MovSinGrat_GammaFactor,
                           MovSinGrat_AmpFactor, MovSinGrat_contrast,
                           MovSinGrat_MeanLum, win, winWidth, winHeight,
                           ScrnNum, PixelSize, winWidthofEachDisp,
                           DisplayFrameWidth, FR, square1, square2,
                           fontSize, fontClr, win, Local_IP, Local_Port,
                           Remote_IP, Remote_Port)
    # Define parameters.
    WholeWinwidth = winWidth
    WinWidthofEachdisp = winWidth / ScrnNum
    # Pattern used for calibration: 1 = grating, 0 = uniform field.
    calib_pattern = 0
    if calib_pattern:
        # Max eligible value for MeanLum in cd/m^2 (halved for gratings).
        maximum = (MovSinGrat_AmpFactor * 250**MovSinGrat_GammaFactor ) / 2
    else:
        # Max eligible value for MeanLum in cd/m^2.
        maximum = (MovSinGrat_AmpFactor * 250**MovSinGrat_GammaFactor )
    #minimum = (MovSinGrat_AmpFactor*1**MovSinGrat_GammaFactor)/2
    minimum = 15
    # Log-spaced luminance ladder from minimum to maximum, plus leading 0.
    log_max = np.log(maximum)
    log_min = np.log(minimum)
    num_step = 5
    log_step = (log_max - log_min) / num_step
    Contrast = 1
    temp = np.array(range(0, (num_step + 1)))
    log_lum_val = log_min + log_step * (temp)
    lum_val_list = np.exp(log_lum_val)
    lum_val_list = np.append([0], lum_val_list)
    lum_val = lum_val_list[0]
    step_count = 0  # counter to loop around the luminance values
    inc = lum_val * Contrast  # modulation amplitude at this luminance
    SpatFreqDeg = 0.1
    #MeanLum = (maximum)/2
    phase = 0  # in radians
    # Placeholder angle for every horizontal pixel (must be 2-D).
    pixelangle = np.empty(shape=[1, winWidth ])
    temp = np.array(range(winWidthofEachDisp))
    temp.reshape(1, winWidthofEachDisp)  # the temp must be 2D
    # Pixel angles for the first monitor...
    tempPixelAngle = np.degrees(
        np.arctan((temp - (winWidthofEachDisp / 2.0)) * PixelSize *
                  (2.0 / DisplayFrameWidth))) + 45
    for i in range(ScrnNum):
        # ...then each further screen offset by 90 degrees.
        pixelangle[:, i * winWidthofEachDisp:(
            i + 1) * winWidthofEachDisp] = tempPixelAngle + 90 * i
    # Gamma-corrected pixel values for a vertical grating.
    texdata1DTmp = np.exp(
        np.log((lum_val + inc *
                np.sin(pixelangle * SpatFreqDeg * 2 * np.pi + phase)) /
               MovSinGrat_AmpFactor) / MovSinGrat_GammaFactor)
    pixVal = 2 * (texdata1DTmp / 255) - 1  # map 0:255 -> -1:1
    if not calib_pattern:
        # Uniform field: overwrite with a single gamma-corrected value.
        uniform_pix = np.exp(
            np.log(lum_val / MovSinGrat_AmpFactor) / MovSinGrat_GammaFactor)
        pixVal[:] = 2 * (uniform_pix / 255) - 1
    # Set up the grating.
    DrawTexture = visual.GratingStim(win=win, size=[winWidth, winHeight],
                                     units='pix', tex=pixVal)
    # Display the current luminance value in the top-left corner.
    lum_text = visual.TextBox(window=win, text=(str('%.1f' % (lum_val))),
                              font_size=fontSize + 7, font_color=[1, 1, 1],
                              pos=(WholeWinwidth / 2 * (-1) + 50,
                                   winHeight / 2 - 25),
                              size=(300, 37), units='pix',
                              grid_horz_justification='center',
                              grid_vert_justification='center')
    # Draw grating and luminance value, then flip to display them.
    DrawTexture.draw()
    lum_text.draw()
    win.flip()
    mouse = event.Mouse(visible=True, win=win)
    # Let the user modify the luminance with right and left clicks.
    while True:
        core.wait(0.1)
        currMouse = mouse.getPressed()
        # Middle button: close the calibration.
        if currMouse[1]:
            break
        # Left click: increase luminance (steps wrap around the list).
        if currMouse[0]:
            step_count += 1
            lum_val = lum_val_list[step_count % (len(lum_val_list))]
            inc = lum_val * Contrast
            # Regenerate the pixel values.
            texdata1DTmp = np.exp(
                np.log((lum_val + inc *
                        np.sin(pixelangle * SpatFreqDeg * 2 * np.pi + phase)) /
                       MovSinGrat_AmpFactor) / MovSinGrat_GammaFactor)
            pixVal = 2 * (texdata1DTmp / 255) - 1  # map 0:255 -> -1:1
            if not calib_pattern:
                uniform_pix = np.exp(
                    np.log(lum_val / MovSinGrat_AmpFactor) /
                    MovSinGrat_GammaFactor)
                pixVal[:] = 2 * (uniform_pix / 255) - 1
            # Redraw the texture and the new luminance value.
            DrawTexture = visual.GratingStim(win=win,
                                             size=[winWidth, winHeight],
                                             units='pix', tex=pixVal)
            lum_text = visual.TextBox(window=win,
                                      text=(str('%.1f' % (lum_val))),
                                      font_size=fontSize + 7,
                                      font_color=[1, 1, 1],
                                      pos=(WholeWinwidth / 2 * (-1) + 50,
                                           winHeight / 2 - 25),
                                      size=(300, 37), units='pix',
                                      grid_horz_justification='center',
                                      grid_vert_justification='center')
            DrawTexture.draw()
            lum_text.draw()
            win.flip()
            # Wait for the button release (debounce).
            while any(currMouse):
                currMouse = mouse.getPressed()
        # Right click: decrease luminance (steps wrap around the list).
        if currMouse[2]:
            step_count -= 1
            lum_val = lum_val_list[step_count % (len(lum_val_list))]
            inc = lum_val * Contrast
            # Regenerate the pixel values from the luminance value.
            texdata1DTmp = np.exp(
                np.log((lum_val + inc *
                        np.sin(pixelangle * SpatFreqDeg * 2 * np.pi + phase)) /
                       MovSinGrat_AmpFactor) / MovSinGrat_GammaFactor)
            pixVal = 2 * (texdata1DTmp / 255) - 1  # map 0:255 -> -1:1
            if not calib_pattern:
                uniform_pix = np.exp(
                    np.log(lum_val / MovSinGrat_AmpFactor) /
                    MovSinGrat_GammaFactor)
                pixVal[:] = 2 * (uniform_pix / 255) - 1
            # Redraw the texture and the new luminance value.
            DrawTexture = visual.GratingStim(win=win,
                                             size=[winWidth, winHeight],
                                             units='pix', tex=pixVal)
            lum_text = visual.TextBox(window=win,
                                      text=(str('%.1f' % (lum_val))),
                                      font_size=fontSize + 7,
                                      font_color=[1, 1, 1],
                                      pos=(WholeWinwidth / 2 * (-1) + 50,
                                           winHeight / 2 - 25),
                                      size=(300, 37), units='pix',
                                      grid_horz_justification='center',
                                      grid_vert_justification='center')
            DrawTexture.draw()
            lum_text.draw()
            win.flip()
            # Wait for the button release (debounce).
            while any(currMouse):
                currMouse = mouse.getPressed()
        # Debounce any remaining pressed button before the next poll.
        while any(currMouse):
            currMouse = mouse.getPressed()
    return
def main():
    """
    Main entry point of the final-degree project (Trabajo Fin de Grado).

    Launched with the appropriate CLI parameters, it runs the whole
    experiment: it builds (or downloads) the target / non-target image
    sets, spawns the EEG-capture thread, and displays the images on
    screen while pushing a stimulus marker through an LSL outlet for
    every image shown.

    (Original comments were in Spanish; translated to English here.
    Runtime strings — help texts, printed messages — kept verbatim.)
    """
    banner = """
    ██████╗ ██████╗██╗ ████████╗███████╗ ██████╗
    ██╔══██╗██╔════╝██║ ╚══██╔══╝██╔════╝██╔════╝
    ██████╔╝██║ ██║█████╗██║ █████╗ ██║ ███╗
    ██╔══██╗██║ ██║╚════╝██║ ██╔══╝ ██║ ██║
    ██████╔╝╚██████╗██║ ██║ ██║ ╚██████╔╝
    ╚═════╝ ╚═════╝╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝

    Enrique Tomás Martínez Beltrán
    """
    print(colored(banner, 'yellow'))

    # ----- command-line interface (Spanish help texts kept verbatim) -----
    parser = argparse.ArgumentParser(description='Obtención de señal EEG. Ejecución del experimento.',
                                     add_help=False)
    parser.add_argument('-n', '--name', dest='name',
                        default="exp_{}".format(datetime.now().strftime("%d-%m-%Y-%H-%M-%S")),
                        help='Nombre del experimento')
    parser.add_argument('-dim', '--dim', dest='size_monitor', default=[1920, 1080],
                        help='Dimensiones de la pantalla (default [1920,1080])')
    parser.add_argument('-dm', '--distmon', dest='distance_monitor', default=67,
                        help='Distancia al monitor -en centímetros- (default 67)')
    parser.add_argument('-m', '--mode', dest='mode', default=2,
                        help='Modo de ejecución del programa (default 2)')
    # parser.add_argument('-t', '--time', dest='time', default=20,
    #                     help='Tiempo de duración de la grabación')
    parser.add_argument('-i', '--images', dest='images', default=30,
                        help='Número de imágenes distintas utilizadas en el experimento (default 30)')
    parser.add_argument('-p', '--prob', dest='prob_target', default=0.1,
                        help='Probabilidad de aparición del Target en el experimento -tanto por 1- (default 0.1)')
    parser.add_argument('-tt', dest='target_time', default=5,
                        help='Tiempo de visualización del target -en segundos- (default 5)')
    parser.add_argument('-in', dest='image_interval', default=0.250,
                        help='Tiempo transcurrido entre imágenes -en segundos- (default 0.250)')
    parser.add_argument('-io', dest='image_offset', default=0.150,
                        help='Tiempo offset de cada imagen -en segundos- (default 0.150)')
    parser.add_argument('-j', dest='jitter', default=0.2,
                        help='Tiempo jitter variable al mostrar imagen -en segundos- (default 0.2)')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + VERSION,
                        help="Versión del programa.")
    parser.add_argument('-a', '--about', action='version',
                        version='Creado por Enrique Tomás Martínez Beltrán',
                        help="Información sobre el creador del programa.")
    parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
                        help='Ayuda sobre la utilización del programa.')
    args = parser.parse_args()

    experiment = args.name
    # NOTE(review): this hard-coded assignment overrides the -n/--name option
    # parsed just above — looks like leftover debug code; confirm intent.
    experiment = 'exp_23-07-2020-00-28-33'
    # experiment_time = float(args.time)
    # NOTE(review): when -m is given on the CLI, argparse yields a *string*,
    # so the `mode == 1` / `mode == 2` int comparisons below only match the
    # default value 2 — verify intended behaviour.
    mode = args.mode
    total_img = int(args.images)
    size_monitor = args.size_monitor
    prob_target = float(args.prob_target)
    distance_monitor = int(args.distance_monitor)
    try:
        # Create the experiment folder tree on first run.
        if not os.path.isdir('experiments/' + experiment):
            os.makedirs("experiments/{}/target".format(experiment))
            os.makedirs("experiments/{}/no_target".format(experiment))
            os.makedirs("experiments/{}/records".format(experiment))
        # Populate the image folders only if either of them is still empty.
        if not os.listdir('experiments/{}/target'.format(experiment)) or not os.listdir(
                'experiments/{}/no_target'.format(experiment)):
            if (mode == 1):
                printInfo("Modo 1 seleccionado (Modo manual)")
                # Mode 1: images are added manually; the application only reads them.
            elif (mode == 2):
                printInfo("Modo 2 seleccionado (Modo automático)")
                printInfo("Descargando recursos...")
                # Mode 2: download `total_img` random pictures from the Unsplash
                # API; the first becomes the target, the rest are non-targets.
                url_all = "https://api.unsplash.com/photos/random?count={}".format(total_img)
                headers = {
                    'Authorization': 'Client-ID {}'.format(API)
                }
                response = requests.get(url_all, headers=headers, stream=True)
                response_json = json.loads(response.text)
                is_target = False
                count = 0
                for image in response_json:
                    url = image['urls']['raw']
                    response = requests.get(url + '&fm=jpg&fit=crop&w=1920&h=1080&q=80&fit=max',
                                            headers=headers, stream=True)
                    if not is_target:
                        # First downloaded image is saved as the single target.
                        with open('experiments/{}/target/target.jpeg'.format(experiment),
                                  'wb') as out_file:
                            shutil.copyfileobj(response.raw, out_file)
                        is_target = True
                        continue
                    with open('experiments/{}/no_target/no_target_{}.jpeg'.format(experiment, count),
                              'wb') as out_file:
                        shutil.copyfileobj(response.raw, out_file)
                    del response
                    count = count + 1
        image_interval = float(args.image_interval)
        image_offset = float(args.image_offset)
        jitter = float(args.jitter)
        target_time = int(args.target_time)
        # Launch the recording thread.
        # It waits until <enter> is pressed to start the synchronised capture.
        # Approximate duration of the experiment: the thread records for
        # roughly this long (always a bit more, to avoid cutting it short).
        # NOTE(review): the formula multiplies interval by offset — was it
        # meant to be total_img * (image_interval + image_offset)? Confirm.
        experiment_time = total_img * image_interval * image_offset + total_img / 2
        process = Thread(target=record_experiment, args=[experiment, experiment_time])
        process.start()
        print()
        printInfo("Nombre del experimento: " + experiment)
        printInfo("Dimensiones de la pantalla: ancho={} | alto={}".format(size_monitor[0], size_monitor[1]))
        printInfo("Ruta del experimento: experiments/{}".format(experiment))
        printInfo("Duración aproximada del experimento: " + str(experiment_time) + " s")
        printInfo("Tiempo devisualización de Target pre-experimento: " + str(target_time * 1000) + " ms")
        printInfo("Intervalo entre imágenes: " + str(image_interval * 1000) + " ms")
        printInfo("Probabilidad de aparición Target: " + str(prob_target * 100) + " %")
        if jitter:
            printInfo("Jitter: " + str(jitter * 1000) + " ms")
        try:
            '''
            Recuperamos metadata.txt del experimento (si existe)
            '''
            # Reuse the experiment's metadata.txt if it already exists.
            images = pd.read_csv('experiments/{}/metadata.txt'.format(experiment))
        except:
            # No metadata yet: build a random target / non-target sequence.
            printError("Metadata no encontrado, creando metadata aleatorio...")
            '''
            1 -> TARGET
            0 -> NO TARGET
            '''
            # Array of total_img values in {0, 1}; 1 (-> TARGET) is drawn with
            # probability prob_target.
            img_types = np.random.binomial(1, prob_target, total_img)

            # Adjustment to avoid 2 or more consecutive targets:
            # zero out the second element of each consecutive pair and return
            # how many targets were removed.
            def check(lst):
                caux = 0
                last = lst[0]
                for i, num in enumerate(lst[1:]):
                    if num == 1 and last == 1:
                        caux = caux + 1
                        # NOTE(review): enumerating lst[1:] means `i` lags the
                        # real index by one, so this clears position i, not
                        # i+1 — verify which element was meant to be cleared.
                        lst[i] = 0
                    last = num
                return caux

            n = check(img_types)
            # Re-insert the n removed targets at random positions that do not
            # recreate a consecutive pair.
            for i in range(n):
                while (True):
                    # NOTE(review): if this is random.randint the upper bound
                    # is inclusive, so r == len(img_types) would raise
                    # IndexError; confirm which randint is imported.
                    r = randint(0, len(img_types))
                    if img_types[r] != 1:
                        img_types[r] = 1
                        if (check(img_types)):
                            continue
                        else:
                            break
            images = pd.DataFrame(dict(img_type=img_types, timestamp=np.zeros(total_img)))
            images.to_csv('experiments/{}/metadata.txt'.format(experiment), index=False)
        print()
        printInfo("DataFrame generado: ")
        print()
        print(images)
        print()

        # ----- PsychoPy set-up -----
        mon = monitors.Monitor('asusmon')
        mon.setDistance(distance_monitor)
        window = visual.Window(size_monitor, monitor=mon, units="pix", fullscr=False,
                               color=[-1, -1, -1])

        def cargarImagen(file):
            # Wrap a file path in a full-screen ImageStim on the shared window.
            nonlocal window
            return visual.ImageStim(win=window, image=file, size=size_monitor)

        targets = []
        no_targets = []
        t_argets = glob('experiments/{}/target/*.jpeg'.format(experiment))
        for i in t_argets:
            targets.append(cargarImagen(i))
        not_argets = glob('experiments/{}/no_target/*.jpeg'.format(experiment))
        for i in not_argets:
            no_targets.append(cargarImagen(i))

        # Intro / outro screens.
        text1 = visual.TextBox(window=window,
                               text='[Trabajo Fin de Grado - Enrique Tomás Martínez Beltrán]',
                               font_size=20,
                               font_color=[1, 1, 1],
                               textgrid_shape=[55, 2],
                               pos=(0.0, 0.6),
                               # border_color=[-1, -1, 1, 1],
                               # border_stroke_width=4,
                               # grid_color=[1, -1, -1, 0.5],
                               # grid_stroke_width=1
                               )
        text2 = visual.TextBox(window=window,
                               text='Presiona <enter> para comenzar el experimento...',
                               font_size=20,
                               font_color=[1, 1, 1],
                               textgrid_shape=[48, 2],
                               pos=(0.0, 0.3),
                               # border_color=[-1, -1, 1, 1],
                               # border_stroke_width=4,
                               # grid_color=[1, -1, -1, 0.5],
                               # grid_stroke_width=1
                               )
        text3 = visual.TextBox(window=window,
                               text='Fin del experimento...',
                               font_size=20,
                               font_color=[1, 1, 1],
                               textgrid_shape=[55, 2],
                               pos=(0.0, 0.6),
                               # border_color=[-1, -1, 1, 1],
                               # border_stroke_width=4,
                               # grid_color=[1, -1, -1, 0.5],
                               # grid_stroke_width=1
                               )
        text4 = visual.TextBox(window=window,
                               text='¡Gracias por participar!',
                               font_size=20,
                               font_color=[1, 1, 1],
                               textgrid_shape=[48, 2],
                               pos=(0.0, 0.3),
                               # border_color=[-1, -1, 1, 1],
                               # border_stroke_width=4,
                               # grid_color=[1, -1, -1, 0.5],
                               # grid_stroke_width=1
                               )
        logo_umu = visual.ImageStim(win=window, image="experiments/umu.jpg", units='pix')
        logo_umu.pos += -0.3
        logo_umu.size = [610, 140]
        text1.draw()
        text2.draw()
        logo_umu.draw()
        window.flip()
        '''
        Si presionamos [ENTER] -> Iniciamos el experimento
        Creamos Estimulo Stream para que sea detectado por el hilo
        '''
        # Block until the participant presses <enter>.
        key = event.waitKeys()
        while ('return' not in key):
            key = event.waitKeys()
        core.wait(3)
        '''
        Mostramos Target, el experimento comenzará después de mostrar X
        segundos la imagen Target
        '''
        # Show the target for target_time seconds before starting.
        target = choice(targets)
        target.draw()
        window.flip()
        core.wait(target_time)
        window.flip()

        # LSL outlet carrying one int32 marker per displayed image; the
        # recording thread picks this stream up.
        info = StreamInfo('Estimulo', 'Estimulo', 1, 0, 'int32', 'estimulo12310')
        outlet = StreamOutlet(info)
        nImage = 0
        nTarget = 0
        nNoTarget = 0
        for i, trial in images.iterrows():
            # Inter-image interval plus a random jitter.
            core.wait(image_interval + np.random.rand() * jitter)
            img_type = images['img_type'].iloc[i]
            image = choice(targets if img_type == 1 else no_targets)
            nImage = nImage + 1
            if img_type == 1:
                nTarget = nTarget + 1
            else:
                nNoTarget = nNoTarget + 1
            image.draw()
            timestamp = local_clock()
            images.at[i, 'timestamp'] = timestamp
            '''
            Si img_type = 1 -> Target -> Out=1
            Si img_type = 0 -> NoTarget -> Out=2
            # El Out implica escritura en csv final
            '''
            outlet.push_sample([2 if img_type == 0 else 1], timestamp)
            window.flip()
            # window.update()
            # offset
            core.wait(image_offset)
            # window.flip()
            # if len(event.getKeys()) > 0 or (time() - start) > experiment_time:
            #     break
            # NOTE(review): event.getKeys() returns a *list*, so comparing it
            # to the string 'Esc' is always False (PsychoPy's key name would
            # be 'escape' anyway) — this abort path can never fire; verify.
            if event.getKeys() == 'Esc':
                printError('Cancelando experimento...')
                break
            event.clearEvents()
        # Outro screen, then shut everything down.
        core.wait(1.5)
        text3.draw()
        text4.draw()
        window.flip()
        core.wait(5)
        window.close()
        process.join()
        print()
        printSuccess('---------------------------------------------')
        printSuccess("Datos del experimento en: experiments/{}".format(experiment))
        printSuccess('---------------------------------------------')
        printSuccess('Experimento finalizado')
        printSuccess("Número de imágenes mostradas: " + str(nImage))
        printSuccess("Número de imágenes Target mostradas: " + str(nTarget))
        printSuccess("Número de imágenes Non-Target mostradas: " + str(nNoTarget))
        printSuccess('---------------------------------------------')
        print()
        printInfo("DataFrame final: ")
        print()
        print(images)
        core.quit()
    except KeyboardInterrupt:
        # Ctrl-C: close the window and leave cleanly.
        printError('Cancelando experimento...')
        window.close()
        core.quit()
print("available_font_names:", fm.getFontFamilyNames()) # Create Window window = visual.Window((800, 600), units='norm', fullscr=False, allowGUI=True, screen=0) sometext = 'PRESS ANY KEY TO QUIT DEMO.' textbox1 = visual.TextBox( window=window, text=sometext, font_name=fm.getFontFamilyNames()[0], font_size=21, font_color=[-1, -1, 1], size=(1.9, .3), pos=(0.0, 0.25), grid_horz_justification='center', units='norm', ) textbox2 = visual.TextBox( window=window, text='This TextBox illustrates many of the different UX elements.', font_size=32, font_color=[1, -1, -1], background_color=[-1, -1, -1, 1], border_color=[-1, -1, 1, 1], border_stroke_width=4, textgrid_shape=[20, 4], # 20 cols (20 chars wide)
# Initializing rating scale ratingScale = visual.RatingScale(win, name='Preference', choices=['1', '2', '3', '4', '5', '6', '7'], pos=[-500, -200]) familiarityScale = visual.RatingScale( win, name='familiarity', choices=['1', '2', '3', '4', '5', '6', '7'], pos=[500, -200]) ratingTitle = visual.TextBox( window=win, text='Preference', font_size=40, font_color=[1, 1, 1], size=(1.9, .3), pos=(-.54, -.3), grid_horz_justification='center', units='norm', ) familiarityTitle = visual.TextBox( window=win, text='familiarity', font_size=40, font_color=[1, 1, 1], size=(1.9, .3), pos=(.54, -.3), grid_horz_justification='center', units='norm', )
def front():
    """
    Draw the main stimulus-selection menu and run its mouse/keyboard loop.

    Each menu entry is a TextBox label paired with an invisible ShapeStim
    of the same position that acts as the click target.  Left click on a
    parameter increments it, right click decrements it (both via
    VS.VS(...)); clicking a stimulus name launches that stimulus.
    Pressing 'escape' leaves the loop and closes `win`.

    Globals such as win, fontSize, fontClr, boarderClr, origin_x/origin_y,
    button_init_y, front_textbox_size, front_button_size, button_spacing,
    square1/square2 and all per-stimulus parameter values come from
    init_para (star-imported below).
    """
    import numpy as np
    from psychopy import core, visual, event
    import VS
    import drumgrating
    import movSinGrat_tuning
    import EYE_calibration
    import socket
    # NOTE(review): `from ... import *` inside a function body is a
    # SyntaxError on Python 3 — this code appears to target Python 2
    # (where it only warns); confirm the interpreter version.
    from init_para import *
    import TestNov
    import Vdrumgrating
    import Rf6x8

    # ------------------------------------------------------------------
    # CREATING TEXTBOXES
    # DRUMGRATING row (row 0): name + one box per adjustable parameter.
    # ------------------------------------------------------------------
    # creating a name textbox
    Name_Obj = visual.TextBox(
        window=win, text=("Movn sin Grdnt"), font_size=fontSize,
        font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 0.5 * front_textbox_size[0], origin_y - button_init_y),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for temporal frequency
    TempFreq_Obj = visual.TextBox(
        window=win, text=('Tmp Frq: ' + str(drumgrating_tempFreqVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 1.5 * front_textbox_size[0], origin_y - button_init_y),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for spatial frequency
    SpatFreq_Obj = visual.TextBox(
        window=win, text=('Spt Frq: ' + str(drumgrating_SpatFreqVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        units='norm',
        pos=(origin_x + 2.5 * front_textbox_size[0], origin_y - button_init_y),
        size=front_textbox_size,
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for time before the stimulation
    t_Before_Obj = visual.TextBox(
        window=win, text=('T_Bef: ' + str(drumgrating_t_beforeVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 3.5 * front_textbox_size[0], origin_y - button_init_y),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for duration of stimulation
    t_stim_Obj = visual.TextBox(
        window=win, text=('T_Dur: ' + str(drumgrating_t_DuringVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 4.5 * front_textbox_size[0], origin_y - button_init_y),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for the time after the stimulation
    t_after_Obj = visual.TextBox(
        window=win, text=('T_Aft: ' + str(drumgrating_t_afterVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 5.5 * front_textbox_size[0], origin_y - button_init_y),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a Synch textbox
    synch_ObjDRUM = visual.TextBox(
        window=win, text=('Synch:' + str(drumgrating_syncVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 6.5 * front_textbox_size[0], origin_y - button_init_y),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a motionMode textbox
    mtnmode_Obj = visual.TextBox(
        window=win, text=("Motn Mode: " + str(drumgrating_Motionmode)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 7.5 * front_textbox_size[0], origin_y - button_init_y),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')

    # ------------------------------------------------------------------
    # ANGLE ORIENTATION row (row 1): same layout for the ORI grating.
    # ------------------------------------------------------------------
    # creating a name textbox
    Name_ObjORI = visual.TextBox(
        window=win, text=("Movn ORI Grdnt"), font_size=fontSize,
        font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 0.5 * front_textbox_size[0],
             origin_y - button_init_y - 1 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for temporal frequency
    TempFreq_ObjORI = visual.TextBox(
        window=win, text=('Tmp Frq: ' + str(MovSinGrat_tempFreqVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 2.5 * front_textbox_size[0],
             origin_y - button_init_y - 1 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for spatial frequency
    SpatFreq_ObjORI = visual.TextBox(
        window=win, text=('Spt Frq: ' + str(MovSinGrat_SpatFreqVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        units='norm',
        pos=(origin_x + 3.5 * front_textbox_size[0],
             origin_y - button_init_y - 1 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size,
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for time before the stimulation
    t_Before_ObjORI = visual.TextBox(
        window=win, text=('T_Bef: ' + str(MovSinGrat_t_beforeVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 4.5 * front_textbox_size[0],
             origin_y - button_init_y - 1 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for duration of stimulation
    t_stim_ObjORI = visual.TextBox(
        window=win, text=('T_Dur: ' + str(MovSinGrat_t_stimVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 5.5 * front_textbox_size[0],
             origin_y - button_init_y - 1 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for the time after the stimulation
    t_after_ObjORI = visual.TextBox(
        window=win, text=('T_Aft: ' + str(MovSinGrat_t_afterVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 6.5 * front_textbox_size[0],
             origin_y - button_init_y - 1 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a Synch textbox
    synch_ObjORI = visual.TextBox(
        window=win, text=('Synch:' + str(MovSinGrat_syncVal)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 7.5 * front_textbox_size[0],
             origin_y - button_init_y - 1 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a motionMode textbox
    mtnmode_ObjORI = visual.TextBox(
        window=win, text=("Motn Mode: " + str(MovSinGrat_Motionmode)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 8.5 * front_textbox_size[0],
             origin_y - button_init_y - 1 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a ledstate textbox
    ledstate_Obj = visual.TextBox(
        window=win, text=("Ledstate: " + str(MovSinGrat_ledstate)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 9.5 * front_textbox_size[0],
             origin_y - button_init_y - 1 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a tuning textbox (sits in the 1.5 slot of the ORI row)
    MovSinGrat_Tuning_Obj = visual.TextBox(
        window=win, text=("Feature: " + str(MovSinGrat_features)),
        font_size=fontSize, font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 1.5 * front_textbox_size[0],
             origin_y - button_init_y - 1 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # creating a textbox for calibration code (row 2)
    Calibration_Obj = visual.TextBox(
        window=win, text=("Calibration"), font_size=fontSize,
        font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 0.5 * front_textbox_size[0],
             origin_y - button_init_y - 2 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # VdrumGrating entry (row 3)
    Name_vdrumgrating = visual.TextBox(
        window=win, text=("VdrumGrating"), font_size=fontSize,
        font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 0.5 * front_textbox_size[0],
             origin_y - button_init_y - 3 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')
    # RF 6X8 TEXTBOX (row 4)
    Name_rf6x8 = visual.TextBox(
        window=win, text=("rf 6x8"), font_size=fontSize,
        font_color=fontClr, border_color=boarderClr,
        pos=(origin_x + 0.5 * front_textbox_size[0],
             origin_y - button_init_y - 4 * (front_textbox_size[1] + button_spacing)),
        size=front_textbox_size, units='norm',
        grid_horz_justification='center', grid_vert_justification='center')

    # ------------------------------------------------------------------
    # This is where we create the necessary buttons: invisible
    # (opacity=0) ShapeStims placed over each textbox for hit-testing.
    # ------------------------------------------------------------------
    # DRUMGRATING
    # moving sin gradient start button
    mvnSinGrdnt = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=Name_Obj.getPosition(), opacity=0, vertices=front_button_size)
    # Spatial Frequency Buttons
    SpatFreqBut = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=SpatFreq_Obj.getPosition(), opacity=0, vertices=front_button_size)
    # Temporal Frequency Buttons
    TempFreqBut = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=TempFreq_Obj.getPosition(), opacity=0, vertices=front_button_size)
    # Time Before stimulation Buttons
    t_beforeBut = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=180,
        pos=t_Before_Obj.getPosition(), opacity=0, vertices=front_button_size)
    t_stimBut = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=180,
        pos=t_stim_Obj.getPosition(), opacity=0, vertices=front_button_size)
    # time After stimulation Buttons
    t_afterBut = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=t_after_Obj.getPosition(), opacity=0, vertices=front_button_size)
    # Synch button
    synchButDRUM = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=synch_ObjDRUM.getPosition(), opacity=0, vertices=front_button_size)
    # motion mode button
    mtnModeBut = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=mtnmode_Obj.getPosition(), opacity=0, vertices=front_button_size)
    # MOVSINGRAT TUNING
    # Tuning button
    MovSinGrat_TuningBut = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=MovSinGrat_Tuning_Obj.getPosition(), opacity=0, vertices=front_button_size)
    # Spatial Frequency Buttons
    SpatFreqButORI = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=SpatFreq_ObjORI.getPosition(), opacity=0, vertices=front_button_size)
    # Temporal Frequency Buttons
    TempFreqButORI = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=TempFreq_ObjORI.getPosition(), opacity=0, vertices=front_button_size)
    # Time Before stimulation Buttons
    t_beforeButORI = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=180,
        pos=t_Before_ObjORI.getPosition(), opacity=0, vertices=front_button_size)
    t_stimButORI = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=180,
        pos=t_stim_ObjORI.getPosition(), opacity=0, vertices=front_button_size)
    # time After stimulation Buttons
    t_afterButORI = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=t_after_ObjORI.getPosition(), opacity=0, vertices=front_button_size)
    # Synch button
    synchButORI = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=synch_ObjORI.getPosition(), opacity=0, vertices=front_button_size)
    # motion mode button
    mtnModeButORI = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=mtnmode_ObjORI.getPosition(), opacity=0, vertices=front_button_size)
    # moving sin gradient start button
    mvnSinGrdntORI = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=Name_ObjORI.getPosition(), opacity=0, vertices=front_button_size)
    # ledflag button
    ledstateBut = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=ledstate_Obj.getPosition(), opacity=0, vertices=front_button_size)
    # calibration start button
    calibration_button = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=Calibration_Obj.getPosition(), opacity=0, vertices=front_button_size)
    # drumgrating with mask, directional split, and rotation
    vdrumGrating_But = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=Name_vdrumgrating.getPosition(), opacity=0, vertices=front_button_size)
    rf6x8_But = visual.ShapeStim(
        win=win, units="norm", fillColor=[0, 0, 0], ori=0,
        pos=Name_rf6x8.getPosition(), opacity=0, vertices=front_button_size)

    # drawing the textboxes
    t_after_Obj.draw()
    t_stim_Obj.draw()
    t_Before_Obj.draw()
    TempFreq_Obj.draw()
    SpatFreq_Obj.draw()
    Name_Obj.draw()
    synch_ObjDRUM.draw()
    mtnmode_Obj.draw()
    t_after_ObjORI.draw()
    t_stim_ObjORI.draw()
    t_Before_ObjORI.draw()
    TempFreq_ObjORI.draw()
    SpatFreq_ObjORI.draw()
    Name_ObjORI.draw()
    synch_ObjORI.draw()
    mtnmode_ObjORI.draw()
    ledstate_Obj.draw()
    MovSinGrat_Tuning_Obj.draw()
    Calibration_Obj.draw()
    Name_vdrumgrating.draw()
    Name_rf6x8.draw()
    # drawing the buttons
    mvnSinGrdnt.draw()
    t_stimBut.draw()
    t_afterBut.draw()
    t_beforeBut.draw()
    TempFreqBut.draw()
    SpatFreqBut.draw()
    synchButDRUM.draw()
    mtnModeBut.draw()
    mvnSinGrdntORI.draw()
    t_stimButORI.draw()
    t_afterButORI.draw()
    t_beforeButORI.draw()
    TempFreqButORI.draw()
    SpatFreqButORI.draw()
    synchButORI.draw()
    mtnModeButORI.draw()
    ledstateBut.draw()
    MovSinGrat_TuningBut.draw()
    calibration_button.draw()
    vdrumGrating_But.draw()
    rf6x8_But.draw()
    # drawing squares
    square1.draw()
    square2.draw()
    # flipping the window to display the textboxes and buttons
    win.flip()
    mouse = event.Mouse(visible=True, win=win)
    keep_going = True
    # Main event loop: poll mouse + keyboard at ~10 Hz and dispatch.
    while keep_going:
        # NOTE(review): only `core` is imported from psychopy above, so
        # `psychopy.core` relies on a module-level `import psychopy`
        # elsewhere in the file — confirm it exists.
        psychopy.core.wait(0.1)
        currMouse = mouse.getPressed()
        keys = event.getKeys()  # retrieve key presses from the buffer (in press order)
        # --- DRUMGRATING: launch, or adjust one parameter per click.
        # currMouse[0] == left button (increment), currMouse[2] == right
        # button (decrement); VS.VS returns the updated value.
        if mouse.isPressedIn(mvnSinGrdnt) and currMouse[0] == 1:
            square1.fillColor = [-1, -1, -1]
            square2.fillColor = [-1, -1, -1]
            square1.draw()
            square2.draw()
            win.flip()
            drumgrating.drumgrating(drumgrating_SpatFreqVal, drumgrating_tempFreqVal,
                                    drumgrating_t_beforeVal, drumgrating_t_DuringVal,
                                    drumgrating_t_afterVal, drumgrating_syncVal,
                                    drumgrating_Motionmode)
            # add orientation values: angle0, angle1, randomseq, ledstate)
            keys = event.getKeys()  # retrieving key presses from the buffer during the stimulation
            keys = []  # clearing the key presses
            square1.fillColor = [-1, -1, -1]
            square2.fillColor = [-1, -1, -1]
            square1.draw()
            square2.draw()
            win.flip()
        elif mouse.isPressedIn(synchButDRUM) and currMouse[0] == 1:
            drumgrating_syncVal = VS.VS("syncStat", 1, drumgrating_syncVal)
            synch_ObjDRUM.setText('Synch:' + str(drumgrating_syncVal))
        elif mouse.isPressedIn(synchButDRUM) and currMouse[2] == 1:
            drumgrating_syncVal = VS.VS("syncStat", 0, drumgrating_syncVal)
            synch_ObjDRUM.setText('Synch:' + str(drumgrating_syncVal))
        elif mouse.isPressedIn(t_stimBut) and currMouse[2] == 1:
            drumgrating_t_DuringVal = VS.VS("Duration of Stimulation", 0, drumgrating_t_DuringVal)
            # NOTE(review): stray trailing comma makes this a 1-tuple
            # expression — harmless, but probably unintended.
            t_stim_Obj.setText('T_Dur: ' + str(drumgrating_t_DuringVal)),
        elif mouse.isPressedIn(t_stimBut) and currMouse[0] == 1:
            drumgrating_t_DuringVal = VS.VS("Duration of Stimulation", 1, drumgrating_t_DuringVal)
            t_stim_Obj.setText('T_Dur: ' + str(drumgrating_t_DuringVal))
        elif mouse.isPressedIn(t_afterBut) and currMouse[0] == 1:
            drumgrating_t_afterVal = VS.VS("Time After Stimulation", 1, drumgrating_t_afterVal)
            t_after_Obj.setText('T_Aft: ' + str(drumgrating_t_afterVal))
        elif mouse.isPressedIn(t_afterBut) and currMouse[2] == 1:
            drumgrating_t_afterVal = VS.VS("Time After Stimulation", 0, drumgrating_t_afterVal)
            t_after_Obj.setText('T_Aft: ' + str(drumgrating_t_afterVal))
        elif mouse.isPressedIn(t_beforeBut) and currMouse[2] == 1:
            drumgrating_t_beforeVal = VS.VS("Time before Stimulation", 0, drumgrating_t_beforeVal)
            t_Before_Obj.setText('T_Bef: ' + str(drumgrating_t_beforeVal))
        elif mouse.isPressedIn(t_beforeBut) and currMouse[0] == 1:
            drumgrating_t_beforeVal = VS.VS("Time before Stimulation", 1, drumgrating_t_beforeVal)
            t_Before_Obj.setText('T_Bef: ' + str(drumgrating_t_beforeVal))
        elif mouse.isPressedIn(TempFreqBut) and currMouse[2] == 1:
            drumgrating_tempFreqVal = VS.VS("Temporal Frequency", 0, drumgrating_tempFreqVal)
            TempFreq_Obj.setText('Tmp Frq: ' + str(drumgrating_tempFreqVal))
        elif mouse.isPressedIn(TempFreqBut) and currMouse[0] == 1:
            drumgrating_tempFreqVal = VS.VS("Temporal Frequency", 1, drumgrating_tempFreqVal)
            TempFreq_Obj.setText('Tmp Frq: ' + str(drumgrating_tempFreqVal))
        elif mouse.isPressedIn(SpatFreqBut) and currMouse[2] == 1:
            drumgrating_SpatFreqVal = VS.VS("Spatial Frequency", 0, drumgrating_SpatFreqVal)
            SpatFreq_Obj.setText('Spt Frq: ' + str(drumgrating_SpatFreqVal))
        elif mouse.isPressedIn(SpatFreqBut) and currMouse[0] == 1:
            drumgrating_SpatFreqVal = VS.VS("Spatial Frequency", 1, drumgrating_SpatFreqVal)
            SpatFreq_Obj.setText('Spt Frq: ' + str(drumgrating_SpatFreqVal))
        elif mouse.isPressedIn(mtnModeBut) and currMouse[2] == 1:
            drumgrating_Motionmode = VS.VS("Motionmode", 0, drumgrating_Motionmode)
            mtnmode_Obj.setText("Motn Mode: " + str(drumgrating_Motionmode))
        elif mouse.isPressedIn(mtnModeBut) and currMouse[0] == 1:
            drumgrating_Motionmode = VS.VS("Motionmode", 1, drumgrating_Motionmode)
            mtnmode_Obj.setText("Motn Mode: " + str(drumgrating_Motionmode))
        # --- ORI grating: same pattern for the MovSinGrat parameters.
        elif mouse.isPressedIn(synchButORI) and currMouse[0] == 1:
            MovSinGrat_syncVal = VS.VS("syncStatORI", 1, MovSinGrat_syncVal)
            synch_ObjORI.setText('Synch:' + str(MovSinGrat_syncVal))
        elif mouse.isPressedIn(synchButORI) and currMouse[2] == 1:
            MovSinGrat_syncVal = VS.VS("syncStatORI", 0, MovSinGrat_syncVal)
            synch_ObjORI.setText('Synch:' + str(MovSinGrat_syncVal))
        elif mouse.isPressedIn(t_stimButORI) and currMouse[2] == 1:
            MovSinGrat_t_stimVal = VS.VS("Duration of StimulationORI", 0, MovSinGrat_t_stimVal)
            t_stim_ObjORI.setText('T_Dur: ' + str(MovSinGrat_t_stimVal)),
        elif mouse.isPressedIn(t_stimButORI) and currMouse[0] == 1:
            MovSinGrat_t_stimVal = VS.VS("Duration of StimulationORI", 1, MovSinGrat_t_stimVal)
            t_stim_ObjORI.setText('T_Dur: ' + str(MovSinGrat_t_stimVal))
        elif mouse.isPressedIn(t_afterButORI) and currMouse[0] == 1:
            MovSinGrat_t_afterVal = VS.VS("Time After Stimulation", 1, MovSinGrat_t_afterVal)
            t_after_ObjORI.setText('T_Aft: ' + str(MovSinGrat_t_afterVal))
        elif mouse.isPressedIn(t_afterButORI) and currMouse[2] == 1:
            MovSinGrat_t_afterVal = VS.VS("Time After StimulationORI", 0, MovSinGrat_t_afterVal)
            t_after_ObjORI.setText('T_Aft: ' + str(MovSinGrat_t_afterVal))
        elif mouse.isPressedIn(t_beforeButORI) and currMouse[2] == 1:
            MovSinGrat_t_beforeVal = VS.VS("Time before StimulationORI", 0, MovSinGrat_t_beforeVal)
            t_Before_ObjORI.setText('T_Bef: ' + str(MovSinGrat_t_beforeVal))
        elif mouse.isPressedIn(t_beforeButORI) and currMouse[0] == 1:
            MovSinGrat_t_beforeVal = VS.VS("Time before StimulationORI", 1, MovSinGrat_t_beforeVal)
            t_Before_ObjORI.setText('T_Bef: ' + str(MovSinGrat_t_beforeVal))
        elif mouse.isPressedIn(TempFreqButORI) and currMouse[2] == 1:
            MovSinGrat_tempFreqVal = VS.VS("Temporal FrequencyORI", 0, MovSinGrat_tempFreqVal)
            TempFreq_ObjORI.setText('Tmp Frq: ' + str(MovSinGrat_tempFreqVal))
        elif mouse.isPressedIn(TempFreqButORI) and currMouse[0] == 1:
            MovSinGrat_tempFreqVal = VS.VS("Temporal FrequencyORI", 1, MovSinGrat_tempFreqVal)
            TempFreq_ObjORI.setText('Tmp Frq: ' + str(MovSinGrat_tempFreqVal))
        elif mouse.isPressedIn(SpatFreqButORI) and currMouse[2] == 1:
            MovSinGrat_SpatFreqVal = VS.VS("Spatial FrequencyORI", 0, MovSinGrat_SpatFreqVal)
            SpatFreq_ObjORI.setText('Spt Frq: ' + str(MovSinGrat_SpatFreqVal))
        elif mouse.isPressedIn(SpatFreqButORI) and currMouse[0] == 1:
            MovSinGrat_SpatFreqVal = VS.VS("Spatial FrequencyORI", 1, MovSinGrat_SpatFreqVal)
            SpatFreq_ObjORI.setText('Spt Frq: ' + str(MovSinGrat_SpatFreqVal))
        elif mouse.isPressedIn(mtnModeButORI) and currMouse[2] == 1:
            MovSinGrat_Motionmode = VS.VS("MotionmodeORI", 0, MovSinGrat_Motionmode)
            mtnmode_ObjORI.setText("Motn Mode: " + str(MovSinGrat_Motionmode))
        elif mouse.isPressedIn(mtnModeButORI) and currMouse[0] == 1:
            MovSinGrat_Motionmode = VS.VS("MotionmodeORI", 1, MovSinGrat_Motionmode)
            mtnmode_ObjORI.setText("Motn Mode: " + str(MovSinGrat_Motionmode))
        elif mouse.isPressedIn(ledstateBut) and currMouse[2] == 1:
            MovSinGrat_ledstate = VS.VS("Ledstate :", 0, MovSinGrat_ledstate)
            ledstate_Obj.setText("Ledstate : " + str(MovSinGrat_ledstate))
        elif mouse.isPressedIn(ledstateBut) and currMouse[0] == 1:
            MovSinGrat_ledstate = VS.VS("Ledstate :", 1, MovSinGrat_ledstate)
            ledstate_Obj.setText("Ledstate : " + str(MovSinGrat_ledstate))
        elif mouse.isPressedIn(MovSinGrat_TuningBut) and currMouse[2] == 1:
            MovSinGrat_features = VS.VS("Tuning feature: ", 0, MovSinGrat_features)
            MovSinGrat_Tuning_Obj.setText("Feature: " + str(MovSinGrat_features))
        elif mouse.isPressedIn(MovSinGrat_TuningBut) and currMouse[0] == 1:
            MovSinGrat_features = VS.VS("Tuning feature: ", 1, MovSinGrat_features)
            MovSinGrat_Tuning_Obj.setText("Feature: " + str(MovSinGrat_features))
        # --- Stimulus launchers: blank the squares, run the stimulus,
        # then flush stale key presses and restore the blanked squares.
        elif mouse.isPressedIn(mvnSinGrdntORI) and currMouse[0] == 1:
            square1.fillColor = [-1, -1, -1]
            square2.fillColor = [-1, -1, -1]
            square1.draw()
            square2.draw()
            win.flip()
            movSinGrat_tuning.movSinGrat(MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
                                         MovSinGrat_t_beforeVal, MovSinGrat_t_stimVal,
                                         MovSinGrat_t_afterVal, MovSinGrat_syncVal,
                                         MovSinGrat_Motionmode, MovSinGrat_features,
                                         MovSinGrat_ledstate)
            # add orientation values: angle0, angle1, randomseq, ledstate)
            keys = event.getKeys()  # retrieving key presses from the buffer during the stimulation
            keys = []  # clearing the key presses
            square1.fillColor = [-1, -1, -1]
            square2.fillColor = [-1, -1, -1]
            square1.draw()
            square2.draw()
            win.flip()
        elif mouse.isPressedIn(vdrumGrating_But) and currMouse[0] == 1:
            square1.fillColor = [-1, -1, -1]
            square2.fillColor = [-1, -1, -1]
            square1.draw()
            square2.draw()
            win.flip()
            Vdrumgrating.Vdrumgrating(MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
                                      MovSinGrat_t_beforeVal, MovSinGrat_t_stimVal,
                                      MovSinGrat_t_afterVal, MovSinGrat_syncVal,
                                      MovSinGrat_Motionmode)
            keys = event.getKeys()  # retrieving key presses from the buffer during the stimulation
            keys = []  # clearing the key presses
            square1.fillColor = [-1, -1, -1]
            square2.fillColor = [-1, -1, -1]
            square1.draw()
            square2.draw()
            win.flip()
        elif mouse.isPressedIn(calibration_button) and currMouse[0] == 1:
            EYE_calibration.calibration(win)
            win.flip()
        elif mouse.isPressedIn(rf6x8_But) and currMouse[0] == 1:
            square1.fillColor = [-1, -1, -1]
            square2.fillColor = [-1, -1, -1]
            square1.draw()
            square2.draw()
            win.flip()
            Rf6x8.rf6x8(win)
            keys = event.getKeys()  # retrieving key presses from the buffer during the stimulation
            keys = []  # clearing the key presses
            square1.fillColor = [-1, -1, -1]
            square2.fillColor = [-1, -1, -1]
            square1.draw()
            square2.draw()
            win.flip()
        elif keys:
            # 'escape' ends the menu loop.
            if keys[0] == 'escape':
                break
        # these functions are to draw all the buttons and textboxes
        # (refresh the menu after any click so updated labels show).
        if sum(currMouse) > 0:
            t_after_Obj.draw()
            t_stim_Obj.draw()
            t_Before_Obj.draw()
            TempFreq_Obj.draw()
            SpatFreq_Obj.draw()
            synch_ObjDRUM.draw()
            Name_Obj.draw()
            mtnmode_Obj.draw()
            t_after_ObjORI.draw()
            t_stim_ObjORI.draw()
            t_Before_ObjORI.draw()
            TempFreq_ObjORI.draw()
            SpatFreq_ObjORI.draw()
            synch_ObjORI.draw()
            Name_ObjORI.draw()
            mtnmode_ObjORI.draw()
            ledstate_Obj.draw()
            MovSinGrat_Tuning_Obj.draw()
            Calibration_Obj.draw()
            vdrumGrating_But.draw()
            Name_vdrumgrating.draw()
            Name_rf6x8.draw()
            square1.draw()
            square2.draw()
            win.flip()
    win.close()
'Luxi Mono']] if prefered_fonts: font_name=prefered_fonts[0] else: font_name=available_font_names[0][0] # Create a TextBox stim and perform draw on it. Time how long it takes # to create the initial stim and do the initial draw. stime=core.getTime()*1000.0 textbox=visual.TextBox(window=window, text=text, font_name=font_name, font_size=32, font_color=[0,0,0], dpi=72, size=(1.6,.25), pos=(0.0,.25), units='norm', grid_horz_justification='center', grid_vert_justification='center', color_space='rgb255' ) textbox.draw() etime=core.getTime()*1000.0 textbox_init_dur=etime-stime # Create a TextStim stim and perform draw on it. Time how long it takes # to create the initial stim and do the initial draw. stime=core.getTime()*1000.0 textstim = visual.TextStim(window,pos=(0.0,-(display_resolution[1]/4)), alignHoriz='center',alignVert='center',height=32,
from psychopy import visual

# Minimal TextBox demo: open a default window and create a TextBox stimulus.
window = visual.Window()
# BUG FIX: the TextBox must be associated with the window it will be drawn on.
# Constructing it with no window (the previous `visual.TextBox()`) leaves it
# unattached and fails as soon as the stimulus is used; every other TextBox in
# this project is created with an explicit `window=` argument.
textbox = visual.TextBox(window=window)
def movSinGrat(MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
               MovSinGrat_t_before, MovSinGrat_t_During, MovSinGrat_t_after,
               Synch, MovSinGrat_Motionmode, MovSinGrat_features,
               MovSinGrat_ledstate):
    '''
    Present moving sinusoidal grating stimuli for a tuning experiment.

    INPUT: parameters that are potentially changed by the user in front.py by
    clicking buttons on the UI.
    OUTPUT: visual stimuli for tuning, depending on the feature selected;
    the default is 'ori', which can be changed in init_para.py.

    Note >> TUNING FEATURE VALUES are defined as follows:
        ori = 0, spat_freq = 1, temp_freq = 2, contrast = 3, location = 4

    NOTE(review): this function is Python 2 code (`print` statement, `xrange`,
    list-returning `map`) — confirm the runtime before porting.
    '''
    from psychopy import visual, event, clock, gui
    from win32api import GetSystemMetrics
    from datetime import datetime
    from init_para import (
        MovSinGrat_addblank, MovSinGrat_Amp_sinu, MovSinGrat_controlmod,
        MovSinGrat_dirindex, MovSinGrat_ori, MovSinGrat_t_triginit,
        MovSinGrat_GammaFactor, MovSinGrat_AmpFactor, MovSinGrat_contrast,
        MovSinGrat_MeanLum, win, winWidth, winHeight, ScrnNum, PixelSize,
        winWidthofEachDisp, DisplayFrameWidth, FR, square1, square2, mask_L,
        mask_R, fontSize, fontClr, win, Local_IP, Local_Port, Remote_IP,
        Remote_Port, ani_distance, MovSinGrat_Rep, MovSinGrat_randomseq,
        MovSinGrat_features_dict, MovSinGrat_angles_list,
        MovSinGrat_temp_lin_list, MovSinGrat_temp_osc_list,
        MovSinGrat_location_list, MovSinGrat_contrast_list)
    import socket
    import numpy as np
    import conv

    # To display vs on a single screen set one_screen = True:
    one_screen = True

    # creating mouse functionality (middle button is the abort/exit button)
    mouse = event.Mouse(visible=True, win=win)

    if Synch:
        # ---- synchronised mode: handshake with the control computer via UDP ----
        # creating the socket in which communications will take place
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # binding the local IP address and local port
        sock.bind((Local_IP, Local_Port))
        # creating textbox showing that this VS computer is waiting for UDP signal
        standBy = visual.TextBox(
            window=win,
            text=("Waiting for starting the control computer."),
            font_size=fontSize,
            font_color=fontClr,
            pos=(-2690, 475),
            size=(300, 37),
            units='pix',
            grid_horz_justification='center',
            grid_vert_justification='center')
        standBy.draw()
        square1.draw()  # have to draw trigger squ; otherwise transient white will happen
        square2.draw()
        win.flip()
        try:
            # wait for the command 'gammacorrection'
            info = sock.recv(1024)
        except Exception:
            sock.close()
            print("Did not recieve info, connection timeout.")
            return
        # sending 'gammafloatampfloat' to the second PC
        # NOTE(review): drumgrating_GammaFactor / drumgrating_AmpFactor are not
        # defined in this function (the imports are MovSinGrat_*) — this line
        # would raise NameError; confirm the intended names.
        sock.sendto(("gamma" + str(drumgrating_GammaFactor) + "amp" +
                     str(drumgrating_AmpFactor)), (Remote_IP, Remote_Port))
        # creating textbox showing that this VS computer is waiting for UDP signal
        standBy = visual.TextBox(window=win,
                                 text=("Control Computer is Ready."),
                                 font_size=fontSize,
                                 font_color=fontClr,
                                 pos=(-2690, 475),
                                 size=(300, 37),
                                 units='pix',
                                 grid_horz_justification='center',
                                 grid_vert_justification='center')
        standBy.draw()
        try:
            # waiting for the signal autoVs
            drumgrating_controlmod = sock.recv(1024)
        except Exception:
            sock.close()
            print(
                "Did not recieve drumgrating_controlmod, connection timeout.")
            return
        # sending 'Wait for parameters' to the second PC
        sock.sendto("Wait for parameters", (Remote_IP, Remote_Port))

        if MovSinGrat_controlmod == 'autoVS':
            try:
                drumgrating_parasize = sock.recv(1024)
            except Exception:
                sock.close()
                print("Did not recieve parasize, connection timeout.")
                return
            # sending a completion transcript
            sock.sendto("read parasize", (Remote_IP, Remote_Port))
            # converting the string recieved into int
            drumgrating_parasize = conv.deleteParasize(drumgrating_parasize)
            # making the array in which the parameters will be added to
            paras = np.empty(shape=[drumgrating_parasize, 9])
            # adding the parameters to the array
            # this for loop receives the 9 parameters for all the stimulations
            # and adds them to an array
            for i in range(
                    drumgrating_parasize):  # start from 0 to parasize[0] - 1
                temp = sock.recv(1024)
                temp = conv.convStr2Dig(temp)
                # adding the parameters to the array (temp) at position index
                # NOTE(review): the assignment below is commented out, so the
                # received values are currently discarded — confirm intent.
                #paras[i, :] = temp
                sock.sendto("Para DONE", (Remote_IP, Remote_Port))
            try:
                # recieving all orientation for stimuli; 1 for veritcal, 0 for horizontal
                paratemp = sock.recv(1024)
            except Exception:
                sock.close()
                print("Did not recieve message, connection timeout.")
                return
            paratemp = conv.convStr2Dig(paratemp)
            # setting up the parameters based on what was send in the paras variable
            drumgrating_Ori = int(paratemp[0])
            Motionmode = int(paratemp[1])
            drumgrating_Amp_sinu = paratemp[2]
            drumgrating_addblank = paratemp[3]
            sock.sendto("Para DONE", (Remote_IP, Remote_Port))

            # creating generalized sequence of randomely shuffled stimuli for
            # tuning, given a particular feature.
            # This first if chain creates two variables:
            # tuning_stim_val and tuning_stim_ind, which contain all unique
            # stimulus values and the corresponding indices, respectively,
            # given the tuning feature selected.
            if MovSinGrat_features == 0:  # ori
                tuning_stim_val = map(
                    float, MovSinGrat_angles_list
                )  # map applies float() to all elements, converting strings to float
                tuning_stim_ind = range(len(tuning_stim_val))
            elif MovSinGrat_features == 1:  # spat_freq
                # NOTE(review): MovSinGrat_spat_list is not in the init_para
                # import list above — this branch would raise NameError.
                tuning_stim_val = map(float, MovSinGrat_spat_list)
                tuning_stim_ind = range(len(tuning_stim_val))
            elif MovSinGrat_features == 2:  # temp_freq
                # assign temp freq depending on Motionmode (lin vs osc motion)
                if MovSinGrat_Motionmode == 0:
                    tuning_stim_val = map(
                        float, MovSinGrat_temp_lin_list
                    )  # SHOULD THIS BE DEPENDENT ON MOTIONMODE???
                    tuning_stim_ind = range(len(tuning_stim_val))
                else:
                    tuning_stim_val = map(
                        float, MovSinGrat_temp_osc_list
                    )  # SHOULD THIS BE DEPENDENT ON MOTIONMODE???
                    tuning_stim_ind = range(len(tuning_stim_val))
            elif MovSinGrat_features == 3:  # contrast
                tuning_stim_val = MovSinGrat_contrast_list  # HAS NOT BEEN CREATED IN INIT_PARA YET;
                tuning_stim_ind = range(len(tuning_stim_val))
            elif MovSinGrat_features == 4:  # location
                tuning_stim_val = map(float, MovSinGrat_location_list)
                tuning_stim_ind = range(len(tuning_stim_val))
            else:
                print 'ERROR: MovSinGrat_features outside range. Tuning Feature Value must an integer be between 0 and 4.'

            # calculating total number of stimuli that will be presented
            # (based on number of repetitions for each stimulus*ledstate combo)
            tot_num_stim = MovSinGrat_Rep * MovSinGrat_ledstate * len(
                tuning_stim_ind)
            # each column is one parameter (spat_freq, temp_freq, stimId etc.)
            # for each presented stimulus (row)
            paras = np.empty(shape=[tot_num_stim, 12])
            # Generating sequence of order of presenting stimID that will ONLY
            # change the TUNING FEATURE PARAMETER of the stimulus:
            for repind in xrange(MovSinGrat_Rep):
                stimId = np.empty(len(tuning_stim_ind) * MovSinGrat_ledstate)
                stimId = map(int, stimId)
                if MovSinGrat_randomseq:
                    for iled in xrange(
                            0, MovSinGrat_ledstate
                    ):  # for each ledstate, one of each angle will be assigned in random order
                        np.random.shuffle(tuning_stim_ind)
                        for n in xrange(
                                iled, len(stimId), MovSinGrat_ledstate
                        ):  # assign tuning_stim_ind elements to stimId by hops of size ledstate (if ledstate = 1, stimId = tuning_stim_ind)
                            stimId[n] = tuning_stim_ind[n / MovSinGrat_ledstate]
                else:
                    for iled in xrange(0, MovSinGrat_ledstate):
                        for n in xrange(iled, len(stimId), MovSinGrat_ledstate):
                            stimId[n] = tuning_stim_ind[n / MovSinGrat_ledstate]
                # adding the parameters as an array at index i
                for localstimid in xrange(0, (len(stimId))):
                    if MovSinGrat_features == 0:  # 0 = ori
                        paras[repind * (len(stimId)) + localstimid, :] = [
                            MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
                            MovSinGrat_contrast, MovSinGrat_MeanLum,
                            MovSinGrat_dirindex, MovSinGrat_t_before,
                            MovSinGrat_t_During, MovSinGrat_t_after,
                            MovSinGrat_t_triginit,
                            tuning_stim_val[stimId[localstimid]],
                            MovSinGrat_ledstate, 0
                        ]  # ADD LOCATION
                    elif MovSinGrat_features == 1:  # 1 = spat
                        paras[repind * (len(stimId)) + localstimid, :] = [
                            tuning_stim_val[stimId[localstimid]],
                            MovSinGrat_tempFreqVal, MovSinGrat_contrast,
                            MovSinGrat_MeanLum, MovSinGrat_dirindex,
                            MovSinGrat_t_before, MovSinGrat_t_During,
                            MovSinGrat_t_after, MovSinGrat_t_triginit,
                            MovSinGrat_ori, MovSinGrat_ledstate, 0
                        ]  # ADD LOCATION
                    elif MovSinGrat_features == 2:  # and movSinGrat_motionMode == 0: #2 = TempFreq
                        paras[repind * (len(stimId)) + localstimid, :] = [
                            MovSinGrat_SpatFreqVal,
                            tuning_stim_val[stimId[localstimid]],
                            MovSinGrat_contrast, MovSinGrat_MeanLum,
                            MovSinGrat_dirindex, MovSinGrat_t_before,
                            MovSinGrat_t_During, MovSinGrat_t_after,
                            MovSinGrat_t_triginit, MovSinGrat_ori,
                            MovSinGrat_ledstate, 0
                        ]  # ADD LOCATION
                    elif MovSinGrat_features == 3:  # 3 = contrast
                        paras[repind * (len(stimId)) + localstimid, :] = [
                            MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
                            tuning_stim_val[stimId[localstimid]],
                            MovSinGrat_MeanLum, MovSinGrat_dirindex,
                            MovSinGrat_t_before, MovSinGrat_t_During,
                            MovSinGrat_t_after, MovSinGrat_t_triginit,
                            MovSinGrat_ori, MovSinGrat_ledstate, 0
                        ]  # ADD LOCATION
                    #elif MovSinGrat_features == 4: #4 = location
                    #    paras[repind*(len(stimId))+localstimid, :] = [MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal, MovSinGrat_contrast, MovSinGrat_MeanLum, MovSinGrat_dirindex,
                    #                                                  MovSinGrat_t_before, MovSinGrat_t_During, MovSinGrat_t_after, MovSinGrat_t_triginit, MovSinGrat_ori, MovSinGrat_ledstate, 0] #ADD LOCATION
            # NOTE(review): the round-trip below re-packs and re-unpacks the
            # same four values — appears to be a no-op; confirm intent.
            paratemp = [
                drumgrating_Ori, Motionmode, drumgrating_Amp_sinu,
                drumgrating_addblank
            ]
            # setting up the parameters based on what was send in the paras variable
            drumgrating_Ori = int(paratemp[0])
            Motionmode = int(paratemp[1])
            drumgrating_Amp_sinu = paratemp[2]
            drumgrating_addblank = paratemp[3]
        elif MovSinGrat_controlmod == 'manualVS':
            return

    # if Synch is False, this else condition will make the parameters in the
    # same format as if Synch was True
    else:
        # Naming the experiment to create fileName (used when saving at the end)
        instruction_text = visual.TextStim(
            win,
            text=u'Name experiment and press enter to start.',
            pos=(0, 0.5))
        answer_text = visual.TextStim(win)
        # show instructions
        instruction_text.draw()
        square1.draw()  # have to draw trigger squ; otherwise transient white will happen
        square2.draw()
        win.flip()
        # get users input for experiment name
        now = True
        answer_text.text = ''
        while now:
            key = event.waitKeys()[0]
            # Add a new number
            if key in '1234567890abcdfeghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-':
                answer_text.text += key
            # Delete last character, if there are any chars at all
            elif key == 'backspace' and len(answer_text.text) > 0:
                answer_text.text = answer_text.text[:-1]
            # Stop collecting response and return it
            elif key == 'return':
                expName = answer_text.text
                print('expName IN here: ', expName)
                now = False
            # Show current answer state
            instruction_text.draw()
            answer_text.draw()
            square1.draw()  # have to draw trigger squ; otherwise transient white will happen
            square2.draw()
            win.flip()
        # setting name of file which will be used to save the order of vs stim
        # displayed; NAME = MVS (movSinGrat) + tuning feature + datetime
        # NOTE(review): fileName is only assigned in this non-Synch branch but
        # is used in the save step of the main loop for both branches — the
        # Synch path would raise NameError there; confirm.
        feature = MovSinGrat_features_dict.keys()
        date = datetime.today().strftime(
            '%Y%m%d_%H%M%S')  # extract today's date
        fileName = expName + '_vs_' + feature[
            MovSinGrat_features] + '_' + date  # exp name defined above either by user (if not synch) or by eye tracking software (if user)

        # creating generalized sequence of randomely shuffled stimuli for
        # tuning, given a particular feature (duplicate of the Synch branch).
        if MovSinGrat_features == 0:  # ori
            tuning_stim_val = map(
                float, MovSinGrat_angles_list
            )  # map applies float() to all elements, converting strings to float
            tuning_stim_ind = range(len(tuning_stim_val))
        elif MovSinGrat_features == 1:  # spat_freq
            tuning_stim_val = map(float, MovSinGrat_spat_list)
            tuning_stim_ind = range(len(tuning_stim_val))
        elif MovSinGrat_features == 2:  # temp_freq
            # assign temp freq depending on Motionmode (lin vs osc motion)
            if MovSinGrat_Motionmode == 0:
                tuning_stim_val = map(float, MovSinGrat_temp_lin_list)
                tuning_stim_ind = range(len(tuning_stim_val))
            else:
                tuning_stim_val = map(float, MovSinGrat_temp_osc_list)
                tuning_stim_ind = range(len(tuning_stim_val))
        elif MovSinGrat_features == 3:  # contrast
            tuning_stim_val = MovSinGrat_contrast_list
            tuning_stim_ind = range(len(tuning_stim_val))
        elif MovSinGrat_features == 4:  # location
            tuning_stim_val = map(float, MovSinGrat_location_list)
            tuning_stim_ind = range(len(tuning_stim_val))
        else:
            print 'ERROR: MovSinGrat_features outside range. Tuning Feature Value must an integer be between 0 and 4.'

        # calculating total number of stimuli that will be presented
        tot_num_stim = MovSinGrat_Rep * MovSinGrat_ledstate * len(
            tuning_stim_ind)
        # each column is one parameter for each presented stimulus (row)
        paras = np.empty(shape=[tot_num_stim, 12])
        # Generating sequence of order of presenting stimID that will ONLY
        # change the TUNING FEATURE PARAMETER of the stimulus:
        for repind in xrange(MovSinGrat_Rep):
            stimId = np.empty(len(tuning_stim_ind) * MovSinGrat_ledstate)
            stimId = map(int, stimId)
            if MovSinGrat_randomseq:
                for iled in xrange(
                        0, MovSinGrat_ledstate
                ):  # for each ledstate, one of each angle will be assigned in random order
                    np.random.shuffle(tuning_stim_ind)
                    for n in xrange(
                            iled, len(stimId), MovSinGrat_ledstate
                    ):  # assign tuning_stim_ind elements to stimId by hops of size ledstate
                        stimId[n] = tuning_stim_ind[n / MovSinGrat_ledstate]
            else:
                for iled in xrange(0, MovSinGrat_ledstate):
                    for n in xrange(iled, len(stimId), MovSinGrat_ledstate):
                        stimId[n] = tuning_stim_ind[n / MovSinGrat_ledstate]
            # adding the parameters as an array at index i
            for localstimid in xrange(0, (len(stimId))):
                if MovSinGrat_features == 0:  # 0 = ori
                    paras[repind * (len(stimId)) + localstimid, :] = [
                        MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
                        MovSinGrat_contrast, MovSinGrat_MeanLum,
                        MovSinGrat_dirindex, MovSinGrat_t_before,
                        MovSinGrat_t_During, MovSinGrat_t_after,
                        MovSinGrat_t_triginit,
                        tuning_stim_val[stimId[localstimid]],
                        MovSinGrat_ledstate, 0
                    ]  # ADD LOCATION
                elif MovSinGrat_features == 1:  # 1 = spat
                    paras[repind * (len(stimId)) + localstimid, :] = [
                        tuning_stim_val[stimId[localstimid]],
                        MovSinGrat_tempFreqVal, MovSinGrat_contrast,
                        MovSinGrat_MeanLum, MovSinGrat_dirindex,
                        MovSinGrat_t_before, MovSinGrat_t_During,
                        MovSinGrat_t_after, MovSinGrat_t_triginit,
                        MovSinGrat_ori, MovSinGrat_ledstate, 0
                    ]  # ADD LOCATION
                elif MovSinGrat_features == 2:  # and movSinGrat_motionMode == 0: #2 = TempFreq
                    paras[repind * (len(stimId)) + localstimid, :] = [
                        MovSinGrat_SpatFreqVal,
                        tuning_stim_val[stimId[localstimid]],
                        MovSinGrat_contrast, MovSinGrat_MeanLum,
                        MovSinGrat_dirindex, MovSinGrat_t_before,
                        MovSinGrat_t_During, MovSinGrat_t_after,
                        MovSinGrat_t_triginit, MovSinGrat_ori,
                        MovSinGrat_ledstate, 0
                    ]  # ADD LOCATION
                # NOTE(review): the Synch branch uses `== 3` for contrast;
                # this branch checks `== 4`, so contrast tuning (feature 3)
                # would fall through here — likely a bug, confirm.
                elif MovSinGrat_features == 4:  # 4 = contrast
                    paras[repind * (len(stimId)) + localstimid, :] = [
                        MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
                        tuning_stim_val[stimId[localstimid]],
                        MovSinGrat_MeanLum, MovSinGrat_dirindex,
                        MovSinGrat_t_before, MovSinGrat_t_During,
                        MovSinGrat_t_after, MovSinGrat_t_triginit,
                        MovSinGrat_ori, MovSinGrat_ledstate, 0
                    ]  # ADD LOCATION
                #elif MovSinGrat_features == 5: #5 = location
                #    paras[repind*(len(stimId))+localstimid, :] = [MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal, MovSinGrat_contrast, MovSinGrat_MeanLum, MovSinGrat_dirindex,
                #                                                  MovSinGrat_t_before, MovSinGrat_t_During, MovSinGrat_t_after, MovSinGrat_t_triginit, MovSinGrat_ori, MovSinGrat_ledstate, 0] #ADD LOCATION
        #paratemp = [drumgrating_Ori, Motionmode, drumgrating_Amp_sinu, drumgrating_addblank]
        # setting up the parameters based on what was send in the paras variable
        #drumgrating_Ori = int(paratemp[0])
        #Motionmode = int(paratemp[1])
        #drumgrating_Amp_sinu = paratemp[2]
        #drumgrating_addblank = paratemp[3]

    if Synch:
        # waiting for "STR" from the control computer before starting
        while True:
            try:
                info = sock.recv(1024)
            except:
                pass
            if info == "STR":
                sock.sendto(("VS is running"), (Remote_IP, Remote_Port))
                break
            if mouse.getPressed()[1]:
                sock.close()
                return

    # generating the pixel angles relative to the mouse position based on the
    # orientation of the stimulus.
    # generating matrix that will be the place holder for every pixel
    pixelangle = np.empty(shape=[1, winWidth
                                 ])  # pixel has to be 2D since the image is 2D
    temp = np.array(range(winWidthofEachDisp))
    # NOTE(review): .reshape returns a new array and the result is discarded —
    # this line has no effect; confirm whether the reshape was intended.
    temp.reshape(1, winWidthofEachDisp)  # the temp must be 2D
    #tempPixelAngle = np.degrees(np.arctan((temp - (winWidthofEachDisp/2.0))*PixelSize*(2.0/DisplayFrameWidth))) + 45 #calculating the pixel angle for first monitor
    spatangperpix = np.degrees(np.arctan(PixelSize / ani_distance))
    tempPixelAngle = spatangperpix * temp
    for i in range(ScrnNum):
        pixelangle[:, i * winWidthofEachDisp:(
            i + 1
        ) * winWidthofEachDisp] = tempPixelAngle + 90 * i  # taking specific ranges within the full winWidth and replacing the values with the corresponding angles

    # ---- Generating the VS based on the parameters in paras ----
    for m in xrange(tot_num_stim):
        paras[m, 11] = 1  # marks which stim have been presented to the animal
        tic = clock.getTime()
        if m == 0:
            # first trial: unpack the parameter row and build the frame stack
            SpatFreqDeg = paras[m, 0]
            TempFreq = paras[m, 1]
            contrast = paras[m, 2]
            MeanLum = paras[m, 3]
            dirindex = paras[m, 4]
            t_before = paras[m, 5]
            t_During = paras[m, 6]
            t_after = paras[m, 7]
            t_triginit = paras[m, 8]
            orientation = paras[m, 9]
            ledstate = paras[m, 10]
            # inverse-gamma-corrected pixel value that produces the mean luminance
            pixelformeanlum = 2 * (np.exp(
                np.log(MovSinGrat_MeanLum / MovSinGrat_AmpFactor) /
                MovSinGrat_GammaFactor) / 255.0) - 1
            MovSinGrat_gray = MovSinGrat_MeanLum
            inc = MovSinGrat_gray * MovSinGrat_contrast
            # frames to be calculated per period
            frames = round(FR / TempFreq)
            phase = np.array(range(int(frames)))
            if MovSinGrat_Motionmode == 1:
                phase = (phase / float(round(frames))) * (2.0 * np.pi)
            elif MovSinGrat_Motionmode == 0:
                phase = MovSinGrat_Amp_sinu * np.sin(
                    (phase / frames) * 2 * np.pi) * SpatFreqDeg * 2 * np.pi
            # generating the pixel values for the stimulus;
            # creating the list that will hold all frames
            texdata1D = []
            # generating the pixel values for vertical stimulus
            for i in range(int(frames)):
                texdata1DTmp = np.exp(
                    np.log((MovSinGrat_gray +
                            inc * np.sin(pixelangle * SpatFreqDeg * 2 * np.pi +
                                         phase[i])) / MovSinGrat_AmpFactor) /
                    MovSinGrat_GammaFactor)
                pixVal = 2 * (
                    texdata1DTmp / 255
                ) - 1  # converting the pixel values from 0:255 to -1:1
                texdata1D.append(pixVal)
        else:
            # later trials: only rebuild the frame stack if the parameter row changed
            if sum(abs(paras[m, :] - paras[m - 1, :])) > 1e-7:
                #if (not all([v == 0 for v in abs(paras[m, :] - paras[m-1, :])])):
                SpatFreqDeg = paras[m, 0]
                TempFreq = paras[m, 1]
                MovSinGrat_contrast = paras[m, 2]
                MovSinGrat_MeanLum = paras[m, 3]
                MovSinGrat_dirindex = paras[m, 4]
                t_before = paras[m, 5]
                t_During = paras[m, 6]
                t_afterVal = paras[m, 7]
                MovSinGrat_t_triginit = paras[m, 8]
                orientation = paras[m, 9]
                ledstate = paras[m, 10]
                pixelformeanlum = 2 * (np.exp(
                    np.log(MovSinGrat_MeanLum / MovSinGrat_AmpFactor) /
                    MovSinGrat_GammaFactor) / 255.0) - 1
                MovSinGrat_gray = MovSinGrat_MeanLum
                inc = MovSinGrat_gray * MovSinGrat_contrast
                # frames to be calculated per period
                frames = round(FR / TempFreq)
                phase = np.array(range(int(frames)))
                if MovSinGrat_Motionmode == 1:
                    phase = (phase / float(round(frames))) * (2.0 * np.pi)
                elif MovSinGrat_Motionmode == 0:
                    phase = MovSinGrat_Amp_sinu * np.sin(
                        (phase / frames) * 2 * np.pi) * SpatFreqDeg * 2 * np.pi
                # generating the pixel values for the stimulus;
                # creating the list that will hold all frames
                texdata1D = []
                # generating the pixel values for vertical stimulus
                for i in range(int(frames)):
                    texdata1DTmp = np.exp(
                        np.log((MovSinGrat_gray + inc *
                                np.sin(pixelangle * SpatFreqDeg * 2 * np.pi +
                                       phase[i])) / MovSinGrat_AmpFactor) /
                        MovSinGrat_GammaFactor)
                    pixVal = 2 * (
                        texdata1DTmp / 255
                    ) - 1  # converting the pixel values from 0:255 to -1:1
                    texdata1D.append(pixVal)

        # creating the looping variable for the simulation depending on the
        # value of MovSinGrat_addblank
        if MovSinGrat_addblank == 0 or MovSinGrat_addblank == 1:
            # this variable controls the looping and frame that is to be displayed
            frmNum = 0  # frame number within one cycle
        elif MovSinGrat_addblank == 2 and m == 0:
            # this variable controls the looping and frame that is to be displayed
            frmNum = 0  # frame number within one cycle

        # setting up the grating
        DrawTexture = visual.GratingStim(win=win,
                                         size=[2 * winWidth, 2 * winWidth],
                                         units='pix',
                                         tex=texdata1D[0],
                                         ori=orientation)

        if Synch:
            # waiting for "TRLstart"; if TRLstart is sent this loop will send
            # "TRLstart m" then break
            sock.settimeout(0.5)
            comm = [""]
            while True:
                try:
                    comm = sock.recvfrom(1024)
                except Exception:
                    pass
                if comm[0] == "TRLstart":
                    sock.sendto(("TRLstart " + str(m + 1)),
                                (Remote_IP, Remote_Port))
                    break
                elif comm[
                        0] == "ESC1":  # if 'ESC1' is in the buffer, return to front
                    sock.close()
                    return
                if mouse.getPressed()[1]:
                    sock.close()
                    print("Exit at ESC1")
                    return

        # draw the first pre-stimulus frame according to the blank mode
        if MovSinGrat_addblank == 1.0:
            win.color = pixelformeanlum
        elif MovSinGrat_addblank == 0.0:
            DrawTexture.draw()
        elif MovSinGrat_addblank == 2.0:
            DrawTexture.tex = texdata1D[frmNum]
            DrawTexture.draw()
            frmNum = frmNum + 1
            if frmNum >= len(texdata1D):
                frmNum = 0
        # mask R and L screen to display stim on front screen only
        if one_screen:
            mask_L.draw()
            mask_R.draw()
        square1.draw()
        square2.draw()
        win.flip()

        # time before the stimulation
        toc = clock.getTime() - tic
        while toc < (t_before / 1000.0):
            toc = clock.getTime() - tic
            if MovSinGrat_addblank == 2:
                # assigning the texture using the corrusponding frame
                DrawTexture.tex = texdata1D[frmNum]
                # this if statement is for exiting the stimulation
                if mouse.getPressed()[1]:
                    if Synch:
                        sock.close()
                    return
                frmNum = frmNum + 1
                if frmNum >= len(texdata1D):
                    frmNum = 0
                DrawTexture.draw()
                # mask R and L screen to display stim on front screen only
                if one_screen:
                    mask_L.draw()
                    mask_R.draw()
                square1.draw()
                square2.draw()
                win.flip()

        # t_triger initial timing for triggering the camera
        for i in range(int(FR * MovSinGrat_t_triginit / 1000.0)):
            if i < 3:
                square1.fillColor = [1, 1, 1]
                square2.fillColor = [-1, -1, -1]
            else:
                square1.fillColor = [-1, -1, -1]
                square2.fillColor = [-1, -1, -1]
            if MovSinGrat_addblank == 1.0:
                win.color = pixelformeanlum
            elif MovSinGrat_addblank == 0.0:
                DrawTexture.draw()
            elif MovSinGrat_addblank == 2:
                # assigning the texture using the corrusponding frame
                DrawTexture.tex = texdata1D[frmNum]
                frmNum = frmNum + 1
                if frmNum >= len(texdata1D):
                    frmNum = 0
                DrawTexture.draw()
            if mouse.getPressed()[1]:
                if Synch:
                    sock.close()
                return
            # mask R and L screen to display stim on front screen only
            if one_screen:
                mask_L.draw()
                mask_R.draw()
            square1.draw()
            square2.draw()
            win.flip()

        # making the top square white
        square1.fillColor = [-1, -1, -1]
        square2.fillColor = [1, 1, 1]
        # drawing the frames on the window
        for frm in range(int(FR * t_During / 1000.0)):
            # assigning the texture using the corrusponding frame
            DrawTexture.tex = texdata1D[frmNum]
            # this if statement is for exiting the stimulation
            if mouse.getPressed()[1]:
                if Synch:
                    sock.close()
                return
            frmNum = frmNum + 1
            if frmNum >= len(texdata1D):
                frmNum = 0
            DrawTexture.draw()
            # mask R and L screen to display stim on front screen only
            if one_screen:
                mask_L.draw()
                mask_R.draw()
            square1.draw()
            square2.draw()
            win.flip()

        # save vs data in .csv format;
        # create a temp list variable that stores array values that will be appended
        save_row = paras[m].tolist()
        # open and append values to new file
        with open(fileName + '.csv', 'a') as f:
            for i in range(len(save_row)):
                f.write(str(save_row[i]) + ',')
            f.write('\n')

        if Synch:
            sock.sendto(("TRLdone " + str(m + 1)), (Remote_IP, Remote_Port))
        # changing the characteristics of the two squares at the bottom left corner
        square1.fillColor = [-1, -1, -1]
        square2.fillColor = [-1, -1, -1]
        # time after the stimulation
        for toc in range(int(t_after * FR / 1000.0)):
            if MovSinGrat_addblank == 1.0:
                win.color = pixelformeanlum
            elif MovSinGrat_addblank == 0.0:
                DrawTexture.draw()
            elif MovSinGrat_addblank == 2:
                # assigning the texture using the corrusponding frame
                DrawTexture.tex = texdata1D[frmNum]
                frmNum = frmNum + 1
                if frmNum >= len(texdata1D):
                    frmNum = 0
                DrawTexture.draw()
            # mask R and L screen to display stim on front screen only
            if one_screen:
                mask_L.draw()
                mask_R.draw()
            square1.draw()
            square2.draw()
            win.flip()

        if Synch:
            # checking for stop button before starting the next trial
            while True:
                try:
                    comm = sock.recvfrom(1024)
                except:
                    pass
                if comm[0] == "ESC1":
                    sock.close()
                    return
                elif comm[0] == "ESC0":
                    break
                elif mouse.getPressed()[1]:
                    sock.close()
                    print("Exit at ESC2")
                    return
def __init__(self, CONF): self.CONF = CONF # fetch the most recent calib for this monitor mon = monitors.Monitor('tesfgft') mon.setWidth(CONF["screen"]["size"][0]) mon.setSizePix(CONF["screen"]["resolution"]) self.window = visual.Window( size=CONF["screen"]["resolution"], # display_resolution=CONF["screen"]["resolution"], monitor=mon, fullscr=CONF["screen"]["full"], # units="cm", allowGUI=True ) # set up instructions and overview self.task = visual.TextStim(self.window, text=CONF["task"]["name"], alignHoriz='center', alignVert='center', height=CONF["instructionSizes"]["taskHeight"], pos=CONF["instructionSizes"]["taskPos"], units="cm" ) self.session = visual.TextStim(self.window, text="P" + CONF["participant"] + " Session " + CONF["session"] + " " + CONF["version"], pos=CONF["instructionSizes"]["sessionPos"], height=CONF["instructionSizes"]["sessionHeight"], alignHoriz='center', alignVert='center', units="cm" ) self.instructions = visual.TextStim( self.window, text=CONF["instructions"]["text"], height=CONF["instructionSizes"]["instructionsHeight"], units="cm") self.startPrompt = visual.TextStim( self.window, text=CONF["instructions"]["startPrompt"], height=CONF["instructionSizes"]["startPromptHeight"], units="cm", pos=(0, -CONF["screen"]["size"][1]/2+3)) self.cue = visual.TextStim(self.window) # Setup fixation box self.fixation_box = visual.Rect( self.window, height=CONF["fixation"]["height"], width=CONF["fixation"]["width"], fillColor=CONF["fixation"]["boxColor"], lineColor=CONF["fixation"]["boxColor"], units=CONF["screen"]["units"]) # setup stopwatch # self.counter = visual.TextStim(self.window) self.counter = visual.TextBox(window=self.window, # border_color=[-1, -1, 1], ## grid_color=[-1, -1, 1], ## textgrid_shape=(10, 1), # grid_stroke_width=1, # textgrid_shape=[20, 4], font_color=[1, 1, 1], size=(1, 1), font_size=41, pos=(0.05, 0), grid_horz_justification='center', grid_vert_justification='center', # units='cm', #TODO: get this to work )