def test_NoiseStim_defaults_image(self):
    """An 'image' NoiseStim with no ``noiseImage`` must raise ValueError."""
    kind = 'image'
    # noiseImage kwarg is deliberately omitted: construction should fail.
    with pytest.raises(ValueError):
        visual.NoiseStim(win=self.win, noiseType=kind,
                         size=(32, 32), units='pix')
def test_NoiseStim_defaults(self):
    """Each basic noise type constructs, regenerates and draws with defaults."""
    for kind in ('binary', 'uniform', 'normal', 'white', 'filtered'):
        stim = visual.NoiseStim(win=self.win, noiseType=kind,
                                size=(32, 32), units='pix')
        stim.updateNoise()
        stim.draw()
def test_noiseFiltersAndRaisedCos(self):
    """Exercise NoiseStim filter variants under a raised-cosine mask.

    The noise type / filter configuration is selected from the window's
    units (and, in the fallback case, from the context name); a single
    stimulus is then drawn and compared against a stored screenshot.
    """
    numpy.random.seed(1)  # deterministic noise so the screenshot compare is stable
    win = self.win
    size = numpy.array([2.0, 2.0]) * self.scaleFactor
    tres = 128
    elementsize = 4
    sf = None
    ntype = 'Binary'
    comp = 'Amplitude'
    fileName = os.path.join(utils.TESTS_DATA_PATH, 'testimagegray.jpg')
    units = win.units
    if units == 'pix':
        ftype = 'Butterworth'
        size = numpy.array([128, 128])
    elif units == 'degFlatPos':
        ftype = 'Gabor'
        sf = 0.125
        elementsize = 1
    elif units == 'degFlat':
        ftype = 'Isotropic'
        sf = 0.125
        elementsize = 1
    elif units == 'deg':
        ntype = 'Image'
        ftype = 'Butterworth'
        sf = 0.125
    elif units == 'cm':
        ntype = 'Image'
        ftype = 'Butterworth'
        comp = 'Phase'
        sf = 0.25
    else:
        # Fallback units (norm/height-like contexts): pick the noise type
        # from the context name; all use a Butterworth filter.
        ftype = 'Butterworth'
        if self.contextName == 'stencil':
            ntype = 'White'
        elif self.contextName == 'height':
            ntype = 'Uniform'
        else:
            ntype = 'Normal'
        # NOTE(review): collapsed source is ambiguous on whether this line
        # belongs to the innermost else only — assumed to apply to the
        # whole fallback branch; confirm against upstream.
        elementsize = 1.0 / 8.0
    image = visual.NoiseStim(
        win=win, name='noise', units=win.units, noiseImage=fileName,
        mask='raisedCos', ori=0, pos=(0, 0), size=size, sf=sf, phase=0,
        color=[1, 1, 1], colorSpace='rgb', opacity=1, blendmode='avg',
        contrast=0.5, texRes=tres, noiseType=ntype,
        noiseElementSize=elementsize, noiseBaseSf=32.0 / size[0],
        noiseBW=0.5, noiseBWO=7, noiseFractalPower=-1,
        noiseFilterLower=4.0 / size[0], noiseFilterUpper=16.0 / size[0],
        noiseFilterOrder=1, noiseOri=45.0, noiseClip=4.0,
        imageComponent=comp, filter=ftype, interpolate=False, depth=-1.0)
    image.draw()
    utils.compareScreenshot('noiseFiltersAndRcos_%s.png' % (self.contextName), win)
    win.flip()
    str(image)  # exercise __str__ as part of the test
def generate_noise():
    """Render one frame of binary pixel noise and cache it with its inverse.

    Draws a full-window NoiseStim to the back buffer, then stores the
    captured frame in the module-level ``generated_noise`` and its
    photometric inverse in ``inverse_noise``.
    """
    global generated_noise, inverse_noise
    noise_stim = visual.NoiseStim(
        win=mywin, units='pix', ori=0, pos=(0, 0), size=(1024, 1024),
        color=[1, 1, 1], colorSpace='rgb', opacity=1, blendmode='avg',
        contrast=1.0, texRes=512, noiseType='Binary',
        noiseElementSize=noise_size)
    noise_stim.draw()
    # Grab the back buffer (not yet flipped to screen) as a PIL image.
    generated_noise = mywin.getMovieFrame(buffer='back')
    inverse_noise = PIL.ImageOps.invert(generated_noise)
monitor='testMonitor', color=[0,0,0], colorSpace='rgb', blendMode='avg', useFBO=True, units='height') # store frame rate of monitor if we can measure it expInfo['frameRate'] = win.getActualFrameRate() if expInfo['frameRate'] != None: frameDur = 1.0 / round(expInfo['frameRate']) else: frameDur = 1.0 / 60.0 # could not measure, so guess # Initialize components for Routine "trial" trialClock = core.Clock() noise = visual.NoiseStim( win=win, name='noise', noiseImage=None, mask=None, ori=0, pos=(0, 0), size=(0.5, 0.5), sf=None, phase=0.0, color=[1,1,1], colorSpace='rgb', opacity=1, blendmode='avg', contrast=1.0, texRes=128, noiseType='Binary', noiseElementSize=0.0625, noiseBaseSf=8.0, noiseBW=1, noiseBWO=1, noiseFractalPower=0.0,noiseFilterLower=1.0, noiseFilterUpper=8.0, noiseFilterOrder=0.0, noiseClip=3.0, interpolate=False, depth=0.0) noise.buildNoise() # Create some handy timers globalClock = core.Clock() # to track the time since experiment started routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine # ------Prepare to start Routine "trial"------- t = 0 trialClock.reset() # clock frameN = -1 continueRoutine = True routineTimer.add(1.000000)
fullscr=True) grating1 = visual.GratingStim(win=window, size=(2000, 2000), sf=0.01, ori=45, contrast=.2, opacity=1) grating2 = visual.GratingStim(win=window, size=(2000, 2000), sf=0.01, ori=135, contrast=.2, opacity=.5) noise1 = visual.NoiseStim(win=window, units='pix', noiseType='white', size=(1650, 1050), opacity=.3) mouse = event.Mouse(visible=False) timer = core.Clock() seed = random.randrange(1e6) rng = random.Random(seed) print(seed) ####################### # Feedback Global Settings abortKey = '9' correct1 = sound.Sound(500, secs=.1) correct2 = sound.Sound(1000, secs=.1) error = sound.Sound(300, secs=.3) wrongKey = sound.Sound(100, secs=1)
noAudio = False, filename=cap, ori=0, pos=(0, 0), opacity=1, loop=False, depth=0.0, ) sound_1 = sound.Sound('A', secs=-1, stereo=True, hamming=True, name='sound_1') sound_1.setVolume(1) noise = visual.NoiseStim( win=win, name='noise', noiseImage=None, mask=None, ori=0, pos=(0, 0), size=(0.6, 0.6), sf=None, phase=0.0, color=[1,1,1], colorSpace='rgb', opacity=0.95, blendmode='avg', contrast=1.0, texRes=128, filter=None, noiseType='Binary', noiseElementSize=0.0625, noiseBaseSf=8.0, noiseBW=1, noiseBWO=30, noiseOri=0.0, noiseFractalPower=0.0,noiseFilterLower=1.0, noiseFilterUpper=8.0, noiseFilterOrder=0.0, noiseClip=3.0, imageComponent='Phase', interpolate=False, depth=-2.0) noise.buildNoise() sound_2 = sound.Sound('C', secs=-1, stereo=True, hamming=True, name='sound_2') sound_2.setVolume(1) noise_2 = visual.NoiseStim( win=win, name='noise_2', noiseImage=None, mask=None, ori=0, pos=(0, 0), size=(0.6, 0.6), sf=None, phase=0.0,
languageStyle='LTR', depth=0.0); # Initialize components for Routine "Video_Tuning" Video_TuningClock = core.Clock() accuracy = 0 target_accuracy = .5 noise_level = 0 minimum_iterations = 10 margin_of_error = .05 noise = visual.NoiseStim( win, noiseType="white", noiseElementSize=.1, opacity=.5, size=(2,2) ) video_stimulus = visual.MovieStim3( win=win, name='video_stimulus',units='norm', noAudio = True, filename='GA.mov', ori=0, pos=(0, 0), opacity=1, size=(2, 2), depth=-1.0, ) prompt = visual.TextStim(win=win, name='prompt', text='What was said:\nBA, DA, or GA?\nPress B, D, or G.', font='Arial', pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
# Render each stimulus in its own short-lived window and save a screenshot.
win = visual.Window([100, 110], units='pix', color=bg_scale)
stim_A = visual.TextStim(win, text='A', units='pix', height=120,
                         color=flk_scale)
stim_A.draw()
win.flip()
frame = win._getFrame()
frame.save('stimuli/set1-' + 'AH-0.00' + '.png', size=(100, 110))
win.close()
######################
win = visual.Window([800, 150], units='pix', color=bg_scale)
stim_A = visual.NoiseStim(win, name='noise', units='pix',
                          noiseType='Normal', color=flk_scale)
stim_A.draw()
win.flip()
frame = win._getFrame()
frame.save('stimuli/set1-' + 'AH-0.00' + '.png', size=(100, 110))
win.close()
win = visual.Window([100, 110], units='pix', color=bg_scale)
stim_H = visual.TextStim(win, text='H', units='pix', height=120,
                         color=flk_scale)
stim_H.draw()
win.flip()
timer = core.Clock() seed = random.randrange(1e6) rng = random.Random(seed) correct1=sound.Sound(500,secs=.1) correct2=sound.Sound(1000,secs=.1) error=sound.Sound(300,secs=.2) #################################### ## TRIAL SET UP ################# #################################### blank=visual.TextStim(win=window,text="") grate= visual.GratingStim(win=window,sf=.05,size=256,mask="gauss", ori=1,opacity=.3) noise=visual.NoiseStim(win=window,size=(256,256),noiseType="uniform", noiseElementSize=2,opacity=0) def doTrial(grateOri,noiseOp): if grateOri<0: side=0 else: side=1 noise.opacity=noiseOp grate.ori=grateOri blank.draw() window.flip() core.wait(.2) grate.draw()
def __init__(self, sub_val, n_trial, mode='uniform', atten_task=False):
    """Set up the subject record, stimulus sequence, window and stimuli.

    Parameters
    ----------
    sub_val : str
        Subject name/id; also names the data directory and record file.
    n_trial : int
        Number of trials to run.
    mode : str
        Experiment mode (default 'uniform').
    atten_task : bool
        Whether the attention task is enabled.
    """
    # subject name/id
    self.sub_val = sub_val
    self.time_stmp = datetime.now().strftime("%d_%m_%Y_%H_%M_")
    # create condition sequence / record file for each subject
    self.data_dir = os.path.join('.', 'Neural', self.sub_val)
    self.record_path = os.path.join(self.data_dir, self.sub_val + '.json')
    if os.path.exists(self.record_path):
        # Returning subject: resume from the saved record.
        with open(self.record_path, 'r') as file_handle:
            self.sub_record = json.load(file_handle)
    else:
        # New subject: create the data directory and a fresh record.
        # makedirs(exist_ok=True) instead of mkdir: does not crash when the
        # directory already exists without a record file, and also creates
        # the './Neural' parent on a clean checkout.
        os.makedirs(self.data_dir, exist_ok=True)
        cond_seq = list(range(3)) * self.SEN_NUM
        np.random.shuffle(cond_seq)
        self.sub_record = {
            'Cond_Seq': cond_seq,
            'Cond_Ctr': 0,
            '0': 0, '1': 0, '2': 0
        }
        self._save_json()
        print('create subject file at ' + self.record_path)
    # will be used for recording response
    self.resp_flag = True
    self.increment = 0
    # parameter for the experiment
    self.n_trial = n_trial
    self.mode = mode
    self.atten_task = atten_task
    self.show_center = True
    self.line_len = self.DEFAULT_LEN
    self.stim_dur = self.DEFAULT_DUR
    self.delay = self.DEFAULT_DELAY
    self.blank = self.DEFAULT_BLANK
    # read in stim sequence (whitespace-separated ints, one long list)
    with open(self.STIM_SEQ_PATH, 'r') as seq_file:
        stim_seq = seq_file.read().replace('\n', ' ').split()
    stim_seq = list(map(int, stim_seq))
    self.stim_seq = np.array(stim_seq).reshape(
        (self.SEN_NUM, self.SEQ_LEN * 2))
    # initialize window, message
    # monitor = 'rm_413' for psychophysics and 'sc_3t' for imaging session
    self.win = visual.Window(size=(1920, 1080), fullscr=True, allowGUI=True,
                             screen=1, monitor='sc_3t', units='deg',
                             winType=window_backend)
    # initialize stimulus
    self.target = visual.GratingStim(self.win, sf=0.50, size=10.0,
                                     mask='raisedCos',
                                     maskParams={'fringeWidth': 0.25},
                                     contrast=0.10)
    self.surround = visual.GratingStim(self.win, sf=0.50, size=18.0,
                                       mask='raisedCos', contrast=0.10)
    self.noise = visual.NoiseStim(self.win, units='pix', mask='raisedCos',
                                  size=1024, contrast=0.10, noiseClip=3.0,
                                  noiseType='Filtered', texRes=1024,
                                  noiseElementSize=4, noiseFractalPower=0,
                                  noiseFilterLower=7.5 / 1024.0,
                                  noiseFilterUpper=12.5 / 1024.0,
                                  noiseFilterOrder=3.0)
    self.fixation = visual.GratingStim(self.win, color=0.5, colorSpace='rgb',
                                       tex=None, mask='raisedCos', size=0.25)
    self.feedback = visual.Line(self.win, start=(0.0, -self.line_len),
                                end=(0.0, self.line_len), lineWidth=5.0,
                                lineColor='black', size=1, contrast=0.80)
    self.prob = visual.GratingStim(self.win, sf=0.5, size=[2.0, 5.0],
                                   mask='gauss', contrast=1.0)
    # data recorder
    self.record = DataRecord()