instructions_text = visual.TextStim(win=win, name='instructions_text',
    text="In this experiment you are going to hear different sounds. Your task is to decide, as quickly as possible and without making mistakes, if what you heard sounds more like 'ba' or 'da'. \n\nYou will use the arrow keys to make your decision. Let's do a practice round to make sure you understand. \n\nPress the spacebar to begin. ",
    font='Arial', pos=[0, 0], height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1, depth=0.0)

# Initialize components for Routine "practice"
practiceClock = core.Clock()
crossPractice = visual.TextStim(win=win, name='crossPractice', text='+',
    font='Arial', pos=[0, 0], height=0.5, wrapWidth=None, ori=0,
    color='red', colorSpace='rgb', opacity=1, depth=0.0)
practice_sound = sound.Sound('A', secs=-1)
practice_sound.setVolume(1)
practice_ba = visual.TextStim(win=win, name='practice_ba', text='ba',
    font='Arial', pos=[-0.5, -0.3], height=0.3, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1, depth=-3.0)
practice_da = visual.TextStim(win=win, name='practice_da', text='da',
    font='Arial', pos=[0.5, -0.3], height=0.3, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1, depth=-4.0)

# Initialize components for Routine "msg"
                   fullscr=True, allowGUI=True, monitor='WordComposite',
                   units='deg')  # using WordComposite monitor set at 57 cm viewing distance
win.mouseVisible = False
fixation = visual.PatchStim(win, color=-1, tex=None, mask='circle', size=0.2, units='deg')
# mask = visual.RadialStim(win, tex='sqrXsqr', color=1, size=8,
#                          visibleWedge=[0, 360], radialCycles=4, angularCycles=8,
#                          interpolate=False, autoLog=False)
mask = visual.GratingStim(win=win, units="deg", size=7)
mask.sf = 5.0 / 7
corSnd = sound.Sound(2200, octave=14, secs=0.01)  # auditory feedback
incorSnd = sound.Sound(800, octave=7, secs=0.01)  # auditory feedback

# cue for which side is the target
cueLeftVert = [(-2, 3), (-3, 3), (-3, -3), (-2, -3)]
cueLeft = visual.ShapeStim(win, vertices=cueLeftVert, closeShape=False,
                           lineWidth=5, pos=(-3, 0), lineColor='black')
cueRightVert = [(2, 3), (3, 3), (3, -3), (2, -3)]
cueRight = visual.ShapeStim(win, vertices=cueRightVert, closeShape=False, lineWidth=5,
sessionID = 1

########################################
# GLOBAL SETTINGS ######################
########################################
window = visual.Window(units="pix", allowGUI=False, size=(400, 400), color=[0, 0, 0], fullscr=True)
mouse = event.Mouse(visible=False)
timer = core.Clock()
seed = random.randrange(1e6)
rng = random.Random(seed)
correct1 = sound.Sound(500, secs=.1)
correct2 = sound.Sound(1000, secs=.1)
error = sound.Sound(300, secs=.2)

####################################
## TRIAL SET UP ####################
####################################
def makeArray(p, mx, my):
    n = mx * my
    x = int((1 - p) * n + .5)
    red = np.reshape(np.array([255, 0, 0] * x, dtype=np.uint8), (x, 3))
    green = np.reshape(np.array([0, 255, 0] * (n - x), dtype=np.uint8), (n - x, 3))
    a = np.concatenate((red, green))
# Initialize components for Routine "trial"
trialClock = core.Clock()
text = visual.TextStim(win=win, name='text', text='default text',
    font='Arial', units='pix', pos=(0, 0), height=45, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1, depth=0.0)
sound_1 = sound.Sound(u'A', secs=0.3)
sound_1.setVolume(1)

# Initialize components for Routine "EnterResponses"
EnterResponsesClock = core.Clock()
ResponseText = visual.TextStim(win=win, name='ResponseText',
    text='Please repeat the word list',
    font='Arial', units='pix', pos=(0, 0), height=45, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb',
        # Return the results
        return {
            'trial': self.name,
            'key': key,
            'start_time': start_time,
            'end_time': end_time
        }


# initialize the experiment
experiment = Experiment((800, 600), (-1, -1, -1), (1, 1, 1))  # defined in class Experiment; fill in screen size, text color, background color

trials = []
for i in range(len(stimuli)):
    audio = sound.Sound(path_to_sounds + '/' + stimuli['freq_category'][i] + '/' + stimuli['word'][i])
    trial = AudioTrial(experiment, stimuli['freq_category'][i] + '/' + stimuli['word'][i] + '_audio', audio)
    trials.append(trial)
trials = np.random.permutation(trials)

# instructions (defined in Experiment, screen and color: default ok; fill in text; stays on screen until button press)
# Dutch: "press z if you think what you hear is a word, press m if you think it is not an existing word; press a key to begin"
experiment.show_message('druk z als je denkt dat wat je hoort een woord is,\ndruk m als je denkt dat het geen bestaand woord is\n\ndruk op een knop om te beginnen')

# run through all the trials and save results
results = []
for trial in trials:
    result = trial.run()
    results.append(result)

# Create a dataframe based on the results, and store them to a csv file
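# A minimal sketch of the CSV step described in the comment above (the snippet is cut
# off at that point): it assumes pandas is available alongside numpy and uses a
# hypothetical output filename, neither of which is part of the original.
import pandas as pd

results_df = pd.DataFrame(results)  # each trial.run() returned a dict, so the list of dicts maps directly
results_df.to_csv('results.csv', index=False)  # hypothetical output filename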
def present(
    save_fn: str = None,
    duration=120,
    stim_types=None,
    itis=None,
    additional_labels={},
    secs=0.07,
    volume=0.8,
    eeg=None,
):
    markernames = [1, 2]
    record_duration = np.float32(duration)

    ## Initialize stimuli
    # aud1 = sound.Sound('C', octave=5, sampleRate=44100, secs=secs)
    aud1 = sound.Sound(440, secs=secs)  # , octave=5, sampleRate=44100, secs=secs)
    aud1.setVolume(volume)
    # aud2 = sound.Sound('D', octave=6, sampleRate=44100, secs=secs)
    aud2 = sound.Sound(528, secs=secs)
    aud2.setVolume(volume)
    auds = [aud1, aud2]

    # Setup trial list
    trials = DataFrame(dict(sound_ind=stim_types, iti=itis))
    for col_name, col_vec in additional_labels.items():
        trials[col_name] = col_vec

    # Setup graphics
    mywin = visual.Window([1920, 1080], monitor="testMonitor", units="deg", fullscr=True)
    fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0, rgb=[1, 0, 0])
    fixation.setAutoDraw(True)
    mywin.flip()
    iteratorthing = 0

    # start the EEG stream, will delay 5 seconds to let signal settle
    if eeg:
        eeg.start(save_fn, duration=record_duration)

    show_instructions(10)

    # Start EEG stream, wait for signal to settle, and then pull timestamp for start point
    start = time()

    # Iterate through the events
    for ii, trial in trials.iterrows():
        iteratorthing = iteratorthing + 1

        # Inter-trial interval
        core.wait(trial["iti"])

        # Select and play the sound
        ind = int(trial["sound_ind"])
        auds[ind].stop()
        auds[ind].play()

        # Push sample
        if eeg:
            timestamp = time()
            if eeg.backend == "muselsl":
                marker = [additional_labels["labels"][iteratorthing - 1]]
                marker = list(map(int, marker))
            else:
                marker = additional_labels["labels"][iteratorthing - 1]
            eeg.push_sample(marker=marker, timestamp=timestamp)

        mywin.flip()
        if len(event.getKeys()) > 0:
            break
        if (time() - start) > record_duration:
            break
        event.clearEvents()

        if iteratorthing == 1798:
            sleep(10)

    # Cleanup
    if eeg:
        eeg.stop()
    mywin.close()
                    monitor='testMonitor', color=[0, 0, 0], colorSpace='rgb',
                    blendMode='avg', useFBO=True, units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess

# Initialize components for Routine "trial"
trialClock = core.Clock()
sound_1 = sound.Sound('A', secs=1.0, stereo=True)
sound_1.setVolume(1)

# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine

# ------Prepare to start Routine "trial"-------
t = 0
trialClock.reset()  # clock
frameN = -1
continueRoutine = True
routineTimer.add(1.000000)
# update component parameters for each repeat
sound_1.setSound('A', secs=1.0)
def test_methods(self):
    s = sound.Sound(secs=0.1)
    v = s.getVolume()
    assert v == 1
    assert s.setVolume(0.5) == 0.5
print('Using %s(with %s) for sounds' % (sound.audioLib, sound.audioDriver))
timeWithLabjack = True
maxReps = 100

# setup labjack U3
ports = u3.U3()
ports.__del__ = ports.close  # try to autoclose the ports if script crashes (not working?)

# get zero value of FIO6
startVal = ports.getFIOState(6)  # is FIO6 high or low?
print('FIO6 is at', startVal, end=' ')
print('AIN0 is at', ports.getAIN(0))

if timeWithLabjack:
    print('OS\tOSver\taudioAPI\tPsychoPy\trate\tbuffer\tmean\tsd\tmin\tmax')

snd = sound.Sound(1000, secs=0.1)
core.wait(2)  # give the system time to settle?
delays = []
nReps = 0
while True:  # run the repeats for this sound server
    if event.getKeys('q'):
        core.quit()
    nReps += 1  # do this repeatedly for timing tests
    ports.setFIOState(4, 0)  # start FIO4 low
    # draw black square
    stim.draw()
    win.flip()
    if not timeWithLabjack:
    extraInfo=expInfo, runtimeInfo=None, originPath=None,
    savePickle=True, saveWideText=True, dataFileName=filename)

# Setup the Window
win = visual.Window(size=(280, 150), fullscr=False, screen=0, allowGUI=False,
                    allowStencil=False, monitor='testMonitor',
                    color=[-1.000, -1.000, -1.000], colorSpace='rgb')

# Initialize components for Routine "Introduction"
IntroductionClock = core.Clock()
if usePyo == 1:
    Intro_Speak = SfPlayer(introFileWav, speed=1, loop=False)  # http://www.iact.umontreal.ca/pyo/manual/SfPlayer.html
else:
    Intro_Speak = sound.Sound(u'A', secs=1.0)  # only works with pygame
    Intro_Speak.setVolume(soundVolume)  # only works with pygame
intro_textField = visual.TextStim(win=win, ori=0, name='intro_textField', text=u'Intro',
    font=u'Arial', pos=[0, 0], height=0.3, wrapWidth=None,
    color=u'white', colorSpace=u'rgb', opacity=1, depth=-1.0)

# Initialize components for Routine "trial"
trialClock = core.Clock()
if usePyo == 1:
    # Unfinished feature, finish at some point and see if there is an effect for
    # latency and jitter
    if playFromWAVs == 1:
k = event.waitKeys()

# Present an example of the talker without noise. No response taken.
keystext = "Please listen to the Talker Example Sentence"
text = visual.TextStim(win, keystext, pos=(0, 0), units='pix')
text.draw()
win.flip()
core.wait(0.5)
fname = 'sa1'
speechFile = normjoin(talkerPath, 'sa1.wav')
info, speech = scipy.io.wavfile.read(speechFile)
speech = speech.astype('float32')
# scale speech to match the babble1 RMS
matchSpeech = babbleRMS / rms(speech) * speech * db2amp(increaseVolume)
scipy.io.wavfile.write(tmpSoundFile, 48000, matchSpeech.astype('int16'))
exampleSentence = sound.Sound(tmpSoundFile)
exampleSentence.play()
core.wait(exampleSentence.getDuration())

# Show the task instructions. Wait for the spacebar before continuing.
keystext = ("Please listen to and watch the talker of each sentence. Be ready to type the sentence you hear. "
            "If you are not sure about what you heard, guess. Please attempt to report as much of the sentence "
            "you heard as possible. \n \n Press the spacebar to continue.")
text = visual.TextStim(win, keystext, pos=(0, 0), units='pix')
text.draw()
win.flip()
core.wait(0.5)
k = event.waitKeys()

dBSNRBabble = initialSNR

# Start trial loop
for trial in np.arange(numTrials):
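# The snippet above relies on two helpers, rms() and db2amp(), that are defined
# elsewhere in the script. A minimal sketch of what such helpers conventionally
# compute (assumed definitions, not the original ones): root-mean-square amplitude
# and a dB-to-linear amplitude factor using the 20*log10 convention.
import numpy as np

def rms(x):
    # root-mean-square amplitude of a signal
    return np.sqrt(np.mean(x.astype('float64') ** 2))

def db2amp(db):
    # convert a level change in dB into a linear amplitude scaling factor
    return 10.0 ** (db / 20.0)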
stimList, trialList = getTrials(subjNr)

####################################
# Start experiment                 #
####################################
showText(fileName="intro.txt")  # Instructions
showText(fileName="practice.txt")  # Practice trials
totalTrials = len(stimList) - 3
counter = 0

# Loop per trial
for trialNr in stimList:
    next.setImage(imPath + 'next_press.png')
    voice = sound.Sound(value=soundPath + trialNr[0])
    timesPlayed = 0
    endTrial = False
    startClicks = False
    setWarning = False
    startTime = time.getTime()
    while (not endTrial):
        drawObjects()
        win.flip()
        while mouse.getPressed()[0] == 0:
            drawObjects()
            win.flip()
        if mouse.isPressedIn(square):
            timesPlayed += 1
def __init__(self):
    from psychopy import visual, monitors

    mon = monitors.Monitor('SMT')  # fetch the most recent calib for this monitor
    mon.setDistance(60)  # further away than normal?

    # Set psychopy window
    self.window = visual.Window([WIDTH, HEIGHT], units="pix", monitor=mon,
                                color=BACKGROUND_COLOR, winType='pyglet', fullscr=True)
    self.window.refreshThreshold = 1 / 60

    img_width, img_height = 3620, 1458
    self.practice_img = visual.ImageStim(win=self.window,
                                         image="Instructions/BasicTargetPatterns.png",
                                         units="pix", pos=(0, 0),
                                         size=(img_width / 5, img_height / 5))
    self.practice_message = u'Practice Trial'
    self.test_message = u'Test Trial'

    # Set hint button positions
    self.button_width, self.button_height = WIDTH / 10, HEIGHT / 16
    button_pos_x = PUZZLE_BOARD_WIDTH / 2 + self.button_height

    # Set up hint button and hint text
    self.help_button = visual.Rect(self.window, units='pix', pos=(0, -button_pos_x),
                                   width=self.button_width, height=self.button_height,
                                   fillColor='white', lineColor=None)
    self.help_text = visual.TextStim(self.window, 'Hint', color='black', units='pix',
                                     height=self.button_height / 2, pos=(0, -button_pos_x))
    self.continue_button = visual.Rect(self.window, units='pix', pos=(0, 0),
                                       width=self.button_width, height=self.button_height,
                                       fillColor='white')
    self.continue_text = visual.TextStim(self.window, 'Continue', color='black', units='pix',
                                         height=self.button_height / 2, pos=(0, 0))

    # Create fixation cross (shown at start of each difficulty level)
    self.fixation = visual.TextStim(self.window, text='+', pos=(0.0, 0.0), depth=0, rgb=None,
                                    color='black', colorSpace='rgb', opacity=1.0, contrast=1.0,
                                    units='', ori=0.0, height=FONTSIZE)
    self.time = core.Clock()

    # Load sounds
    self.gem_sound = sound.Sound(value='sounds/gem.wav')
    self.match_sound = sound.Sound(value='sounds/match.wav')
    self.error_sound = sound.Sound(value='sounds/falsch.wav')

    self.mouse = event.Mouse()
    self.mouse_down_counter = 0
    self.current_polygon_list = []
    self.current_hint_mark_list = []
    self.puzzle_board_coords = None
    self.match_three_coords = []
    self.polygon_size = None
    self.hint_x, self.hint_y = None, None
    self.x_positions = None
    self.y_positions = None
    self.clicked_tiles_list = []
    self.swapped = False
    self.cell_size = None
    self.participant_no = None
    self.level_number = None
    self.task_condition = None

    # Puzzle difficulty level outcomes
    self.puzzle_trial_number = None
    self.puzzle_version = None
    self.puzzle_level_number = None
    self.puzzle_condition = None  # [Training, Test]
    self.puzzle_width = None
    self.puzzle_height = None
    self.puzzle_tile_number = None
    self.puzzle_tile_size = None
    self.puzzle_move_number = None
    self.puzzle_reaction_time = None  # time to solve all four moves
    self.puzzle_difficulty_rating = None
    self.puzzle_number_errors = None  # number of invalid moves
    self.puzzle_number_moves = None  # update number of moves per puzzle (min. 4)
    self.puzzle_number_hints = None

    # Puzzle trial level outcomes
    self.move_trial_number = 0
    self.trial_clicked_tile_coords = None
    self.trial_hint_used = None
    self.trial_move_type = None  # [valid, invalid]
    self.trial_target_type = None
    self.trial_target_pattern = None
    self.trial_target_coordinates = None
    self.trial_number_distractors = None
    self.trial_search_time = 0
    self.trial_onset_time = 0
    self.trial_offset_time = 0
    self.trial_file_number = None

    # output data list
    self.output_data = []
def present(duration=120):
    BLUE = (0, 0, 255)
    BLACK = (0, 0, 0)
    RED = (255, 0, 0)
    YELLOW = (255, 255, 0)
    WHITE = (255, 255, 255)
    X = 1080
    Y = 720

    info = StreamInfo('Markers', 'Markers', 1, 0, 'float32', 'myuidw43536')
    outlet = StreamOutlet(info)
    # np.random.seed(random_state)

    # 1 is up, 2 is nothing
    markernames = [1, 2]
    randnums = np.random.randint(1, 3, 200)
    iterator = 0
    start = time.time()
    secs = 0.07

    # Set up trial parameters
    record_duration = np.float32(duration)

    # Initialize audio stimuli
    aud1 = sound.Sound(440, secs=secs)  # , octave=5, sampleRate=44100, secs=secs)
    aud1.setVolume(0.8)

    pygame.init()

    # Setup graphics
    screen = pygame.display.set_mode((X, Y), pygame.RESIZABLE)
    while True:
        event = pygame.event.wait()
        if event.type == pygame.MOUSEBUTTONDOWN:
            break

    while iterator < 200:  # or (time.time() - start > record_duration)
        pygame.init()
        word = ""
        wordd = ''
        if randnums[iterator] == 1:
            word = "up"
            wordd = 'up'
        elif randnums[iterator] == 2:
            word = "nothing"
            wordd = 'nothing'
        iterator = iterator + 1

        # set the pygame window name
        pygame.display.set_caption(word + " - Displaying for second")
        font = pygame.font.Font(pygame.font.get_default_font(), 36)
        text_surface = font.render(wordd, 1, pygame.Color('white'))
        screen.blit(text_surface, dest=(540, 360))
        pygame.display.flip()
        pygame.display.update()
        pygame.event.get()
        pygame.time.wait(1000)
        pygame.draw.rect(screen, BLACK, (0, 0, X, Y))
        pygame.draw.circle(screen, BLUE, (540, 360), 5)
        pygame.display.update()

        # wait 4 s
        pygame.display.set_caption("Wait 4 seconds for the beep and think about the direction presented")
        pygame.event.get()
        pygame.time.wait(4000)

        # present dot/crosshair to focus on and play sound
        pygame.display.set_caption("Displaying, please attend to the direction")
        pygame.draw.circle(screen, RED, (540, 360), 5)
        pygame.display.update()
        aud1.stop()
        aud1.play()
        timestamp = time.time()
        outlet.push_sample([randnums[iterator]], timestamp)
        pygame.event.get()
        pygame.time.wait(1000)

        pygame.display.set_caption("Done, take 3 seconds to rest")
        pygame.draw.rect(screen, BLACK, (0, 0, X, Y))
        pygame.display.update()
        pygame.event.get()
        pygame.time.wait(3000)

    # Cleanup
    pygame.quit()
    exit()
    return trials
def run(subjectID, subjectAge, subjectGender, date):
    ###
    ### Experiment data
    ###
    cwd = os.getcwd()
    output_dir = os.path.join(cwd, "output")
    sub_id = subjectID
    subject_dir = os.path.join(output_dir, str(sub_id))

    # setting up sub info for the first time
    if not os.path.exists(subject_dir):
        sub_dict = {}
        os.mkdir(subject_dir)
        sub_dict["Age"] = str(subjectAge)
        sub_dict["Gender"] = subjectGender
        sub_dict["Date"] = str(date)
        sub_dict_path = os.path.join(subject_dir, 'subject_info.json')
        with open(sub_dict_path, 'w') as f:
            json.dump(sub_dict, f, sort_keys=True, indent=4)

    # get number of runs:
    num_runs = 20

    ###
    ### Do all the setting up
    ###
    # create a window
    mywin = visual.Window([1000, 750], color=(255, 255, 255), monitor="testMonitor")

    # keep track of the mouse
    mouse = event.Mouse(visible=True)
    buttons = mouse.getPressed()

    # the rating scale(s): valence and arousal
    mark = visual.TextStim(mywin, text='|', color=(0, 0, 0), colorSpace='rgb255')
    valenceRatingScale = visual.RatingScale(mywin, low=1, high=200, marker=mark, markerColor='Black',
                                            scale=None, tickMarks=None, tickHeight=0,
                                            labels=('Negative', 'Positive'), showValue=False,
                                            lineColor='LightGray', stretch=2.5, markerExpansion=0.5,
                                            textColor='Black', showAccept=False, pos=(0, -0.3), textSize=0.6)
    arousalRatingScale = visual.RatingScale(mywin, low=1, high=200, marker=mark, markerColor='Black',
                                            scale=None, tickMarks=None, tickHeight=0,
                                            labels=('Low energy', 'High Energy'), showValue=False,
                                            lineColor='LightGray', stretch=2.5, markerExpansion=0.5,
                                            textColor='Black', showAccept=False, pos=(0, -0.5), textSize=0.6)
    next_button_text = visual.TextStim(mywin, text="Next", color=(0, 0, 0), colorSpace='rgb255',
                                       pos=(0, -280), height=20, units='pix')
    next_button = visual.Rect(mywin, width=150, height=50, units='pix',
                              lineColor=(0, 0, 0), lineColorSpace='rgb255', pos=(0, -280),
                              fillColor=(255, 255, 255), fillColorSpace='rgb255')

    # the play button for sounds
    play_button_text = visual.TextStim(mywin, text="Click play button to play sound",
                                       color=(0, 0, 0), colorSpace='rgb255', pos=(0, 0.2), height=0.05)
    button_vertices = [[-20, 33], [-20, -13], [20, 10]]
    play_button = visual.ShapeStim(mywin, units='pix', vertices=button_vertices,
                                   lineColor=(0, 0, 0), lineColorSpace='rgb255', pos=(0, 0),
                                   fillColor=(255, 255, 255), fillColorSpace='rgb255')

    # Set the stimulus directory
    stimulus_dir = os.path.join(os.path.dirname(cwd), 'STIMULI')

    ###
    ### Show instruction screen
    ###
    instructions = (
        "In the following task, you will be presented with some visual" +
        " or auditory stimuli. " +
        "Click and drag along the scales at the" +
        " bottom of the screen to reflect how negative or positive and" +
        " how low or high energy the video or sound is.\n\n\n" +
        " Click the button to start")
    instruction_text = visual.TextStim(mywin, text=instructions, color=(0, 0, 0), colorSpace='rgb255',
                                       pos=(0, 100), height=20, units='pix', wrapWidth=500)
    continue_text = visual.TextStim(mywin, text="Start", color=(0, 0, 0), colorSpace='rgb255',
                                    pos=(0, -50), height=20, units='pix')
    continue_button = visual.Rect(mywin, width=150, height=50, units='pix',
                                  lineColor=(0, 0, 0), lineColorSpace='rgb255', pos=(0, -50),
                                  fillColor=(255, 255, 255), fillColorSpace='rgb255')

    ready = False
    while not ready:
        instruction_text.draw()
        continue_button.draw()
        continue_text.draw()
        mywin.flip()
        if mouse.isPressedIn(continue_button, buttons=[0]):
            continue_button.setFillColor(color=(225, 225, 225), colorSpace='rgb255')
            instruction_text.draw()
            continue_button.draw()
            continue_text.draw()
            mywin.flip()
            core.wait(0.2)
            ready = True

    ###
    ### Do multiple runs
    ###
    for run in range(num_runs):
        order_data_path = os.path.join(subject_dir, 'videoPresentationOrder_run' + str(run) + '.json')
        order_data = open(order_data_path, 'w')
        stim_dict = {}
        stim_response_path = os.path.join(subject_dir, 'videoRatings_run' + str(run) + '.json')
        stim_response = open(stim_response_path, 'w')
        response_dict = {}

        # Pick the order of the images and the sounds
        video_binOrder = random.sample(range(4), 4)
        sound_binOrder = random.sample(range(7), 7)

        # Randomly picking the trials to show videos
        videoIndices = set(random.sample(range(11), 4))
        vidCount = 0
        soundCount = 0

        ###
        ### Do the drawings
        ###
        for trial in range(11):
            if trial in videoIndices:
                mode = "vid"
                # pick a video file
                video_bin = VIDEOBINS[video_binOrder[vidCount]]
                video_dir = os.path.join(stimulus_dir, "videos", video_bin)
                video_file = os.path.join(video_dir, random.choice(os.listdir(video_dir)))
                # making the stimuli
                clip = visual.MovieStim(mywin, video_file, loop=True, units='pix', pos=(0, 120), size=(800, 400))
                # adding files presented to dictionary
                stim_dict[trial] = video_file
                vidCount += 1
            else:
                mode = "sound"
                # pick a sound file
                sound_bin = SOUNDBINS[sound_binOrder[soundCount]]
                sound_dir = os.path.join(stimulus_dir, "sounds", sound_bin)
                sound_file = os.path.join(sound_dir, random.choice(os.listdir(sound_dir)))
                # making the stimuli
                soundClip = sound.Sound(sound_file, secs=2)
                # adding files presented to dictionary
                stim_dict[trial] = sound_file
                soundCount += 1
                soundPlayed = False

            # reset things:
            rating = False

            # movie timer
            if mode == "vid":
                timer = core.CountdownTimer(clip.duration)

            # draw and wait for response
            while rating == False:
                if mode == "vid":
                    if timer.getTime() == 0:
                        clip = visual.MovieStim(mywin, video_file, loop=True, units='pix', pos=(0, 120), size=(800, 400))
                        timer.reset(clip.duration)
                    clip.draw()
                if mode == "sound":
                    play_button = visual.ShapeStim(mywin, units='pix', vertices=button_vertices,
                                                   lineColor=(0, 0, 0), lineColorSpace='rgb255', pos=(0, 0),
                                                   fillColor=(255, 255, 255), fillColorSpace='rgb255')
                    play_button_text.draw()
                    play_button.draw()
                valenceRatingScale.draw()
                arousalRatingScale.draw()
                next_button.setFillColor(color=(255, 255, 255), colorSpace='rgb255')
                next_button.draw()
                next_button_text.draw()
                mywin.flip()
                if mouse.isPressedIn(play_button, buttons=[0]):
                    play_button = visual.ShapeStim(mywin, units='pix', vertices=button_vertices,
                                                   lineColor=(0, 0, 0), lineColorSpace='rgb255', pos=(0, 0),
                                                   fillColor=(225, 225, 225), fillColorSpace='rgb255')
                    play_button.draw()
                    play_button_text.draw()
                    valenceRatingScale.draw()
                    arousalRatingScale.draw()
                    next_button.draw()
                    next_button_text.draw()
                    mywin.flip()
                    mouse.clickReset()
                    core.wait(0.2)
                    soundClip.play()
                    soundPlayed = True
                if mouse.isPressedIn(next_button, buttons=[0]):
                    next_button.setFillColor(color=(225, 225, 225), colorSpace='rgb255')
                    next_button.draw()
                    next_button_text.draw()
                    if mode == "vid":
                        clip.draw()
                    if mode == "sound":
                        play_button.draw()
                        play_button_text.draw()
                    valenceRatingScale.draw()
                    arousalRatingScale.draw()
                    mywin.flip()
                    mouse.clickReset()
                    core.wait(0.2)
                    if mode == "vid" or soundPlayed == True:
                        if valenceRatingScale.getRating() and arousalRatingScale.getRating():
                            rating = True
                            finalValenceRating = valenceRatingScale.getRating() / 2
                            finalArousalRating = arousalRatingScale.getRating() / 2

            # if sound is still playing, stop
            if mode == "sound":
                soundClip.stop()

            # add response to dictionary, whether or not heard sound
            response_dict[trial] = [finalValenceRating, finalArousalRating]
            valenceRatingScale.reset()
            arousalRatingScale.reset()

            # clean the window
            mywin.flip()

        ###
        ### write data to files
        ###
        json.dump(stim_dict, order_data, sort_keys=True, indent=4)
        json.dump(response_dict, stim_response, sort_keys=True, indent=4)

    finish_text = "End"
    finish = visual.TextStim(mywin, text=finish_text, color=(0, 0, 0), colorSpace='rgb255',
                             pos=(0, 0), height=0.075)
    finish.draw()
    mywin.flip()
    core.wait(5)

    # cleanup
    mywin.close()
    order_data.close()
    stim_response.close()
    core.quit()
    'You will hear a sentence and will be asked to respond to the last word in the sentence. \n\n\nPRESS ANY KEY TO CONTINUE',
    font='Arial', pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1, languageStyle='LTR', depth=0.0)
key_resp_instruc = keyboard.Keyboard()

# Initialize components for Routine "Trial"
TrialClock = core.Clock()
sound_1 = sound.Sound('A', secs=-1, stereo=True, hamming=True, name='sound_1')
sound_1.setVolume(1)
rating = visual.RatingScale(win=win, name='rating', marker='triangle', size=1.0,
                            pos=[0.0, -0.4], choices=["'correct'", "'incorrect'"], tickHeight=-1)

# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine

# ------Prepare to start Routine "Instructions"-------
CONF["trigger"]["labels"]) # Start showing experiment screen screen = Screen(CONF) # initiate system for saving data datalog = Datalog(OUTPUT_FOLDER=os.path.join( 'output', CONF["participant"] + "_" + CONF["session"]), CONF=CONF) # This is for saving data TODO: apply everywhere # initiate psychopy stuff kb = keyboard.Keyboard() mainClock = core.MonotonicClock() # starts clock for timestamping events alarm = sound.Sound(os.path.join('sounds', CONF["instructions"]["alarm"]), stereo=True) questionnaireReminder = sound.Sound(os.path.join( 'sounds', CONF["instructions"]["questionnaireReminder"]), stereo=True) scorer = Scorer() logging.info('Initialization completed') ######################################################################### # function for quitting def quitExperimentIf(shouldQuit): "Quit experiment if condition is met"
def __init__(self, tracker, win):
    '''Initialize a Custom EyeLinkCoreGraphics

    tracker: an eye-tracker instance
    win: the Psychopy display we plan to use for stimulus presentation
    '''
    pylink.EyeLinkCustomDisplay.__init__(self)

    self.pylinkMinorVer = pylink.__version__.split('.')[1]  # minor version 1-Mac, 11-Win/Linux
    self.display = win
    self.w, self.h = win.size
    # on Macs with HiDPI screens, force the drawing routine to use the size defined
    # in the monitor instance, as win.size will give the wrong size of the screen
    if os.name == 'posix':
        self.w, self.h = win.monitor.getSizePix()
    # self.display.autoLog = False

    # check the screen units of Psychopy, forcing the screen to use 'pix'
    self.units = win.units
    if self.units != 'pix':
        self.display.setUnits('pix')

    # Simple warning beeps
    self.__target_beep__ = sound.Sound('A', octave=4, secs=0.1)
    self.__target_beep__done__ = sound.Sound('E', octave=4, secs=0.1)
    self.__target_beep__error__ = sound.Sound('E', octave=6, secs=0.1)

    self.imgBuffInitType = 'I'
    self.imagebuffer = array.array(self.imgBuffInitType)
    self.resizeImagebuffer = array.array(self.imgBuffInitType)
    self.pal = None
    self.bg_color = win.color
    self.img_scaling_factor = 3
    self.size = (192 * self.img_scaling_factor, 160 * self.img_scaling_factor)

    # initial setup for the mouse
    self.display.mouseVisible = False
    self.mouse = event.Mouse(visible=False)
    self.last_mouse_state = -1

    # image title & calibration instructions
    self.msgHeight = self.size[1] / 20.0
    self.title = visual.TextStim(self.display, '', height=self.msgHeight, color=[1, 1, 1],
                                 pos=(0, -self.size[1] / 2 - self.msgHeight),
                                 wrapWidth=self.w, units='pix')
    self.calibInst = visual.TextStim(self.display, alignHoriz='left', alignVert='top',
                                     height=self.msgHeight, color=[1, 1, 1],
                                     pos=(-self.w / 2.0, self.h / 2.0), units='pix',
                                     text='Enter: Show/Hide camera image\n' +
                                          'Left/Right: Switch camera view\n' +
                                          'C: Calibration\n' +
                                          'V: Validation\n' +
                                          'O: Start Recording\n' +
                                          '+=/-: CR threshold\n' +
                                          'Up/Down: Pupil threshold\n' +
                                          'Alt+arrows: Search limit')

    # lines for drawing cross hair etc.
    self.line = visual.Line(self.display, start=(0, 0), end=(0, 0),
                            lineWidth=2.0, lineColor=[0, 0, 0], units='pix')

    self.calTarget = 'default'  # could be 'default', 'rotatingCheckerboard', 'movie'
    self.animatedTarget = False  # this is like a switch; when it's turned on the animated target is displayed
    self.calibTar = visual.Circle(self.display, radius=15, lineWidth=8,
                                  lineColor='black', fillColor='white', units='pix')
    self.movieTargetFile = None

    # set a few tracker parameters
    self.tracker = tracker
    self.tracker.setOfflineMode()
    self.tracker_version = tracker.getTrackerVersion()
    if self.tracker_version >= 3:
        self.tracker.sendCommand("enable_search_limits=YES")
        self.tracker.sendCommand("track_search_limits=YES")
        self.tracker.sendCommand("autothreshold_click=YES")
        self.tracker.sendCommand("autothreshold_repeat=YES")
        self.tracker.sendCommand("enable_camera_position_detect=YES")

    # let the tracker know the correct screen resolution being used
    self.tracker.sendCommand("screen_pixel_coords = 0 0 %d %d" % (self.w - 1, self.h - 1))
                          radius=50, fillColor=black, lineColor=black, pos=[0, 0], name="small")

# Visual stimulus lists
completeList = [[circleSMALL, circleBIG], [circleLOW, circleHIGH],
                [circleHIGH, circleSMALL], [circleLOW, circleBIG]] * (nTrials // 4)  # integer count needed for list repetition
random.shuffle(completeList)

# Auditory stimuli
highSound = sound.Sound('A', octave=5, sampleRate=44100, secs=0.8, stereo=True, name="high")
lowSound = sound.Sound('A', octave=2, sampleRate=44100, secs=0.8, stereo=True, name="low")
completeSounds = [highSound, lowSound] * (nTrials // 2)
random.shuffle(completeSounds)

for i in range(0, nTrials):
    trial = i + 1
    print(i)
    win.flip()
if useDB:
    sessionID = startExp(expName, createTableStatement, dbConf)
else:
    sessionID = 1

window = visual.Window(units="pix", size=(1024, 768), rgb="black", fullscr=True)
mouse = event.Mouse(visible=False)
timer = core.Clock()
seed = random.randrange(1e6)
rng = random.Random(seed)

#######################
# Feedback Global Settings
abortKey = '9'
correct1 = sound.Sound(500, secs=.1)
correct2 = sound.Sound(1000, secs=.1)
error = sound.Sound(300, secs=.3)
wrongKey = sound.Sound(100, secs=1)
wrongKeyText = visual.TextStim(window, text="Invalid Response\nReposition Hands\nPress space to continue", pos=(0, 0))

def getFeedbackText(correct, score, nTrials):
    if (correct == True):
        string = visual.TextStim(window, text="Correct!\n\nScore: " + str(score) + "/" + str(nTrials), pos=(0, 0))
    if (correct == False):
        string = visual.TextStim(window, text="Incorrect!\n\nScore: " + str(score) + "/" + str(nTrials), pos=(0, 0))
    return(string)

########################
# Other Globals
fpP = .35
    colorSpace='rgb', opacity=1, depth=-1.0)
main_fix = visual.TextStim(win=win, name='main_fix', text='+',
    font='Arial', pos=(0, 0), height=74.45, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1, depth=-2.0)
main_sound = sound.Sound('A', secs=-1)
main_sound.setVolume(1)
main_image = visual.ImageStim(win=win, name='main_image',
    image='sin', mask=None, ori=0, pos=[0, 0], size=(312, 399),
    color=[1, 1, 1], colorSpace='rgb', opacity=1,
    flipHoriz=False, flipVert=False, texRes=128, interpolate=True,
        am = am[:len(t)]
    elif am_type == 'sine':
        am = np.sin(2 * np.pi * am_freq * t)

    carrier = 0.5 * np.sin(2 * np.pi * carrier_freq * t) + 0.5
    am_out = carrier * am

    return am_out


# Generate stimuli
am1 = generate_am_waveform(900, 45, secs=soa, sample_rate=44100)
am2 = generate_am_waveform(770, 40.018, secs=soa, sample_rate=44100)

aud1 = sound.Sound(am1)
aud1.setVolume(0.8)
aud2 = sound.Sound(am2)
aud2.setVolume(0.8)
auds = [aud1, aud2]

mywin.flip()

for ii, trial in trials.iterrows():
    # Intertrial interval
    core.wait(iti + np.random.rand() * jitter)

    # Select stimulus frequency
    ind = trials['stim_freq'].iloc[ii]
    auds[ind].play()
# -*- coding: utf-8 -*-
#!/usr/bin/env python2
'''
by 何吉波@优视眼动科技.
This code plays a sound stimulus; this version will probably only work on Windows.
'''
from psychopy import sound, core

tada = sound.Sound('tada.wav')
tada.play()
core.wait(2)
core.quit()
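# The fixed core.wait(2) above assumes 'tada.wav' is no longer than two seconds. A
# small variant (a sketch using the same hypothetical file) waits for the actual
# length of the loaded sound via Sound.getDuration(), as the talker-example snippet
# earlier in this collection does:
from psychopy import sound, core

tada = sound.Sound('tada.wav')
tada.play()
core.wait(tada.getDuration())  # block for exactly the duration of the file
core.quit()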
    'Which Tone First': 'Standard',
    'Tone Volume (0-1)': 0.5,
    'Session': ''
}

# Add today's date to the experiment info
expInfo['dateStr'] = data.getDateStr()

# Save parameter file (or quit if subject presses cancel)
dlg = gui.DlgFromDict(expInfo, title='Pitch Discrimination - MCS', fixed=['dateStr'])

# Volume (arbitrary units) range for test
testRange = numpy.linspace(0.1, 1.0, 10)

# Test tone (1000 Hz, 1 s)
testTone = sound.Sound(1000, expInfo['Tone Duration'])

# Find maximum and minimum comparison frequencies, given the Standard and Comparison Interval
maxComparison = expInfo['Standard Frequency (Hz)'] + expInfo['Comparison Interval'] * (numFrequencies // 2)
minComparison = expInfo['Standard Frequency (Hz)'] - expInfo['Comparison Interval'] * (numFrequencies // 2)

# Create array of comparison frequencies to be used for experiment
comparisonFreqs = numpy.linspace(minComparison, maxComparison, numFrequencies)

# Build array of all comparison tones to be tested (size = numFrequencies * N trials/comparison),
# then randomize the tone order in the array
comparisonFreq = []
for i in range(expInfo['N trials/comparison']):
    comparisonFreq = numpy.append(comparisonFreq, comparisonFreqs)
random.shuffle(comparisonFreq)

# Create data file to write data to
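# A worked example of the comparison-frequency grid built above, using illustrative
# values (a 500 Hz standard, a 4 Hz comparison interval, numFrequencies = 7 -- these
# are assumptions, not the experiment's actual defaults):
import numpy

standard, interval, numFrequencies = 500, 4, 7
maxComparison = standard + interval * (numFrequencies // 2)   # 512 Hz
minComparison = standard - interval * (numFrequencies // 2)   # 488 Hz
print(numpy.linspace(minComparison, maxComparison, numFrequencies))
# -> [488. 492. 496. 500. 504. 508. 512.]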
    trialList=data.importConditions(
        os.path.join(_thisDir, '..', '..', SRTPath, 'WordList.csv'),
        selection=ThisBlockSelList),
    seed=None, name='trials')
thisExp.addLoop(trials)  # add the loop to the experiment
thisTrial = trials.trialList[0]  # so we can initialise stimuli with some values

# Make a list of all of the sounds
SoundList = []
for i in trials.trialList:
    print(i['Word'])
    TempSound = sound.Sound(os.path.join(_thisDir, '..', '..', SoundPath, '%s.wav' % (i['Word'])), secs=1.0)
    TempSound.setVolume(1)
    SoundList.append(TempSound)
print('Loaded sound files')

# How many words in the test list?
NWords = len(trials.trialList)
print("There are %d words" % (NWords))

# Add the blank dummy to the end when someone recalls all WordsToRemove
TempSound = sound.Sound(os.path.join(_thisDir, '..', '..', SoundPath, '%s.wav' % ('BLANK')), secs=1.0)
TempSound.setVolume(1)
SoundList.append(TempSound)

# create word list
    ori=0, name='text_empty2', text=None, font=u'Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color=u'white', colorSpace='rgb', opacity=1, depth=-1.0)

# Initialize components for Routine "trial"
trialClock = core.Clock()
import random
definitions = sound.Sound('A', secs=-1)
definitions.setVolume(1)

# Initialize components for Routine "delay"
delayClock = core.Clock()
text_empty3 = visual.TextStim(win=win,
    ori=0, name='text_empty3', text=None, font=u'Arial',
    pos=[0, 0], height=16, wrapWidth=None,
    color=u'white', colorSpace='rgb', opacity=1,
def __init__(self, **kwargs):
    self.DIGIT_DISPLAY_TIME = kwargs.get('digit_display_time', 0.250)
    self.DIGIT_RANGE = kwargs.get('digit_range', (0, 9))
    self.DIGIT_SIZES = kwargs.get('digit_sizes', [1.8, 2.7, 3.5, 3.8, 4.5])
    self.TARGET_DIGIT = kwargs.get('target_digit', random.randint(*self.DIGIT_RANGE))
    self.NUM_DIGIT_SETS = kwargs.get('num_digit_sets', 25)
    self.MASK_TIME = kwargs.get('mask_time', 0.900)
    self.MASK_DIAMETER = kwargs.get('mask_diameter', 3.0)
    self.MAX_FAILS = kwargs.get('max_fails', 3)
    self.CORRECT_FREQ = kwargs.get('correct_freq', 440)
    self.WRONG_FREQ = kwargs.get('wrong_freq', 330)
    self.TONE_LENGTH = kwargs.get('tone_length', 0.5)
    self.SOUND_INIT_SAMPLES = kwargs.get('sound_init_samples', 48000)
    self.PRACTICE_DIGIT_SETS = kwargs.get('practice_digit_sets', 2)
    self.DATA_DIR = kwargs.get('data_dir', 'sart_data')
    self.MONITOR_RESOLUTION = kwargs.get('monitor_resolution', (1024, 768))
    self.FULLSCREEN = kwargs.get('fullscreen', True)

    # if the data dir doesn't exist, create it
    if not os.path.isdir(self.DATA_DIR):
        try:
            os.mkdir(self.DATA_DIR)
        except Exception as e:
            print(e)
            print("Error: cannot create data directory: " + self.DATA_DIR)
            sys.exit(1)

    # then collect the subject's ID and test number; if the file already exists, prompt to confirm overwrite
    while True:
        subject_info = self.get_subject_info(sys.argv[1:])
        self.log_file = os.path.join(self.DATA_DIR, '_'.join(subject_info) + '.csv')
        if os.path.isfile(self.log_file):
            rename_dialog = gui.Dlg(title='Error: Log File Exists')
            rename_dialog.addText(
                'A log file with this subject id ({0}) and test number {1} already exists. Overwrite?'
                .format(*subject_info))
            rename_dialog.show()
            if rename_dialog.OK:
                break
            else:
                break
        else:
            break

    # self.log_file = open(self.log_file, "w")
    self.data = []

    # this is the basic data output format (to CSV)
    self.Datum = namedtuple('Datum', ['trial', 'target', 'digit', 'success', 'rt', 'note'])

    sound.init(self.SOUND_INIT_SAMPLES, buffer=128)

    # init components for rest of experiment
    self.sound_correct = sound.Sound(value=self.CORRECT_FREQ, secs=self.TONE_LENGTH)
    self.sound_incorrect = sound.Sound(value=self.WRONG_FREQ, secs=self.TONE_LENGTH)
    self.window = visual.Window(self.MONITOR_RESOLUTION, monitor='testMonitor',
                                units='cm', fullscr=self.FULLSCREEN)
    self.mouse = event.Mouse(win=self.window)
    self.MASTER_CLOCK = core.Clock()  # this is never used, holdover from original code
    self.TIMER = core.Clock()
fixation = visual.GratingStim(win, pos=(0, 0), tex='sin', mask='circle', size=10, texRes=20, sf=0)

sound_files = glob.glob(os.path.join('/Users/beauchamplab/Documents/jwdegee/repos/UvA_experiments/sounds/', '*.wav'))
sounds = {}
for sf in sound_files:
    sound_name = os.path.splitext(os.path.split(sf)[-1])[0]
    sound_var = sound.Sound(sf)
    sounds.update({sound_name: sound_var})
target = sounds['TORC_TARGET']
noise = sounds['TORC_424_02_h501']

# and some handy clocks to keep track of time
globalClock = core.Clock()
trialClock = core.Clock()

# display instructions and wait
message1 = visual.TextStim(
    win,
    text="""Signal present? Press 'f' for 1st and 'j' for 2nd interval.""",
    pos=[0, 0], font='Helvetica Neue', italic=True,
""" For sound use, I really recommend installing pygame (v1.8 or later). For users of the intel-Mac app bundle you already have it. Pyglet will play sounds, but I find them unpredictable in timing and (sometimes they don't seem to play at all. :-( PsychoPy sound handling with pygame is not ideal, with a latency of 20-30ms, but at least with pygame it is robust - all sounds play consistently. I hope one day to write a better, low-level handler for playing sounds directly from the drivers (e.g. CoreAudio, DirectSound, ASIO), but for now, pygame will have to do. """ import sys from psychopy import sound,core, visual highA = sound.Sound('A',octave=3, sampleRate=22050, secs=0.8, bits=8) highA.setVolume(0.8) tick = sound.Sound(800,secs=0.01,sampleRate=44100, bits=8)#sample rate ignored because already set tock = sound.Sound('600',secs=0.01) highA.play() core.wait(0.8) tick.play() core.wait(0.4) tock.play() core.wait(0.2) if sys.platform=='win32': ding = sound.Sound('ding') ding.play()
gramAVisualFiles = os.listdir(gramAVisualDir)
gramBVisualFiles = os.listdir(gramBVisualDir)
blockFile = 'stimuli/metadata/order_gramB.txt'

gramAStimsVisual = [visual.ImageStim(win, gramAVisualDir + filename) for filename in gramAVisualFiles]
gramBStimsVisual = [visual.ImageStim(win, gramBVisualDir + filename) for filename in gramBVisualFiles]
gramAStimsAudio = [sound.Sound(gramAAudioDir + filename) for filename in gramAAudioFiles]
gramBStimsAudio = [sound.Sound(gramBAudioDir + filename) for filename in gramBAudioFiles]

fixation = visual.ImageStim(win, stimuliDir + 'fix.svg.png')

data = []

### FUNCTION DEFINITIONS ###
def parseBlocks(blockFile):
    # Data Types
    blockTemplate = {'id': 0, 'bites': []}
    biteTemplate = {'grammar': '', 'symbols': []}