def make_stim(path, file_type, win):
    """Convert a file_type tag into the matching PsychoPy stimulus.

    'movie'      -> visual.MovieStim
    'movie_loop' -> visual.MovieStim with loop=True
    'audio'      -> sound.Sound
    'image'      -> visual.ImageStim

    An unrecognised tag returns None, matching the original
    if/elif fall-through behaviour.
    """
    builders = {
        'movie': lambda: visual.MovieStim(win, path),
        'movie_loop': lambda: visual.MovieStim(win, path, loop=True),
        'audio': lambda: sound.Sound(path),
        'image': lambda: visual.ImageStim(win, path),
    }
    builder = builders.get(file_type)
    return builder() if builder is not None else None
def __init__(self, window, movie, destination_path, movie_dimensions=(1, 1),
             units='norm', tick_marks=None,
             rating_description='Very negative . . . Very positive',
             header_text=None, header_size=0.15, stretch_horizontal=2.7,
             marker_style='triangle', marker_color='White', marker_start=5,
             low=1, high=9, pos=None, button_box=None, *args, **kwargs):
    """Build a video-rating trial: a movie, an optional header line and a
    continuous rating scale drawn underneath it.

    Parameters
    ----------
    window : psychopy.visual.Window
        Window everything is drawn into.
    movie : str
        Path of the video file to present.
    destination_path : str
        Path the rating history will be written to (stored on self.dest).
    tick_marks : sequence of int, optional
        Scale tick positions; defaults to 1..9.  The previous version used
        a mutable list literal as the default, which Python shares across
        all calls; a None sentinel avoids that pitfall.
    button_box : object, optional
        Optional response device, stored for later use.

    Extra positional/keyword arguments are forwarded to
    visual.RatingScale unchanged.
    """
    super(VideoRating, self).__init__(window)
    # Build the default tick list fresh on every call instead of sharing
    # one module-level mutable default between instances.
    if tick_marks is None:
        tick_marks = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    # FIXME: video should maintain aspect ratio regardless of window dimensions
    self.mov = visual.MovieStim(self.window, movie, size=movie_dimensions,
                                units=units, flipVert=False, loop=False)
    # Optional header text drawn above the movie.
    if header_text:
        self.header_text = visual.TextStim(
            self.window,
            text=header_text,
            pos=(0, 0.7),
            height=header_size,
            wrapWidth=2.0,  # ??
            units=units)
    else:
        self.header_text = None
    self.rating_scale = visual.RatingScale(self.window,
                                           low=low,
                                           high=high,
                                           tickMarks=tick_marks,
                                           precision=1,
                                           pos=(0, -0.75),
                                           stretchHoriz=stretch_horizontal,
                                           showAccept=False,
                                           acceptKeys=[None],
                                           markerStyle=marker_style,
                                           markerColor=marker_color,
                                           markerStart=marker_start,
                                           *args, **kwargs)
    self.rating_scale.setDescription(rating_description)
    # The destination path to write the history to
    self.dest = destination_path
    self.button_box = button_box
def playclip(stimpath, stim):
    """Show a fixation, play one movie clip to completion, then wait for a
    stop key; logs stimulus onset and response times into `trials`.

    Relies on module-level globals: win, pparallel, trigger_stim,
    trialClock, trials, fixation, keyStop.
    """
    fixation()
    core.wait(0.3)
    #pparallel.setData(0) # sets all pin lo
    clip = visual.MovieStim(win=win, name='clip', filename=stimpath + stim,
                            size=[800, 450], ori=0, pos=[0, 0], opacity=1,
                            depth=-1.0)
    # Pulse the stimulus trigger code on the parallel port for ~5 ms,
    # then drop all pins low again.
    pparallel.setData(trigger_stim)
    core.wait(0.005)
    pparallel.setData(0)
    stimOnset = trialClock.getTime()
    # Draw frames until the movie reports it has finished.
    while clip.status != visual.FINISHED:
        clip.draw()
        win.flip()
    fixation()
    # get key press at the end of clip
    event.waitKeys(keyList=keyStop)
    respTime = trialClock.getTime()
    #mouse.clickReset()
    #button, time = mouse.getPressed(getTime=True)
    #print('mouse: ', button)
    #event.waitKeys(keyList= button)
    # Record onset and response timestamps into the trial handler.
    trials.addData('stimOnset', stimOnset)
    trials.addData('respTime', respTime)
def __init__(self, w, h, tracker, window, reward, target_color=1,
             target_size=20, target_image=None, use_gabor=False,
             movie_stim=None):
    """Calibration routine with a configurable fixation target.

    The target is chosen by priority: a movie (movie_stim), else a plain
    circle or a Gabor patch (use_gabor) when no image is given, else an
    image stimulus.  Calibration beeps are overridden so their length
    matches the reward duration.

    Parameters: w/h are passed straight to psychocal along with tracker
    and window; reward must expose a .duration attribute (seconds).
    """
    # Old-style explicit base-class call (pre-`super`) kept as written.
    psychocal.psychocal.__init__(self, w, h, tracker, window)
    self.reward = reward
    self.duration = reward.duration  # beep length, taken from the reward
    self.phase = 0.0                 # gabor drift phase, advanced elsewhere
    self.animate = False
    self.use_gabor = use_gabor
    self.correct_fixation = False
    self.use_movie = False
    self.tcolor = target_color
    if movie_stim is not None:
        # Movie target takes precedence over image/gabor/circle.
        self.targetout = visual.MovieStim(self.window, movie_stim,
                                          flipVert=False, size=target_size,
                                          pos=(0, 0))
        self.use_movie = True
    elif target_image is None:
        if not use_gabor:
            # Simple filled circle of the requested colour.
            self.targetout = visual.Circle(self.window, pos=(0, 0),
                                           radius=target_size,
                                           fillColor=self.tcolor,
                                           lineColor=self.tcolor,
                                           units='pix',
                                           fillColorSpace='rgb',
                                           lineColorSpace='rgb')
        else:
            # Gabor patch; spatial frequency scales inversely with size.
            self.targetout = visual.GratingStim(self.window, mask='gauss',
                                                units="pix",
                                                sf=5 / target_size,
                                                ori=60.0, size=target_size,
                                                color=target_color,
                                                colorSpace='rgb')
    else:
        self.targetout = visual.ImageStim(self.window, target_image,
                                          size=target_size, units="pix")
    #override sound settings to use reward duration
    self.__target_beep__ = sound.Sound(800, secs=self.duration)
    self.__target_beep__done__ = sound.Sound(1200, secs=self.duration)
    self.__target_beep__error__ = sound.Sound(400, secs=self.duration)
def setMovie(win, video):
    """Load *video* into a 1280x720 MovieStim on *win*, print its native
    size and duration, and return the stimulus."""
    clip = visual.MovieStim(win, video,
                            size=(1280, 720),
                            flipVert=False,
                            flipHoriz=False,
                            loop=False)
    print('orig movie size=%s' % clip.size)
    print('duration=%.2fs' % clip.duration)
    return clip
def natural_movies(movie, blank_time, windows):
    """Play one movie file simultaneously on every window, then hold a
    blank screen for blank_time seconds.

    Returns 'quit' on escape, 'skip' on space, otherwise 'continue'.
    """
    stims = [visual.MovieStim(window, filename=movie, volume=0.0,
                              loop=False, opacity=0.0,
                              size=(window.size[0], window.size[1]),
                              name='movie_stimulus', autoLog=False,
                              units='pix')
             for window in windows]
    # Reveal all copies at once.
    for stim in stims:
        stim.opacity = 1.0
    clock = core.Clock()
    # NOTE: playback is timed against the duration of the last stimulus
    # (`stim` deliberately leaks out of the loop above, as in the
    # original implementation).
    while stim.duration - clock.getTime() >= 0.1:
        for stim in stims:
            stim.draw()
        for window in windows:
            window.flip()
        if event.getKeys(keyList=['escape']):
            return 'quit'
        if event.getKeys(keyList=['space']):
            return 'skip'
    # Hide the movies again and flip once so screens go blank.
    for stim in stims:
        stim.opacity = 0.0
        stim.draw()
    for window in windows:
        window.flip()
    # Blank interval: keep polling the quit/skip keys while waiting.
    blank_start = clock.getTime()
    while clock.getTime() - blank_start <= blank_time:
        if event.getKeys(keyList=['escape']):
            return 'quit'
        if event.getKeys(keyList=['space']):
            return 'skip'
    return 'continue'
def run_instructions(self, win, task): "Display the instructions for the game." aud_inst_path = 'Audio/Instructions/' if task == 'choice': sz = [1500, 850] else: sz = [1250, 700] instructions = visual.MovieStim(win=win, filename=aud_inst_path + task + '_instructions.mp4', size=sz, flipHoriz=False) #[1500,850] audio_inst = sound.Sound(aud_inst_path + task + '_instructions.wav') #display instructions and wait audio_inst.play() trialClock = core.Clock() double_click, double_time, double_time2, double_time3 = False, None, None, None while instructions._player.time <= int(instructions.duration): key = event.getKeys() instructions.draw() win.flip() if key == ['escape']: if audio_inst: audio_inst.stop() print 'QUITing {} instructions...'.format(task) return 'QUIT' #check for triple click if double_time and trialClock.getTime() - double_time >= 1: double_click, double_time = False, None elif double_time2 and trialClock.getTime() - double_time2 >= 1: double_click, doublet_time, double_time2 = False, None, None if double_click == False and key == ['period']: double_click = 'maybe' double_time = trialClock.getTime() elif double_click == 'maybe' and key == ['period']: double_time2 = trialClock.getTime() if double_time2 - double_time > 1: double_click, double_time, double_time2 = False, None, None elif double_time2 - double_time <= 1: double_click = 'yes' elif double_click == 'yes' and key == ['period']: double_time3 = trialClock.getTime() if double_time3 - double_time2 > 1: double_click, double_time, double_time2, double_time3 = False, None, None, None elif double_time3 - double_time2 <= 1: return 'QUIT' win.flip()
def test_mov(self):
    """Smoke test: load the bundled test movie and draw ten frames."""
    win = self.win
    if win.winType == 'pygame':
        pytest.skip("movies only available for pyglet backend")
    win.flip()
    # Resolve the bundled test movie and fail loudly if it is missing.
    fileName = os.path.join(utils.TESTS_DATA_PATH, 'testMovie.mp4')
    if not os.path.isfile(fileName):
        raise IOError('Could not find movie file: %s' % os.path.abspath(fileName))
    mov = visual.MovieStim(win, fileName)
    for _ in range(10):
        mov.draw()
        win.flip()
def video():
    """Full-screen demo: draw the display border, then play the cat movie
    until it finishes; escape or q closes the window and quits."""
    testWin = visual.Window(size=(1280, 800),
                            monitor="tobiiMonitor",
                            units="pix",
                            screen=0,
                            fullscr=True,
                            color=(-1, -1, -1),
                            waitBlanking=False)
    drawBorder(testWin, 'line')
    mov = visual.MovieStim(testWin,
                           filename='images/display/Cats_Being_Jerks.mp4',
                           units='norm',
                           size=(1.0, 1.0))
    while mov.status != visual.FINISHED:
        mov.draw()
        testWin.flip()
        pressed = event.getKeys(keyList=['escape', 'q'])
        if pressed:
            testWin.close()
            core.quit()
    testWin.close()
def __init__(self, window, movie, movie_dimensions=None, *args, **kwargs):
    '''Constructor for the Video stimulus.

    Arguments:
    movie - A filename (string) for the video file.
    movie_dimensions - Movie dimensions. If not specified, defaults to
        half of the window's width and height (i.e. a quarter of the
        window area).

    Extra positional/keyword arguments are forwarded to
    visual.MovieStim unchanged.
    '''
    self.window = window
    movie_dims = None
    if movie_dimensions:
        movie_dims = movie_dimensions
    else:
        # Default to half the window size in each dimension.
        # NOTE(review): `/ 2` truncates under Python 2 and yields a
        # float under Python 3 — confirm MovieStim accepts both.
        movie_dims = (self.window.size[0] / 2, self.window.size[1] / 2)
    self.mov = visual.MovieStim(self.window, movie, size=movie_dims,
                                flipVert=False, loop=False, *args, **kwargs)
def test_mov(self):
    """Render a vertically and horizontally flipped movie and compare the
    first drawn frame against the stored reference screenshot."""
    win = self.win
    if win.winType == 'pygame':
        pytest.skip("movies only available for pyglet backend")
    win.flip()
    # Resolve the bundled test movie and fail loudly if it is missing.
    fileName = os.path.join(utils.TESTS_DATA_PATH, 'testMovie.mp4')
    if not os.path.isfile(fileName):
        raise IOError('Could not find movie file: %s' % os.path.abspath(fileName))
    pos = [0.6 * self.scaleFactor, -0.6 * self.scaleFactor]
    mov = visual.MovieStim(win, fileName, pos=pos)
    mov.setFlipVert(True)
    mov.setFlipHoriz(True)
    # First frame: draw, screenshot-compare before flipping, then flip.
    mov.draw()
    utils.compareScreenshot('movFrame1_%s.png' % (self.contextName), win)
    win.flip()
    # Remaining nine frames are drawn without comparison.
    for _ in range(9):
        mov.draw()
        win.flip()
    str(mov)  # check that str(xxx) is working
def playmask(stimpath, stim):
    """Play the visual mask movie to completion while pulsing the mask
    trigger code on the parallel port and logging mask onset time.

    Relies on module-level globals: win, pparallel, trigger_mask,
    trialClock, trials.
    """
    vismask = visual.MovieStim(
        win=win,
        filename=stimpath + stim,
        #image = '/home/claire/Documents/Experiment/Imagery/Clips/Animal/s_frame_cat' + str(n) +'.png',
        pos=[0, 0],
        size=[800, 450],
        opacity=1,
        units='pix')
    pparallel.setData(0)  # sets all pin lo
    # Pulse the mask trigger code for ~5 ms, then drop pins low again.
    pparallel.setData(trigger_mask)
    core.wait(0.005)
    pparallel.setData(0)
    # win.logOnFlip('parallel port trigger mask: %d' %trigger_mask , level=logging.EXP)
    maskOnset = trialClock.getTime()
    # Draw frames until the mask movie reports it has finished.
    while vismask.status != visual.FINISHED:
        vismask.draw()
        win.flip()
    trials.addData('maskOnset', maskOnset)
from psychopy import visual, event import numpy win = visual.Window([600, 600], rgb=-1) gabor = visual.GratingStim(win, mask='gauss', pos=[-0.5, -0.5], color=[0, 0, 1], sf=5, ori=30) movie = visual.MovieStim(win, 'jwpIntro.mov', units='pix', pos=[100, 100], size=[160, 120]) text = visual.TextStim(win, pos=[0.5, -0.5], text=u"unicode (eg \u03A8 \u040A \u03A3)", font=['Times New Roman']) faceRGB = visual.ImageStim(win, image='face.jpg', pos=[-0.5, 0.5]) fixSpot = visual.GratingStim(win, tex=None, mask="gauss", size=(0.05, 0.05), color='white') myMouse = event.Mouse(win=win) t = 0.0 while not event.getKeys(keyList=['escape', 'q']): #get mouse events mouse_dX, mouse_dY = myMouse.getRel()
Demo of MovieStim MovieStim opens a video file and displays it on a window. """ from psychopy import visual, core, event, constants # window to present the video win = visual.Window((800, 600), fullscr=False) # create a new movie stimulus instance mov = visual.MovieStim( win, 'default.mp4', # path to video file size=(256, 256), flipVert=False, flipHoriz=False, loop=True, noAudio=False, volume=0.1, autoStart=False) # print some information about the movie print('orig movie size={}'.format(mov.frameSize)) print('orig movie duration={}'.format(mov.duration)) # instructions instrText = "`s` Start/Resume\n`p` Pause\n`r` Restart\n`q` Stop and Close" instr = visual.TextStim(win, instrText, pos=(0.0, -0.75)) # main loop while mov.status != constants.FINISHED:
font=u'Arial', pos=[0, 0], height=0.5, wrapWidth=None, color=u'white', colorSpace=u'rgb', opacity=1, depth=0.0) # Initialize components for Routine "trial" trialClock = core.Clock() movie = visual.MovieStim( win=win, name='movie', filename= u'/Users/MRIPsychology/Documents/MATLAB/Experiments/Child/nemo_firstdayofschool.mp4', ori=0, pos=(0, 0), opacity=1, depth=0.0, ) # Create some handy timers globalClock = core.Clock() # to track the time since experiment started routineTimer = core.CountdownTimer( ) # to track time remaining of each (non-slip) routine #------Prepare to start Routine "instructions"------- t = 0 instructionsClock.reset() # clock frameN = -1 # update component parameters for each repeat
import os #handy system and path functions from psychopy import visual, core, data, event, logging, gui, sound, info import psychopy.log #import like this so it doesn't interfere with numpy.log #setup the Window win = visual.Window(size=(1366, 768), fullscr=True, screen=0, allowGUI=True, allowStencil=False, monitor='testMonitor', color=[0, 0, 0], colorSpace='rgb') people = ['af1', 'af2', 'af3', 'af4', 'am1', 'am2', 'am3', 'am4', 'cf1'] emotions = ['happy', 'sad', 'mad', 'scared'] for exemplar in people: for emotion in emotions: for duration in ['6', '10', '12', '20']: try: mov = visual.MovieStim( win=win, filename='videos/%s/%s_%s/%s_%s_mov_%s.mov' % (exemplar, exemplar, emotion, exemplar, emotion, duration), pos=[0, 80], opacity=1, name='movie') except: print 'could not load', 'videos/%s/%s_%s/%s_%s_mov_%s.mov' % ( exemplar, exemplar, emotion, exemplar, emotion, duration)
from psychopy import visual, event, core import numpy win = visual.Window([600, 600], rgb=-1) gabor = visual.PatchStim(win, mask='gauss', pos=[-0.5, -0.5], color=[0, 0, 1], sf=5, ori=30) movie = visual.MovieStim(win, 'testMovie.mp4', units='pix', pos=[100, 100], size=[160, 120]) text = visual.TextStim(win, pos=[0.5, -0.5], text=u"unicode (eg \u03A8 \u040A \u03A3)", font=['Times New Roman']) faceRGB = visual.PatchStim(win, tex='face.jpg', pos=[-0.5, 0.5]) fixSpot = visual.PatchStim(win, tex="none", mask="gauss", size=(0.05, 0.05), color='white') myMouse = event.Mouse(win=win) t = 0.0 while True: #get mouse events mouse_dX, mouse_dY = myMouse.getRel()
#the event handlers defined above must be added to it #there are two event handler registration points: the low-level pyglet one inside the psychopy window, and the default psychopy.event #the psychopy.event module is built on the underlying pyglet, so there seems to be no collisions win.winHandle.push_handlers(on_key_press, on_key_release) #add event handlers defined above to the low-level pyglet event listener at winHandle ###LOAD & PACKAGE STIMULI### #Images start_background = test_background = visual.ImageStim(win, './stimuli/check.jpg') #test_background = visual.ImageStim(win, './stimuli/checkerboard.jpg') start_text = visual.TextStim(win, text='Press S To Start', color='red') #Movies for path in fam_paths: fam_playlist.append(visual.MovieStim(win, fam_root+path)) for path in part_paths: part_playlist.append(sound.Sound(part_root+path)) for path in word_paths: word_playlist.append(sound.Sound(word_root+path)) for path in get_paths: get_playlist.append(visual.MovieStim(win, get_root+path, loop=True)) ###EXPERIMENT### #Meta Data
extraInfo=expInfo, runtimeInfo=None, originPath=None, savePickle=False, saveWideText=False, dataFileName=filename) datFile=open('data' + os.path.sep + '%s_dyn_run%s.txt' %(expInfo['participant'], expInfo['run']),'a') datFile.write('Trial\tStim\tType\tOnset\n') #setup the Window win = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False, monitor=u'testMonitor', color=u'black', colorSpace=u'rgb') #Initialise components for Routine "waiting" waitingClock=core.Clock() circles=visual.MovieStim(win=win, name='circles',units=u'norm', filename=u'stimuli/oblique1.mov', ori=0, pos=[0, 0], opacity=1, loop=True, size=2, depth=0.0, ) #Initialise components for Routine "fix" fixClock=core.Clock() fullscreen=visual.GratingStim(win=win, name='fullscreen',units=u'norm', tex=None, mask=None, ori=0, pos=[0, 0], size=2, sf=None, phase=0.0, color=1.0, colorSpace=u'rgb', opacity=1, texRes=128, interpolate=True, depth=0.0) #Initialise components for Routine "trial" trialClock=core.Clock() # Create some handy timers
def run(subjectID, subjectAge, subjectGender, date):
    """Run the full valence/arousal rating experiment for one subject.

    Creates output/<subjectID>/ (with subject_info.json on first run),
    then runs num_runs blocks of 11 trials each (4 video, 7 sound),
    collecting valence and arousal ratings per trial and writing one
    presentation-order JSON and one ratings JSON per run.

    Relies on module-level globals VIDEOBINS and SOUNDBINS for the
    stimulus bin names.  Calls core.quit() at the end.
    """
    ###
    ### Experiment data
    ###
    cwd = os.getcwd()
    output_dir = os.path.join(cwd, "output")
    sub_id = subjectID
    subject_dir = os.path.join(output_dir, str(sub_id))
    # setting up sub info for the first time
    if not os.path.exists(subject_dir):
        sub_dict = {}
        os.mkdir(subject_dir)
        sub_dict["Age"] = str(subjectAge)
        sub_dict["Gender"] = subjectGender
        sub_dict["Date"] = str(date)
        sub_dict_path = os.path.join(subject_dir, 'subject_info.json')
        with open(sub_dict_path, 'w') as f:
            json.dump(sub_dict, f, sort_keys=True, indent=4)
    # get number of runs:
    num_runs = 20
    ###
    ### Do all the setting up
    ###
    #create a window
    mywin = visual.Window([1000, 750], color=(255, 255, 255),
                          monitor="testMonitor")
    #keep track of the mouse
    mouse = event.Mouse(visible=True)
    buttons = mouse.getPressed()
    #the rating scale(s): valence and arousal
    mark = visual.TextStim(mywin, text='|', color=(0, 0, 0),
                           colorSpace='rgb255')
    valenceRatingScale = visual.RatingScale(
        mywin, low=1, high=200, marker=mark, markerColor='Black',
        scale=None, tickMarks=None, tickHeight=0,
        labels=('Negative', 'Positive'), showValue=False,
        lineColor='LightGray', stretch=2.5, markerExpansion=0.5,
        textColor='Black', showAccept=False, pos=(0, -0.3), textSize=0.6)
    arousalRatingScale = visual.RatingScale(
        mywin, low=1, high=200, marker=mark, markerColor='Black',
        scale=None, tickMarks=None, tickHeight=0,
        labels=('Low energy', 'High Energy'), showValue=False,
        lineColor='LightGray', stretch=2.5, markerExpansion=0.5,
        textColor='Black', showAccept=False, pos=(0, -0.5), textSize=0.6)
    next_button_text = visual.TextStim(mywin, text="Next", color=(0, 0, 0),
                                       colorSpace='rgb255', pos=(0, -280),
                                       height=20, units='pix')
    next_button = visual.Rect(mywin, width=150, height=50, units='pix',
                              lineColor=(0, 0, 0), lineColorSpace='rgb255',
                              pos=(0, -280), fillColor=(255, 255, 255),
                              fillColorSpace='rgb255')
    # the play button for sounds
    play_button_text = visual.TextStim(mywin,
                                       text="Click play button to play sound",
                                       color=(0, 0, 0), colorSpace='rgb255',
                                       pos=(0, 0.2), height=0.05)
    button_vertices = [[-20, 33], [-20, -13], [20, 10]]
    play_button = visual.ShapeStim(mywin, units='pix',
                                   vertices=button_vertices,
                                   lineColor=(0, 0, 0),
                                   lineColorSpace='rgb255', pos=(0, 0),
                                   fillColor=(255, 255, 255),
                                   fillColorSpace='rgb255')
    # Set the stimulus directory
    stimulus_dir = os.path.join(os.path.dirname(cwd), 'STIMULI')
    ###
    ### Show instruction screen
    ###
    instructions = (
        "In the following task, you will be presented with some visual" +
        " or auditory stimuli. Click and drag along the scales at the" +
        " bottom of the screen to reflect how negative or positive and" +
        " how low or high energy the video or sound is.\n\n\n" +
        " Click the button to start")
    instruction_text = visual.TextStim(mywin, text=instructions,
                                       color=(0, 0, 0), colorSpace='rgb255',
                                       pos=(0, 100), height=20, units='pix',
                                       wrapWidth=500)
    continue_text = visual.TextStim(mywin, text="Start", color=(0, 0, 0),
                                    colorSpace='rgb255', pos=(0, -50),
                                    height=20, units='pix')
    continue_button = visual.Rect(mywin, width=150, height=50, units='pix',
                                  lineColor=(0, 0, 0),
                                  lineColorSpace='rgb255', pos=(0, -50),
                                  fillColor=(255, 255, 255),
                                  fillColorSpace='rgb255')
    ready = False
    while not ready:
        instruction_text.draw()
        continue_button.draw()
        continue_text.draw()
        mywin.flip()
        if mouse.isPressedIn(continue_button, buttons=[0]):
            # Darken the button briefly as click feedback, then proceed.
            continue_button.setFillColor(color=(225, 225, 225),
                                         colorSpace='rgb255')
            instruction_text.draw()
            continue_button.draw()
            continue_text.draw()
            mywin.flip()
            core.wait(0.2)
            ready = True
    ###
    ### Do multiple runs
    ###
    # Renamed from `run` — the original shadowed this function's own name.
    for run_number in range(num_runs):
        order_data_path = os.path.join(
            subject_dir,
            'videoPresentationOrder_run' + str(run_number) + '.json')
        stim_dict = {}
        stim_response_path = os.path.join(
            subject_dir, 'videoRatings_run' + str(run_number) + '.json')
        response_dict = {}
        # Pick the order of the images and the sounds
        video_binOrder = random.sample(range(4), 4)
        sound_binOrder = random.sample(range(7), 7)
        # Randomly picking the trials to show videos
        videoIndices = set(random.sample(range(11), 4))
        vidCount = 0
        soundCount = 0
        ###
        ### Do the drawings
        ###
        for trial in range(11):
            if trial in videoIndices:
                mode = "vid"
                # pick a video file
                video_bin = VIDEOBINS[video_binOrder[vidCount]]
                video_dir = os.path.join(stimulus_dir, "videos", video_bin)
                video_file = os.path.join(
                    video_dir, random.choice(os.listdir(video_dir)))
                # making the stimuli
                clip = visual.MovieStim(mywin, video_file, loop=True,
                                        units='pix', pos=(0, 120),
                                        size=(800, 400))
                # adding files presented to dictionary
                stim_dict[trial] = video_file
                vidCount += 1
            else:
                mode = "sound"
                # pick a sound file
                sound_bin = SOUNDBINS[sound_binOrder[soundCount]]
                sound_dir = os.path.join(stimulus_dir, "sounds", sound_bin)
                sound_file = os.path.join(
                    sound_dir, random.choice(os.listdir(sound_dir)))
                # making the stimuli
                soundClip = sound.Sound(sound_file, secs=2)
                # adding files presented to dictionary
                stim_dict[trial] = sound_file
                soundCount += 1
                soundPlayed = False
            # reset things:
            rating = False
            #movie timer
            if mode == "vid":
                timer = core.CountdownTimer(clip.duration)
            # draw and wait for response
            while rating == False:
                if mode == "vid":
                    # Rebuild the clip when it has played out so the
                    # video keeps looping until a rating is given.
                    if timer.getTime() == 0:
                        clip = visual.MovieStim(mywin, video_file,
                                                loop=True, units='pix',
                                                pos=(0, 120),
                                                size=(800, 400))
                        timer.reset(clip.duration)
                    clip.draw()
                if mode == "sound":
                    play_button = visual.ShapeStim(
                        mywin, units='pix', vertices=button_vertices,
                        lineColor=(0, 0, 0), lineColorSpace='rgb255',
                        pos=(0, 0), fillColor=(255, 255, 255),
                        fillColorSpace='rgb255')
                    play_button_text.draw()
                    play_button.draw()
                valenceRatingScale.draw()
                arousalRatingScale.draw()
                next_button.setFillColor(color=(255, 255, 255),
                                         colorSpace='rgb255')
                next_button.draw()
                next_button_text.draw()
                mywin.flip()
                if mouse.isPressedIn(play_button, buttons=[0]):
                    # Redraw with a darkened play button, then play.
                    play_button = visual.ShapeStim(
                        mywin, units='pix', vertices=button_vertices,
                        lineColor=(0, 0, 0), lineColorSpace='rgb255',
                        pos=(0, 0), fillColor=(225, 225, 225),
                        fillColorSpace='rgb255')
                    play_button.draw()
                    play_button_text.draw()
                    valenceRatingScale.draw()
                    arousalRatingScale.draw()
                    next_button.draw()
                    next_button_text.draw()
                    mywin.flip()
                    mouse.clickReset()
                    core.wait(0.2)
                    soundClip.play()
                    soundPlayed = True
                if mouse.isPressedIn(next_button, buttons=[0]):
                    next_button.setFillColor(color=(225, 225, 225),
                                             colorSpace='rgb255')
                    next_button.draw()
                    next_button_text.draw()
                    if mode == "vid":
                        clip.draw()
                    if mode == "sound":
                        play_button.draw()
                        play_button_text.draw()
                    valenceRatingScale.draw()
                    arousalRatingScale.draw()
                    mywin.flip()
                    mouse.clickReset()
                    core.wait(0.2)
                    # Accept only once both scales were touched (and the
                    # sound, if any, was actually played).
                    if mode == "vid" or soundPlayed == True:
                        if valenceRatingScale.getRating(
                        ) and arousalRatingScale.getRating():
                            rating = True
                            finalValenceRating = valenceRatingScale.getRating(
                            ) / 2
                            finalArousalRating = arousalRatingScale.getRating(
                            ) / 2
                            #if sound is still playing, stop
                            if mode == "sound":
                                soundClip.stop()
            # add response to dictionary, whether or not heard sound
            response_dict[trial] = [finalValenceRating, finalArousalRating]
            valenceRatingScale.reset()
            arousalRatingScale.reset()
            # clean the window
            mywin.flip()
        ###
        ### write data to files
        ###
        # Files are now opened via `with` at write time: the original
        # opened both at the top of every run but only closed the final
        # pair after all runs, leaking the earlier handles.
        with open(order_data_path, 'w') as order_data:
            json.dump(stim_dict, order_data, sort_keys=True, indent=4)
        with open(stim_response_path, 'w') as stim_response:
            json.dump(response_dict, stim_response, sort_keys=True, indent=4)
    finish_text = "End"
    finish = visual.TextStim(mywin, text=finish_text, color=(0, 0, 0),
                             colorSpace='rgb255', pos=(0, 0), height=0.075)
    finish.draw()
    mywin.flip()
    core.wait(5)
    #cleanup
    mywin.close()
    core.quit()
#pyglet window class instance is stored in the psyhopy.visual.Window object at winHandle #the event handlers defined above must be added to it #there are two event handler registration points: the low-level pyglet one inside the psychopy window, and the default psychopy.event #the psychopy.event module is built on the underlying pyglet, so there seems to be no collisions win.winHandle.push_handlers( on_key_press, on_key_release ) #add event handlers defined above to the low-level pyglet event listener at winHandle ###LOAD STIMULI### #Start Screen start_background = test_background = visual.ImageStim(win, './stimuli/check.jpg') fam_mov = visual.MovieStim(win, './stimuli/tsums.mp4', loop=True) #test_background = visual.ImageStim(win, './stimuli/checkerboard.jpg') start_text = visual.TextStim(win, text='Press S To Start', color='red') ag = sound.Sound("./stimuli/ag.wav") #Stims for path in fam_varset_paths: fam_varset_playlist.append(sound.Sound(fam_varset_root + path)) for path in fam_scramble_paths: fam_scramble_playlist.append(sound.Sound(fam_scramble_root + path)) for path in test_paths: test_playlist.append(sound.Sound(test_root + path)) random.shuffle(test_playlist)
font=u'Arial', pos=[0, 0], height=0.3, wrapWidth=None, color=u'white', colorSpace=u'rgb', opacity=1, depth=0.0) # Initialize components for Routine "trial" trialClock = core.Clock() movie = visual.MovieStim( win=win, name='movie', filename= u'/Users/MRIPsychology/Documents/MATLAB/Experiments/Child/inscapes.mov', ori=0, pos=(0, 0), opacity=1, depth=0.0, ) # Create some handy timers globalClock = core.Clock() # to track the time since experiment started routineTimer = core.CountdownTimer( ) # to track time remaining of each (non-slip) routine #------Prepare to start Routine "instructions"------- t = 0 instructionsClock.reset() # clock frameN = -1 # update component parameters for each repeat
practice_trials_02 = data.TrialHandler( nReps=1, method='random', trialList=data.importConditions('conditions/practice_02.csv')) total_score = 0 for thisTrial in practice_trials_02: if thisTrial != None: for paramName in thisTrial.keys(): exec(paramName + '=thisTrial.' + paramName) trial_number += 1 mov = visual.MovieStim(win=win, filename='videos/%s/%s_%s/%s_%s_mov_%s.mov' % (exemplar, exemplar, imagedict[condition], exemplar, imagedict[condition], duration), pos=[0, 80], opacity=1, name='movie') output = run_trial(dev, mov, thisTrial, trial_number) total_score += output['score'] if total_score == practice_trials_02.nTotal: break #replay instructions present_instructions([text_replay_02], audio_object=audio_replay_02) #start practice_03 present_instructions([text_03], audio_object=audio_instructions_03) while True:
blendMode='avg', useFBO=False, winType='pyglet') aspect_ratio = 1200 / 1600.0 image = visual.ImageStim(win2, "banana_small_alpha.png", size=4.0, units="deg") grating = MovingGratingStim(win2, mask='gauss', units="deg", sf=1.0, ori=60.0, size=2.0, color=(1.0, 0.0, 0.0), colorSpace='rgb') movie = visual.MovieStim( win2, "/Users/roger/Documents/research/monkey/data/movies/animals.mp4", flipVert=False) fixation_dot = visual.Circle(win2, 50.0, units="pix", fillColor=(1.0, 1.0, 1.0), fillColorSpace='rgb') def do_every(period, f, *args): def g_tick(): t = time.time() count = 0 while True: count += 1
font=text_font) #-------- # IMAGES #-------- photo = visual.ImageStim(win=win, image="stim/landscape-1920x1080.jpg", units="deg", size=(32, 18)) #-------- # VIDEO #-------- filename = 'stim/' + expInfo["Show"] + '.mp4' movie = visual.MovieStim(win, filename, size=(32, 18), loop=True), #============ # INITIALIZE #============ if triggers: sendTrigger(triggerList["eegStart"]) #start EEG trigger #text before rest restMessage.draw() win.flip() keypress = event.waitKeys(keyList=['space', 'escape']) if keypress[0] == 'escape': core.quit() # Blank screen for thisFrame in range(timeInFrames["BEFORE"]):
# Per-run event storage, one empty column list per BIDS events column.
run_data = {c: [] for c in COLUMNS}
# BIDS-style output basename: sub-XX_ses-XX_task-XXX_run-X_events.
filename = op.join(
    script_dir, 'data', 'sub-{0}_ses-{1}_task-{2}_run-{3}_events'.format(
        exp_info['Subject'].zfill(2), exp_info['Session'].zfill(2),
        taskname, run_label))
outfile = filename + '.tsv'
logfile = logging.LogFile(filename + '.log', level=logging.INFO)
logging.console.setLevel(
    logging.INFO)  # this outputs to the screen, not a file

# Reset BioPac
if exp_info['BioPac'] == 'Yes':
    ser.write('RR')

video = visual.MovieStim(window, filename=video_file,
                         name=exp_info['Film'], volume=1.)
# Letterbox the video: scale it to the largest size that fits inside the
# window while preserving its native aspect ratio.  The second min() term
# simplifies to window.size[1] / height (the height ratio).
width, height = video.size
aspect_ratio = width / height
min_ratio = min(window.size[0] / width,
                aspect_ratio * window.size[1] / width)
new_width = min_ratio * width
new_height = min_ratio * width / aspect_ratio  # == min_ratio * height
video.setSize((new_width, new_height))

# Scanner runtime
# ---------------
# Wait for trigger from scanner.
draw_until_keypress(win=window, stim=waiting)

# Start recording
# -*- coding: utf-8 -*- from psychopy import visual, core, event, gui, data, misc import numpy, os, random, time, csv # 画面の準備(灰色の画面、マウスはallowGUI=Falseで表示されないようにしている) myWin = visual.Window(fullscr=True, allowGUI=False, color=(0, 0, 0)) #現在このコードのある場所のパスを取得して、そのパスの1つ下のstimフォルダに移動する(動画ファイルを読み込むため) curD = os.getcwd() os.chdir(os.path.join(curD, 'stimli')) try: #時計の準備 stopwatch = core.Clock() #動画刺激の準備 mov1 = visual.MovieStim(myWin, 'sea.mov', size=[640, 480]) mov1.play() #時計のリセット stopwatch.reset() #動画を30秒間呈示する while stopwatch.getTime() < 30: mov1.draw() myWin.flip() if event.getKeys(keyList=['escape', 'q']): myWin.close() core.quit() myWin.close() core.quit() except TypeError, e: print e
from psychopy import visual, core, event

# Minimal demo: play a movie until it finishes, or quit on escape/q.
win = visual.Window([800, 600])
mov = visual.MovieStim(win, 'jwpIntro.mov', size=[320, 240],
                       flipVert=False, flipHoriz=False, loop=True)
# NOTE(review): with loop=True the status may never become FINISHED, so
# the escape/q keys are the practical exit — confirm this is intended.
print('orig movie size=[%i,%i]' % (mov.format.width, mov.format.height))
print('duration=%.2fs' % (mov.duration))
globalClock = core.Clock()
while mov.status != visual.FINISHED:
    mov.draw()
    win.flip()
    if event.getKeys(keyList=['escape', 'q']):
        win.close()
        core.quit()
core.quit()
"""Different systems have different sets of codecs. avbin (which PsychoPy uses to load movies) seems not to load compressed audio on all systems. To create a movie that will play on all systems I would recommend using the format: video: H.264 compressed, audio: Linear PCM"""
for thisTrial in trials: currentLoop = trials # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb) if thisTrial != None: for paramName in thisTrial.keys(): exec(paramName + '= thisTrial.' + paramName) #------Prepare to start Routine "vid"------- t = 0 vidClock.reset() # clock frameN = -1 # update component parameters for each repeat movie = visual.MovieStim(win=win, name='movie', filename=stim, ori=0, pos=[0, 0], opacity=1, depth=0.0, ) # key_resp_temp = event.BuilderKeyResponse() # create an object of type KeyResponse # key_resp_temp.status = NOT_STARTED # keep track of which components have finished vidComponents = [] vidComponents.append(movie) # vidComponents.append(key_resp_temp) for thisComponent in vidComponents: if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED #-------Start Routine "vid"------- continueRoutine = True while continueRoutine: