def make_stims():
    
    '''This function creates our video stim objects and assigns them to their respective video lists.
    It then assigns each video a name (directory and filename).'''
    
    global control_prog_stims, control_advert_stims, target_prog_stims, target_advert_stims
    
    # create video stimuli and assign to respective lists
    target_prog_stims   = [visual.MovieStim3(win, target_prog_dir + "/" + stim) for stim in target_prog_subset[:]]
    target_advert_stims = [visual.MovieStim3(win, target_ad_dir + "/" + stim) for stim in target_ad_subset[:]]
    control_prog_stims   = [visual.MovieStim3(win, control_prog_dir + "/" + stim) for stim in control_prog_subset[:]]
    control_advert_stims = [visual.MovieStim3(win, control_ad_dir + "/" + stim) for stim in control_ad_subset[:]]

    # assign each video stim a name
    for counter, stim in enumerate(target_prog_stims):
        stim.name = target_prog_names[counter]
    for counter, stim in enumerate(target_advert_stims):
        stim.name = target_ad_names[counter]
    for counter, stim in enumerate(control_prog_stims):
        stim.name = control_prog_names[counter]
    for counter, stim in enumerate(control_advert_stims):
        stim.name = control_ad_names[counter]

    # returns 4 video stim lists
    return target_prog_stims, target_advert_stims, control_prog_stims, control_advert_stims
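A minimal usage sketch (not part of the original snippet; it assumes win and the four subset/name lists are already defined): call make_stims() once, then draw one of the returned movie stimuli frame by frame, as the other examples on this page do.

target_prog_stims, target_advert_stims, control_prog_stims, control_advert_stims = make_stims()
for stim in target_prog_stims:
    while stim.status != visual.FINISHED:
        stim.draw()
        win.flip()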
Example #2
    def draw_cal_target(self, x, y):
        '''Draw the calibration/validation & drift-check target'''

        self.clear_cal_display()
        xVis = (x - self.w / 2)
        yVis = (self.h / 2 - y)

        if self.calTarget == 'default':
            self.calibTar.pos = (xVis, yVis)
            self.calibTar.draw()
            self.display.flip()
        else:
            if self.calTarget == 'rotatingCheckerboard':
                self.calibTar = visual.RadialStim(self.display,
                                                  tex='sqrXsqr',
                                                  mask='circle',
                                                  radialCycles=2,
                                                  angularCycles=6,
                                                  size=1.0 / 20 * self.w)
            if self.calTarget == 'movie':
                self.calibTar = visual.MovieStim3(self.display,
                                                  self.movieTargetFile,
                                                  loop=True)

            self.calibTar.pos = (xVis, yVis)
            self.animatedTarget = True  # turn on target animation
Example #3
    def movie_setup(self, window):
        """Bring movie onto the monitor and connect with eyelink."""
        ## GET ACTUAL PARAMETERS FOR THESE
        scnWidth = self.screen_width
        scnHeight = self.screen_height

        window.mouseVisible = False
        # set up movie
        movie = visual.MovieStim3(window,
                                  self.video_path,
                                  flipVert=False,
                                  flipHoriz=False,
                                  noAudio=True,
                                  loop=False)
        self.movie_x, self.movie_y = movie.size

        self.align_x = self.screen_width / 2 - self.movie_x / 2
        self.align_y = self.screen_height / 2 - self.movie_y / 2

        # call custom calibration method to coordinate screens
        screen_share = EyeLinkCoreGraphicsPsychoPy(self.tracker, window)
        pylink.openGraphicsEx(screen_share)

        frame_time = movie.getCurrentFrameTime
        # color theme of the calibration display
        #        pylink.setCalibrationColors((255,255,255), (0,0,0))
        return window, movie, frame_time
Example #4
 def test_mov(self):
     win = self.win
     if self.win.winType == 'pygame':
         pytest.skip("movies only available for pyglet backend")
     elif _travisTesting and not _anacondaTesting:
         pytest.skip("Travis with system Python doesn't seem to have a "
                     "working ffmpeg")
     win.flip()
     #construct full path to the movie file
     fileName = os.path.join(utils.TESTS_DATA_PATH, 'testMovie.mp4')
     #check if present
     if not os.path.isfile(fileName):
         raise IOError('Could not find movie file: %s' %
                       os.path.abspath(fileName))
     #then do actual drawing
     pos = [0.6 * self.scaleFactor, -0.6 * self.scaleFactor]
     mov = visual.MovieStim3(win, fileName, pos=pos, noAudio=True)
     mov.setFlipVert(True)
     mov.setFlipHoriz(True)
     for frameN in range(10):
         mov.draw()
         if frameN == 0:
             utils.compareScreenshot('movFrame1_%s.png' % self.contextName,
                                     win,
                                     crit=10)
         win.flip()
     "{}".format(mov)  #check that str(xxx) is working
Example #5
    def test_mov(self):
        win = self.win
        if self.win.winType == 'pygame':
            pytest.skip("movies only available for pyglet backend")

        win.flip()
        #construct full path to the movie file
        fileName = os.path.join(utils.TESTS_DATA_PATH, 'testMovie.mp4')
        #check if present
        if not os.path.isfile(fileName):
            raise IOError('Could not find movie file: %s' %
                          os.path.abspath(fileName))
        #then do actual drawing
        pos = [0.6 * self.scaleFactor, -0.6 * self.scaleFactor]
        mov = visual.MovieStim3(win, fileName, pos=pos, noAudio=True)
        mov.setFlipVert(True)
        mov.setFlipHoriz(True)
        if sys.platform == 'darwin':
            threshold = 30
        else:
            threshold = 11
        for frameN in range(10):
            mov.draw()

            if frameN == 0:
                utils.compareScreenshot('movFrame1_%s.png' % self.contextName,
                                        win,
                                        crit=threshold)
            win.flip()
        "{}".format(mov)  #check that str(xxx) is working
Example #6
def video_dimensions(video_path, fullscreen=0, x_size=1000, y_size=1000):
    """load movie onto background."""
    window = visual.Window([x_size, y_size],
                           fullscr=fullscreen,
                           units="pix",
                           color=[1, 1, 1])
    movie = visual.MovieStim3(window, video_path, flipVert=False)
    window.mouseVisible = False
    return movie, window
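A minimal usage sketch (not part of the original snippet; the file name is a placeholder): play the returned movie until it finishes, mirroring the draw/flip loops used elsewhere on this page.

movie, window = video_dimensions("stim/example_clip.mp4", fullscreen=0)
while movie.status != visual.FINISHED:
    movie.draw()
    window.flip()
window.close()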
Example #7
def get_target(file_name, file_type, win):
    """
    Creates a PsychoPy object for given stimulus.
    file_name = stimulus file name
    Returns a list containing the PsychoPy object (f) and filename without the extension (name[0]).
    """
    if file_type == "audio":
        f = sound.Sound(value = my_dir + '/' + file_name).play
    elif file_type == "picture":
        f = visual.ImageStim(win, image = my_dir + '/' + file_name, pos = [0,0.2]).draw
    elif file_type == "video":
        f = visual.MovieStim3(win, filename = my_dir + '/' + file_name, pos = [0,0.2], units = "norm", size=(0.8,0.8), loop = True).draw
    name = file_name
    return [f, name]
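A minimal usage sketch (not part of the original snippet; the file name is a placeholder): get_target stores the stimulus's bound draw method, so calling the returned f presents one frame of the looping movie.

draw_video, name = get_target("example_clip.mp4", "video", win)
for frame in range(300):   # draw ~300 frames; the clip repeats because loop=True
    draw_video()           # same as calling .draw() on the underlying MovieStim3
    win.flip()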
Example #8
def load_movies(idx_run):
    """Load movies which will be displayed in the current run."""
    global nr_movies, movie_orders, video_paths

    # initiate list of lists to store moviestim
    movies = list()
    [movies.append([]) for m in range(nr_movies)]

    for j in movie_orders[idx_run]:
        print('Loading movie %i' % j)
        movies[j] = visual.MovieStim3(win, video_paths[j],
                                      size=(1280, 720),
                                      pos=[0, 0], flipVert=False,
                                      flipHoriz=False, loop=False,
                                      noAudio=True)
    return movies
Example #9
def play_example(win, movie, timing, trigger=None):
    mov = visual.MovieStim3(win,
                            'movies/examples/' + movie,
                            size=[1080, 637.5],
                            flipVert=False,
                            flipHoriz=False,
                            loop=False,
                            noAudio=True)

    timer = core.CountdownTimer(timing)
    mov_start = core.getTime()
    event.clearEvents(eventType='keyboard')

    while mov.status != visual.FINISHED and timer.getTime() > 0:
        mov.draw()
        win.flip()
Example #10
def introduce_new_objects(tracker, stimpaths):

    videofiles = [os.path.join(path, f) for path in stimpaths for f in os.listdir(path) if f.endswith(".avi")]
    imgfiles = [os.path.join(path, f) for path in stimpaths for f in os.listdir(path) if f.endswith(".png")]

    anim_stims = [visual.MovieStim3(win, filename=videofile, size=c.STIMSIZE, flipVert=False, noAudio=True) for videofile in videofiles]
    img_stims = [visual.ImageStim(win, image=imgfile, size=c.STIMSIZE) for imgfile in imgfiles]

    for n in range(len(anim_stims)):

        occluders = _create_occluders(2)

        # The last test round has "vertical ending"
        isVertical = (n == len(anim_stims) - 1)

        int_side = stimpaths[n].split("_")[-2]
        i_first = (stimpaths[n].split("_")[-1] == "first")
        t = 1.0 if i_first else 6.5

        show_face(tracker)

        tracker.log("Familiarisation_anim{0}_{1}{2}".format(str(n+1), int_side, "1" if i_first else "2"))

        # for code debugging
        print("\nFamiliarisation animation {0}".format(n+1))
        print("\tinteresting side:", int_side, "; moves:", " first" if i_first else " second")

        _play_familiarisation_anim(tracker, anim_stims[n], t)
        img_stim = img_stims[n]

        ### occluders float down to cover objects ###
        event.clearEvents()
        tracker.log("occluders_going_down")
        snd.snd_down.play()
        _move_occluders(occluders, frames=c.occl_lim, positions=[c.rect_pos1, c.rect_pos2],
                               step= -c.down, objs=[img_stim])
        time.sleep(1)

        # shuffle
        tracker.log("shuffle_starts")
        sposs = occl_anim.shuffle_occluders(win, occluders, False, isVertical)

        labeling.label_objects(win, occluders, tracker=tracker, isFamiliar=False, isVertical=isVertical,
                               sposs=sposs, side=random.choice([0,1]), label_snd=int_label_snd)

        tracker.log("Labeling_{0}_ENDS".format("test_object"))
Example #11
    def update_cal_target(self):
        '''Make sure the target stimulus is already in memory before it is used by draw_cal_target.'''

        if self.calTarget == 'picture':
            if self.pictureTargetFile is None:
                print(
                    'ERROR: Calibration target is None, please provide a picture'
                )
                core.quit()
            else:
                self.calibTar = visual.ImageStim(self.display,
                                                 self.pictureTargetFile,
                                                 size=self.targetSize)

        elif self.calTarget == 'spiral':
            thetas = numpy.arange(0, 1440, 10)
            N = len(thetas)
            radii = numpy.linspace(0, 1.0, N) * self.targetSize
            x, y = pol2cart(theta=thetas, radius=radii)
            xys = numpy.array([x, y]).transpose()
            self.calibTar = visual.ElementArrayStim(self.display,
                                                    nElements=N,
                                                    sizes=self.targetSize,
                                                    sfs=3.0,
                                                    xys=xys,
                                                    oris=-thetas)
        elif self.calTarget == 'movie':
            if self.movieTargetFile is None:
                print(
                    'ERROR: Calibration target is None, please provide a movie clip'
                )
                core.quit()
            else:
                self.calibTar = visual.MovieStim3(self.display,
                                                  self.movieTargetFile,
                                                  loop=True,
                                                  size=self.targetSize)

        else:  # use the default 'circle'
            self.calibTar = visual.Circle(self.display,
                                          radius=self.targetSize / 2,
                                          lineColor=self.foregroundColor,
                                          fillColor=self.backgroundColor,
                                          lineWidth=self.targetSize / 2.0,
                                          units='pix')
Example #12
    def _setup(self, exp_win):

        self.movie_stim = visual.MovieStim3(exp_win, self.filepath, units='pixels')
        aspect_ratio = self._aspect_ratio or self.movie_stim.size[0] / self.movie_stim.size[1]
        min_ratio = min(
            exp_win.size[0] / self.movie_stim.size[0],
            exp_win.size[1] / self.movie_stim.size[0] * aspect_ratio)

        width = min_ratio * self.movie_stim.size[0]
        height = min_ratio * self.movie_stim.size[0] / aspect_ratio

        if self._scaling is not None:
            width *= self._scaling
            height *= self._scaling

        self.movie_stim.size = (width, height)
        self.duration = self.movie_stim.duration
Example #13
def video(vid):
    win = visual.Window((800, 600))
    mov = visual.MovieStim3(win,
                            vid,
                            size=(320, 240),
                            flipVert=False,
                            flipHoriz=False,
                            loop=False)
    print('orig movie size=%s' % mov.size)
    print('duration=%.2fs' % mov.duration)
    globalClock = core.Clock()

    while mov.status != visual.FINISHED:
        mov.draw()
        win.flip()
        if event.getKeys():
            break

    win.close()
    core.quit()
Example #14
def _play_familiarisation_anim(stim_dir):
    """Plays animation with interesting and boring objects. Returns the ending screenshot."""

    videofile = next((os.path.join(stim_dir, f)
                      for f in os.listdir(stim_dir) if f.endswith(".avi")),
                     None)
    imgfile = next((os.path.join(stim_dir, f)
                    for f in os.listdir(stim_dir) if f.endswith(".png")), None)

    anim_stim = visual.MovieStim3(win,
                                  videofile,
                                  size=(1920, 1080),
                                  flipVert=False)
    img_stim = visual.ImageStim(win, imgfile, size=(1920, 1080))
    #    print("\tanimation:", os.path.basename(videofile))

    int_sound = sound.Sound(value=c.INT_SOUND)
    int_sound.setVolume(0.35)

    #    int_side = "left" if imgfile.endswith("0.png") else "right" # interesting side be determined from the name of imagefile -> to pass on to labeling
    i_first = videofile.split(".")[0].endswith("1")
    #    print("\tanimation int_side:", int_side)
    #    print("\ti_first:", i_first)

    t = 1.0 if i_first else 6.5
    snd_start = Timer(t, int_sound.play)
    snd_start.start()

    event.clearEvents()
    while anim_stim.status != visual.FINISHED:

        anim_stim.draw()
        win.flip()

        if _getKeypress():
            snd_start.cancel()
            int_sound.stop()
            break

    return img_stim
Example #15
def ShowMovie(Window, MoviePath, Scale = 1):
    bgcolor = Window.color
    # Set window background color to black.
    Window.setColor([-1, -1, -1])
    # Create movie object
    Movie = visual.MovieStim3(Window, MoviePath, flipVert=False, units='pix')

    # Maintain Movie Aspect Ratio, based on smallest Window dimension
    WinSize = Window.size
    MovSize = Movie.size
    RelaSize = WinSize/MovSize
    MovScale = np.min(RelaSize * Scale)
    Movie.setSize(MovSize*MovScale)

    while Movie.status != visual.FINISHED:
        CheckQuitWindow(Window)
        Movie.draw()
        Window.flip()

    # Return background color to the original color
    Window.setColor(bgcolor)

    return None
Example #16
def play_movie(win, movie, timing, trigger=None):
    mov = visual.MovieStim3(win,
                            'movies/' + movie,
                            size=[1080, 637.5],
                            flipVert=False,
                            flipHoriz=False,
                            loop=False,
                            noAudio=True)

    timer = core.CountdownTimer(timing)
    mov_start = core.getTime()
    if trigger:
        trigger.flicker(1)
    event.clearEvents(eventType='keyboard')

    while mov.status != visual.FINISHED and timer.getTime() > 0:
        mov.draw()
        win.flip(clearBuffer=False)

    last_frame = visual.BufferImageStim(win,
                                        buffer='front',
                                        rect=(-.8, 0.8, 0.8, -0.8))
    last_frame.autoDraw = True
    return mov_start, last_frame
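A minimal usage sketch (not part of the original snippet; the movie name and timing are placeholders): the returned BufferImageStim keeps the last frame on screen until autoDraw is switched off.

mov_start, last_frame = play_movie(win, 'example_clip.mp4', timing=30)
# ...collect responses here while the frozen last frame stays visible...
last_frame.autoDraw = False  # remove the frozen frame when the trial ends
win.flip()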
Example #17
from psychopy import visual, core, event, sound, logging
logging.console.setLevel(
    logging.DEBUG)  #get messages about the sound lib as it loads

win = visual.Window([1000, 1000])
#sound.init(48000,buffer=500)

globalClock = core.Clock()

#mov._audioStream = testSound
for trl in range(0, 4):
    mov = visual.MovieStim3(
        win,
        r'C:\TCDTIMIT\volunteersSmall\s60T\straightcam\TestVideo.mp4',
        flipVert=False,
        flipHoriz=False,
        loop=False,
        noAudio=True)
    testSound = sound.Sound(
        r'C:\TCDTIMIT\volunteersSmall\s60T\straightcam\TestVideo.wav',
        sampleRate=48000)
    #core.wait(.2)
    print('orig movie size=%s' % (mov.size))
    print('duration=%.2fs' % (mov.duration))
    movStart = 1
    while mov.status != visual.FINISHED:
        mov.draw()
        win.flip()
        if movStart:
            testSound.play()
            movStart = 0  # start the soundtrack only once, on the first frame
Example #18
### (1) change the audio driver to "portaudio" (otherwise the program won't stop running when it's done)
### (2) change the audio library to "pygame" (otherwise the audio won't stop playing when the video stops)
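### A hedged sketch of doing (1) and (2) in code rather than in the Preferences GUI.
### It assumes a PsychoPy version where these keys live under prefs.hardware
### (older releases keep the same settings under prefs.general); set them before
### importing psychopy.sound so the chosen backend is actually used.
from psychopy import prefs
prefs.hardware['audioDriver'] = ['portaudio']
prefs.hardware['audioLib'] = ['pygame']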

# set up current folder path
parent_dir = "/Users/mli/Desktop/psychopy-tutorial/"

# load PsychoPy modules for visual stimuli, audio stimuli and clocks
from psychopy import visual, core, sound

# set up the window on which the stimuli will be presented
win = visual.Window(size=[800, 500], color="black", fullscr=False, units="pix")

# set up the video stimulus
mov = visual.MovieStim3(win,
                        parent_dir + 'stim/baby_laugh.mp4',
                        size=(320, 240),
                        flipVert=False,
                        flipHoriz=False,
                        loop=False)

# play the video clip for 10 seconds, starting at 20 seconds
t0 = core.getTime()
mov.seek(20)  # start play at 20 seconds
while core.getTime() - t0 <= 10:
    mov.draw()
    win.flip()

# # play the entire video clip
# while mov.status != visual.FINISHED:
#     mov.draw()
#     win.flip()
Example #19
# Start Code - component code to be run before the window creation
# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine
# store frame rate of monitor if we can measure it
win = visual.Window((800, 600))
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] is not None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess
mov = visual.MovieStim3(win,
                        'des_me.mp4',
                        size=(640, 480),
                        flipVert=False,
                        flipHoriz=False,
                        loop=False,
                        pos=(0, 100))
print('orig movie size=%s' % mov.size)
print('duration=%.2fs' % mov.duration)
globalClock = core.Clock()
thisResp = None
counter = 0
thisKey = '5'
counterlist = []
keylist = []
timelist = []
textStim = visual.TextStim(win=win,
                           units='pix',
                           height=300,
Example #20
]
SOUNDSTIM = 'infant/wawa.wav'

###############################################################################
# Demo
# create a Window to control the monitor
win = visual.Window(size=[1280, 1024],
                    units='pix',
                    fullscr=True,
                    allowGUI=False)

# initialize TobiiInfantController to communicate with the eyetracker
controller = TobiiInfantController(win)

# setup the attention grabber during adjusting the participant's position
grabber = visual.MovieStim3(win, "infant/seal-clip.mp4")
# prepare the audio stimuli used in calibration
calibration_sound = sound.Sound(SOUNDSTIM)

grabber.setAutoDraw(True)
grabber.play()
# show the relative position of the subject to the eyetracker
# Press space to exit
controller.show_status()

# stop the attention grabber
grabber.setAutoDraw(False)
grabber.stop()

# How to use:
# - Use 1~9 (depending on the number of calibration points) to present
Example #21
    if thisTrials_loop is not None:
        for paramName in thisTrials_loop:
            exec('{} = thisTrials_loop[paramName]'.format(paramName))

    # ------Prepare to start Routine "trials"-------
    t = 0
    trialsClock.reset()  # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    movie = visual.MovieStim3(
        win=win,
        name='movie',
        noAudio=True,
        filename=vid,
        ori=0,
        pos=(0, 0),
        opacity=1,
        size=1500,
        depth=0.0,
    )
    movie_key = event.BuilderKeyResponse()
    # keep track of which components have finished
    trialsComponents = [movie, movie_key, text_2]
    for thisComponent in trialsComponents:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED

    # -------Start Routine "trials"-------
    while continueRoutine:
        # get current time
Example #22
     question = visual.TextStim(win,
                                text=question_text,
                                pos=(0, .4),
                                alignHoriz='center',
                                alignVert='bottom',
                                wrapWidth=2,
                                color='black',
                                name='What object?')
     stimuli[trial] = [probe_left, probe_right, probe_center, question]
 elif stim_type == 'sketch':
     # load the sketch video
     clip_fn = join(STIMDIR, trial_obj + '_' + str(stim_number) + '_6s.mov')
     stimuli[trial] = visual.MovieStim3(win,
                                        clip_fn,
                                        pos=(0, 0),
                                        flipVert=False,
                                        flipHoriz=False,
                                        loop=False,
                                        noAudio=True,
                                        name=trial_obj)
 elif stim_type == 'Instruct_Animate':
     stimuli[trial] = visual.TextStim(
         win,
         wrapWidth=1.8,
         alignHoriz='center',
         alignVert='center',
         name='Instructions',
         text=("Attempt to see\n\n"
               "ANIMATE \n\nReport what you saw"),
         color='black')
     # pos=[-.9, .6],
 elif stim_type == 'Instruct_Inanimate':
Example #23
            videopath,
            size=720,
            # pos specifies the /center/ of the movie stim location
            pos=[0, 150],
            flipVert=False,
            flipHoriz=False,
            loop=False,
            noAudio=True)
        audioSound = sound.Sound(tmpSoundFile, sampleRate=48000)
        soundDur = audioSound.getDuration()
    else:
        mov = visual.MovieStim3(
            win,
            videopath,
            size=720,
            # pos specifies the /center/ of the movie stim location
            pos=[0, 150],
            flipVert=False,
            flipHoriz=False,
            loop=False,
            noAudio=True)
        audioSound = sound.Sound(tmpSoundFile, sampleRate=48000)
        soundDur = audioSound.getDuration()

    keystext = "PRESS 'escape' to Quit.\n"
    text = visual.TextStim(win, keystext, pos=(0, -250), units='pix')

    #Only draw more than 1 frame if this is a video "OFF" trial
    firstFrame = 1

    movStart = core.getTime()
    while core.getTime(
Example #24
    if x.endswith('.png') and not x.startswith('.')
]

###############################################################################
# Demo
# create a Window to control the monitor
win = visual.Window(size=[1280, 1024],
                    units='pix',
                    fullscr=True,
                    allowGUI=False)

# initialize TobiiInfantController to communicate with the eyetracker
controller = TobiiInfantController(win)

# setup the attention grabber during adjusting the participant's position
grabber = visual.MovieStim3(win, "infant/seal-clip.mp4")
grabber.setAutoDraw(True)
grabber.play()
# show the relative position of the subject to the eyetracker
# Press space to exit
controller.show_status()

# stop the attention grabber
grabber.setAutoDraw(False)
grabber.stop()

# How to use:
# - Use 1~9 (depending on the number of calibration points) to present
#   calibration stimulus and 0 to hide the target.
# - Press space to start collect calibration samples.
# - Press return (Enter) to finish the calibration and show the result.
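A hedged sketch of how the calibration itself is typically launched after show_status (the run_calibration call and the CALINORMP/CALIPOINTS names follow the psychopy_tobii_infant demos and are assumptions, since this excerpt stops before that point; CALISTIMS is the image list built above):

CALINORMP = [(-0.4, 0.4), (-0.4, -0.4), (0.0, 0.0), (0.4, 0.4), (0.4, -0.4)]
CALIPOINTS = [(x * win.size[0], y * win.size[1]) for x, y in CALINORMP]
success = controller.run_calibration(CALIPOINTS, CALISTIMS)  # assumed API
if not success:
    core.quit()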
Example #25
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import division
from datetime import datetime
#from psychopy import psychtoolbox as ptb
from psychopy import visual, core, event, sound
#from psychopy.visual import vlc
import time, os
import csv, random
import pyglet

win = visual.Window(fullscr=True)

video = "/Users/admin/Documents/artificialLanguageSegmentation/pilot5/new/A_Disrupter0_noun0_Singularnoun_verb1_ProgressiveY_Singularverb_Disrupter0_2.mov"
mov = visual.MovieStim3(win, video, flipVert=False)
mov.play()
mov.draw()
Example #26
        stoporcontinue(key)
    c = c + 1
    win.update()
    core.wait(0.5)
    if c == len(video_list) / 4:
        showinstructions(text4ta, 0.5)
    if c == len(video_list) / 2:
        showinstructions(text4tb, 0.5)
    if c == (len(video_list) / 2 + len(video_list) / 4):
        showinstructions(text4tc, 0.5)
    fixation.draw()
    win.update()
    core.wait(0.5)
    mov = visual.MovieStim3(win,
                            video,
                            size=(1010, 605),
                            flipVert=False,
                            flipHoriz=False)
    while mov.status != visual.FINISHED:
        mov.draw()
        win.flip()

win.flip()
core.wait(2)

#########TEST INSTRUCTIONS

text4 = u"Vous allez maintenant entendre deux sons."
text4_ = u"Attention, vous ne les entendrez qu'une seule fois!"
text5 = u"Quel son ressemble le plus à la langue que vous venez d'entendre?"
text5_ = u"Ne réfléchissez pas trop et allez-y avec votre instinct!"
Example #27
def run_trial(pars):
    """ pars corresponds to a row in the trial list"""

    # Retrieve parameters from the trial list
    trial_num, movie_file = pars

    # Load the video to display
    mov = visual.MovieStim3(win, filename=movie_file, size=(960, 540))

    # Take the tracker offline
    tk.setOfflineMode()
    pylink.msecDelay(50)

    # Send the standard "TRIALID" message to mark the start of a trial
    tk.sendMessage("TRIALID %s %s" % (trial_num, movie_file))

    # Record_status_message : show some info on the Host PC
    msg = "record_status_message 'Movie File: %s'" % movie_file
    tk.sendCommand(msg)

    # Drift check/correction, params, x, y, draw_target, allow_setup
    try:
        tk.doDriftCorrect(int(SCN_WIDTH / 2), int(SCN_HEIGHT / 2), 1, 1)
    except:
        tk.doTrackerSetup()

    # Start recording;
    # params: sample_in_file, event_in_file,
    # sample_over_link, event_over_link (1-yes, 0-no)
    tk.startRecording(1, 1, 1, 1)
    # Wait for 50 ms to cache some samples
    pylink.msecDelay(50)

    # The size of the video
    mo_width, mo_height = mov.size
    # position the movie at the center of the screen
    mov_x = int(SCN_WIDTH / 2 - mo_width / 2)
    mov_y = int(SCN_HEIGHT / 2 - mo_height / 2)

    # play the video till the end
    frame_n = 0
    prev_frame_timestamp = mov.getCurrentFrameTime()
    while mov.status != STOPPED:
        # draw a movie frame and flip the video buffer
        mov.draw()
        win.flip()

        # if a new frame is drawn, check frame timestamp and
        # send a VFRAME message
        current_frame_timestamp = mov.getCurrentFrameTime()
        if current_frame_timestamp != prev_frame_timestamp:
            frame_n += 1
            # send a message to mark the onset of each video frame
            tk.sendMessage('Video_Frame: %d' % frame_n)
            # VFRAME message: "!V VFRAME frame_num movie_pos_x,
            # movie_pos_y, path_to_movie_file"
            m_path = '../' + movie_file
            msg = "!V VFRAME %d %d %d %s" % (frame_n, mov_x, mov_y, m_path)
            tk.sendMessage(msg)
            prev_frame_timestamp = current_frame_timestamp

    # Send a message to mark video playback end
    tk.sendMessage("Video_terminates")

    # Clear the subject display
    win.color = (0, 0, 0)
    win.flip()

    # Stop recording
    tk.stopRecording()

    # Send a 'TRIAL_RESULT' message to mark the end of trial
    tk.sendMessage('TRIAL_RESULT')
Example #28
######################
# Movie Components
######################
# Initialize components for Routine "Movie"
MovieClock = core.Clock()

# video_file = os.path.join(video_dir, 'kungfury.mp4')
# inscapes_file = os.path.join(video_dir, '01_Inscapes_NoScannerSound_h264.wmv')

# Preload Movies
movie1 = visual.MovieStim3(
    win=win, name='movie',
    noAudio = False,
    filename=movieOrder[0]['runSeq'][0]['moviefile'],
    # filename='C:\\Users\\Michael\\Dropbox (Dartmouth College)\\CANLab Projects\\WASABI\\Paradigms\\WASABI_Main\\hyperalignment\\videos\\practice_videos\\design\\Duck plays dead CUT.mp4',
    ori=0, pos=(0, 0), opacity=1,
    loop=False,
    depth=-1.0
    )
movie2 = visual.MovieStim3(
    win=win, name='movie',
    noAudio = False,
    filename=movieOrder[0]['runSeq'][1]['moviefile'],
    # filename='C:\\Users\\Michael\\Dropbox (Dartmouth College)\\CANLab Projects\\WASABI\\Paradigms\\WASABI_Main\\hyperalignment\\videos\\practice_videos\\design\\Duck plays dead CUT.mp4',
    ori=0, pos=(0, 0), opacity=1,
    loop=False,
    depth=-1.0
    )

# mov = visual.VlcMovieStim(win, videopath,
Example #29
                           pos=(0, -3),
                           wrapWidth=30,
                           color='black',
                           alignHoriz='center',
                           name='bottomMsg',
                           text="bbb",
                           colorSpace='rgb')
# initialize video stimulus
#mov = visual.MovieStim(win, (params['movieDir']+params['movieFile']), size=params['movieSize'], name='Movie',
#    pos=[0,0],flipVert=False,flipHoriz=False,loop=False)
#mov = MovieStim2(win, (params['movieDir']+params['movieFile']), size=params['movieSize'], name='Movie',
#    pos=[0,0],flipVert=False,flipHoriz=False,loop=False)
mov = visual.MovieStim3(win=win,
                        filename=params['movieDir'] + params['movieFile'],
                        name='Movie',
                        pos=(0, 0),
                        flipVert=False,
                        flipHoriz=False,
                        loop=False,
                        noAudio=False)  # size=params['movieSize'],
movieClock = core.Clock()
print(mov)

# Look up prompts
[topPrompts,
 bottomPrompts] = PromptTools.GetPrompts(os.path.basename(__file__),
                                         params['promptType'], params)

# ============================ #
# ======= SUBFUNCTIONS ======= #
# ============================ #
Example #30
                break
        if continueRoutine:
            win.flip()

    # end routine RoleCue
    for thisComponent in RoleCueComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    VidTrials.addData('RoleText', RoleText.text)

    ISI_function(2.00)
    
    #begin routine Video
    continueRoutine = True
    VidClip = visual.MovieStim3(
        win=win, name='VidClip', units='height', noAudio=False, filename=VideoClip,
        ori=0.0, pos=(0, 0.035), opacity=1.0, loop=False, size=[1.2, .65])
    RoleVidText.setText('Imagine that you are a ' + str(roleCueText))
    CertaintyRatings.reset()
    innHundred.setText('100%')
    innKeysCount = 0
    guiltyKeysCount = 0

    y3=0
    y1=0
    VideoComponents = [VidClip, RoleVidText, yCoord, CertaintyRatings, certaintyCountText, Inn_Anchor, Guilty_Anchor, innScale, innHundred, guiltyHundred, innFifty, zero, guiltyFifty]
    for thisComponent in VideoComponents:
        thisComponent.tStart = None
        thisComponent.tStop = None
        thisComponent.tStartRefresh = None
        thisComponent.tStopRefresh = None