Example #1
import numpy as np
from psychopy import sound


def load_stimuli(settings):
    """Load wav files of stimuli for a given run and subject.

    All stimuli are loaded at the beginning of the run to avoid long
    computations during the presentation of the stimuli.

    All stimuli are assumed to be located in the directory 'stimuli' in
    the present working directory.

    Assumes that order info is contained in numpy files in one directory per
    subject, for example:
        s1/s1_run1order.npy
        s1/s1_run2order.npy
        ...
        s1/s1_run20order.npy

    Parameters
    ----------
    settings : dict
        Contains various experimental settings such as MR imaging parameters

    Returns
    -------
    stimuli : ndarray of psychopy.sound.SoundPyo objects
        Sounds to be presented in this run, in the order in which they
        will be played.

    """
    fnames = settings['stimuli']
    stimuli = np.array([sound.SoundPyo(f) for f in fnames])

    return stimuli
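
# A hypothetical usage sketch (not part of the original example): the docstring above
# assumes wav files under ./stimuli and per-subject order files such as
# s1/s1_run1order.npy. The 'settings' dict and the wav naming scheme used here are
# illustrative assumptions.
run_order = np.load('s1/s1_run1order.npy')             # stimulus order for subject s1, run 1
settings = {'stimuli': ['stimuli/stim%d.wav' % i for i in run_order]}
run_stimuli = load_stimuli(settings)
run_stimuli[0].play()                                  # first sound of the run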
Example #2
from psychopy import sound
import UniversalLibrary as UL  # Measurement Computing PyUniversalLibrary bindings (assumed available)


def led_on():
    """Switch the LED on via the DAQ digital output port, then play a short tone."""
    BoardNum = 0
    PortNum = UL.FIRSTPORTB
    Direction = UL.DIGITALOUT
    UL.cbDConfigPort(BoardNum, PortNum, Direction)  # configure the port for digital output
    DataValue = 1
    UL.cbDOut(BoardNum, PortNum, DataValue)  # drive the port high to light the LED
    sound_1 = sound.SoundPyo(value='C', secs=0.5, octave=4, stereo=True,
                             volume=1.0, loops=0, sampleRate=44100, bits=16,
                             hamming=True, start=0, stop=-1, name='', autoLog=True)
    sound_1.play()
Example #3
# load Psychopy modules for visual stimuli, audio stimuli and clock
from psychopy import visual, core, sound

# 'parent_dir' is assumed to point at the experiment directory that contains the 'stim' folder
parent_dir = "./"

# set up the window in which the stimuli will be presented
win = visual.Window(size=[800, 500], color="white", fullscr=False, units="pix")

# set up the image stimulus you want to present
happy_img = visual.ImageStim(win,
                             pos=[0, 0],
                             size=[500, 500],
                             image=parent_dir + "stim/happy.jpg")

# set up the audio stimulus you want to play
laugh_wav = sound.SoundPyo(parent_dir + "stim/baby_laugh.wav",
                           start=0,
                           stop=-1)

# "draw" the stimulus to "the back of" the window
happy_img.draw()

# present the stimulus
win.flip()

# play audio as soon as the image is presented
laugh_wav.play()

# the stimulus will be presented for 5 seconds
core.wait(5)

### close everything
win.close()
core.quit()
Example #4
imageStim = visual.ImageStim(win=win,
                             name='',
                             image='Correct.png',
                             mask=None,
                             ori=0,
                             pos=[0, 0],
                             size=[10.0, 10.0],
                             color=[1, 1, 1],
                             colorSpace=u'rgb',
                             opacity=1,
                             texRes=128,
                             interpolate=False,
                             depth=-2.0)

correctSound = sound.SoundPyo(value=500, secs=feedbackTimeout)    # 500 Hz tone for correct responses
incorrectSound = sound.SoundPyo(value=200, secs=feedbackTimeout)  # 200 Hz tone for incorrect responses

# Functions


def doUserInteraction(stim, expectedKeys, timeout, soundFile):
    global paused
    if timeout is None or timeout == 0:
        timeout = sys.maxint
    startTime = trialClock.getTime()
    endTime = startTime + timeout

    response = {
        "keys": [],
        "firstKey": None,
Example #5
"""
@author Thomas Churchman

Some sounds for use in the experiment.
"""

from psychopy import sound

sequencePresentSound = sound.SoundPyo(octave=4)           # lower-pitched tone for presenting the sequence
sequenceAnswerSound = sound.SoundPyo(octave=6)            # higher-pitched tone for the answer phase
buttonClickSound = sound.SoundPyo(octave=5, secs=0.075)   # short click played on button presses
buttonClickSound.setVolume(0.5)
Example #6
imageStim = visual.ImageStim(win=win,
                             name='',
                             image='Correct.png',
                             mask=None,
                             ori=0,
                             pos=[0, 0],
                             size=[10.0, 10.0],
                             color=[1, 1, 1],
                             colorSpace=u'rgb',
                             opacity=1,
                             texRes=128,
                             interpolate=False,
                             depth=-2.0)

correctSound = sound.SoundPyo(value=500, secs=0.5)  # 500 Hz feedback tone
woohoo = sound.SoundPygame(value='Woohoo.wav')      # wav file played through the pygame sound backend
'''
The "textStim", "imageStim" and "correctSound" are arbitrary names I gave to a particular object, in this case,
an TextStim, ImageStim and SoundStim.

Once they are defined, we can call them whereever and however many times we like. We can also modify them "on the fly"

There are several ways to draw stuff to the window depending on whether you want it to be continuously drawn, or
drawn for a single frame, etc. Also, it is important to note that PsychoPy utilizes a buffer space. You can think
of a buffer space as an area of memory that is "visually hidden" from the user to which we can draw stuff before we show
it so that we don't run into situations where we want to draw something but it isn't ready yet for whatever reason.

Let's take a look:
'''
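
# A minimal sketch (not part of the original snippet) of the pattern described above:
# modify an existing object "on the fly", draw it to the hidden back buffer, then
# reveal it with win.flip(). Assumes the 'win', 'imageStim' and 'correctSound'
# objects defined above, plus `from psychopy import core`.
imageStim.setOpacity(0.8)  # modify an existing stimulus on the fly
imageStim.draw()           # drawn to the back buffer; nothing is visible yet
win.flip()                 # swap buffers: the image appears on screen
correctSound.play()        # play the feedback tone once the image is up
core.wait(0.5)             # keep the frame up for the duration of the tone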
Example #7
                            text='Break\n\nPress a button to continue...',
                            color=(1, 1, 1))  # break
thankyou = visual.TextStim(mywin,
                           text='End of the experiment',
                           color=(1, 1, 1))  # thank you screen
question = visual.TextStim(
    mywin,
    text='Did you already intend to press SPACE when you heard the beep? \n [z = JA] [m = NEE]',
    color=(1, 1, 1))  # intention question
beep = sound.SoundPyo(value='C',
                      secs=0.2,
                      octave=5,
                      stereo=True,
                      volume=1.0,
                      loops=0,
                      sampleRate=44100,
                      bits=16,
                      hamming=True,
                      start=0,
                      stop=-1)  # beep sound

# set experiment parameters
nr_images = 15
opacity = np.arange(0.0, 1.0, 0.025)  # opacity levels from 0.0 up to 0.975 in steps of 0.025
sizeMask = 8
nr_trials_per_block = 5
nr_blocks = 3
current_trial = 0
current_block = 1
order = list(xrange(1, nr_images + 1))  # image indices 1..nr_images (Python 2 xrange)