Ejemplo n.º 1
0
    def __init__(self, params):
        """Set up auditory feedback cues for trial performance.

        params: experiment parameters; only ``feedback_duration`` is read.
        """
        # One frequency sweep per trial outcome.
        self.correct_sound = Sound(sound_freq_sweep(1000, 1000, .1))
        self.incorrect_sound = Sound(sound_freq_sweep(8000, 200, .1))
        self.no_response_sound = Sound(sound_freq_sweep(200, 300, .1))
        # How long to hold the feedback when it is issued.
        self.duration = params.feedback_duration
        # No feedback scheduled until a trial outcome sets one.
        self.feedback = None
Ejemplo n.º 2
0
    def create_sounds(audio_dict, order, freq):
        '''Build the auditory stimulus stream described by ``order``.

        Each entry in ``order`` indexes a 300 ms syllable in
        ``audio_dict['files']``; ``freq - 1`` silent (None) slots follow
        every sound so a syllable plays once per ``freq`` ticks.  Returns
        a ``stimulus`` whose render callback plays the current item and
        whose move callback advances the stream, marking it done at the
        end (both are no-ops while paused).
        '''
        def render(stim):
            current = stim.items[stim.i]
            if stim.paused or current is None:
                return 0
            current['content'].play()
            return current['value']

        def move(stim):
            if stim.paused:
                return
            if stim.i + 1 == len(stim.items):
                stim.done = True
            else:
                stim.i += 1

        # One playable item followed by (freq - 1) silent placeholders.
        items = []
        for key in order:
            idx = int(key)
            items.append({
                'content': Sound(audio_dict['files'][idx], secs=0.3),
                'value': idx
            })
            items.extend(None for _ in range(freq - 1))

        return stimulus(items, render, move, do_nothing)
Ejemplo n.º 3
0
    def __init__(self,
                 TR=1.0,
                 TA=1.0,
                 volumes=10,
                 sync='5',
                 skip=0,
                 sound=False,
                 **kwargs):
        """Thread that emits a 'sync' character every TR, emulating the
        MR scanner's hardware sync pulse.

        Lets fMRI scripts be tested for temporal robustness without
        scanner hardware: the sync character lands in the key buffer with
        sub-millisecond precision (less precise when the CPU is maxed).
        Recommend TR >= 1.0 and < 100% CPU load; shorter TRs mean a
        higher CPU load.

        Parameters:
            TR:      seconds between volume acquisitions
            TA:      seconds to acquire one volume
            volumes: number of 3D volumes to obtain in a given scanning run
            sync:    character used as flag for sync timing, default='5'
            skip:    number of initial frames to omit silently (no sync
                     pulse) during T1 stabilization, aka "discdacqs"; not
                     needed to test script timing but gives a more
                     accurate feel to the start of a run
            sound:   if True, simulate scanner noise
        """
        if TR < 0.1:
            raise ValueError(
                'SyncGenerator:  whole-brain TR < 0.1 not supported')
        self.TR = TR
        self.TA = TA
        self.hogCPU = 0.035
        self.timesleep = self.TR
        self.volumes = int(volumes)
        self.sync = sync
        self.skip = skip
        self.playSound = sound
        if self.playSound:  # pragma: no cover
            # Two slightly detuned tones approximate scanner noise.
            self.sound1 = Sound(800, secs=self.TA, volume=0.15, autoLog=False)
            self.sound2 = Sound(813, secs=self.TA, volume=0.15, autoLog=False)

        self.clock = core.Clock()
        self.stopflag = False
        threading.Thread.__init__(self, None, 'SyncGenerator', None)
        self.running = False
Ejemplo n.º 4
0
 def __init__(self, win, manager, soundFile, fixationCross):
     """Scene that plays an audio file while a fixation cross is shown.

     Frame counts are derived from the sound's duration plus the
     configured audio onset delay (both expressed in frames).
     """
     self.win = win
     self.manager = manager
     self.sound = Sound(soundFile)
     self.fixationCross = fixationCross
     # Frames for playback and for the onset delay, computed separately
     # so the same products feed both ceilings.
     sound_frames = self.sound.getDuration() * constants.FRAME_RATE
     delay_frames = constants.AUDIO_DELAY * constants.FRAME_RATE
     self.max_frame = math.ceil(sound_frames + delay_frames)
     self.delay_frames = math.ceil(delay_frames)
     self.current_frame = 0
Ejemplo n.º 5
0
 def __init__(self):
     """Initialise session state, graphics, and audio, then start the task."""
     # timing logs for the immerse and count phases
     self.immerseT = []
     self.countT = []
     # image banks: instruction slides vs. procedure slides
     self.imgBank_instruct = []
     self.imgBank_proc = []
     self.slidenum = 0
     # onset time of the current slide -- presumably set when shown
     self.slideons = None
     self.localGraphics = GameGraphics()
     self.state = Interact()
     self.action = None
     self.sound = Sound(resourceRoot + soundfile)
     # '%s' placeholder presumably filled with a participant ID at save time
     self.saveLoc = DATA_FOLDER + "PREEMPT2_%s/" + SAVE_NAME
     # NOTE(review): __init__ launches the task directly via start()
     self.start()
Ejemplo n.º 6
0
    def __init__(self, window, sound, text=None, *args, **kwargs):
        '''Audio stimulus: wraps a sound.Sound plus optional on-screen text.

        Arguments:
        sound - a number (pitch in Hz), a string naming a note, or a
                filename string; see
                http://www.psychopy.org/api/sound.html
        text - text to display on screen (Optional).

        Any extra positional and keyword arguments are forwarded to the
        sound.Sound constructor.
        '''
        super(Audio, self).__init__(window)
        self.text = text
        self.sound = Sound(sound, *args, **kwargs)
Ejemplo n.º 7
0
def play_file(path, msg, trigger_port, trigger_twice=False):
    """Play the audio file at ``path`` while showing ``msg`` on screen.

    The event is appended to the global log first.  If ``trigger_port``
    is set, a trigger is sent just before playback and, when
    ``trigger_twice`` is also set, again after playback finishes.
    """
    # Log against the global experiment clock before anything plays.
    all_events.append({
        'what': "audio played",
        'when': core.getTime() - experiment_start,
        'content': path,
        'message': msg,
        'response': None
    })
    prompt = visual.TextStim(win, text=msg)
    prompt.draw()
    win.flip()
    audio = Sound(path)
    if trigger_port:
        sendTrigger(trigger_port, duration=0.01)
    audio.play()
    # Block until the sound has finished.
    core.wait(audio.getDuration())
    if trigger_port and trigger_twice:
        sendTrigger(trigger_port, duration=0.01)
Ejemplo n.º 8
0
def test_update(sound_mock):
    """AudioScene.update must hand off to the response scene only after
    the full audio duration plus the onset delay has elapsed, drawing on
    every frame up to that point."""
    manager = Mock()
    sound_mock.return_value = Sound()
    sound_mock.return_value.duration = 1
    scene = AudioScene(Mock(), manager, Mock(), Mock())
    scene.draw = Mock()

    #Act - first frame
    scene.update()
    #Assert - nothing should have been handed off yet
    manager.set_response_scene.assert_not_called()

    #Act - step through the remaining frames of audio + delay.
    # (A plain loop, not a comprehension: update() is called for its
    # side effects and the list of return values was discarded.)
    remaining = math.ceil(constants.FRAME_RATE +
                          constants.FRAME_RATE * constants.AUDIO_DELAY - 1)
    for _ in range(remaining):
        scene.update()

    #Assert
    manager.set_response_scene.assert_called_once()
    expected_draws = math.ceil(constants.FRAME_RATE +
                               constants.FRAME_RATE * constants.AUDIO_DELAY)
    assert scene.draw.call_count == expected_draws
Ejemplo n.º 9
0
        return str(self.id)


### Global variables for rendering stimuli
# Two on-screen participants: "act" at x = -width/4 (left half) and
# "obs" at x = +width/4 (right half) -- presumably actor and observer.
sone = subject(1, "act", 0.3, None, window.size[0] / -4)
stwo = subject(2, "obs", 0.7, None, window.size[0] / 4)
subjects = [sone, stwo]

# Session metadata; the commented single-participant variant is kept
# for reference.
expinfo = {'participant1': sone.id, 'participant2': stwo.id, 'pair': 1}
#expinfo = {'participant1': sone.id}

# 4 blocks of 2 trials each
blocks = range(4)
ntrials = 2

# create beep for decision interval
beep = Sound('A', secs=0.5)


def genstartscreen():
    """Draw the start prompt once per participant, at each x-offset."""
    for participant in (sone, stwo):
        visual.TextStim(window,
                        text="Press spacebar to start.",
                        pos=[0 + participant.xoffset, 0],
                        color='black',
                        height=20).draw()
Ejemplo n.º 10
0
 def setup_sounds(self):
     """Load the game's sound effects from the resources directory."""
     for attr, wav in (('cashsnd', 'resources/cash.wav'),
                       ('firesnd', 'resources/bbhit.wav'),
                       ('buzzsnd', 'resources/buzz.wav')):
         setattr(self, attr, Sound(wav))
Ejemplo n.º 11
0
###############################################################################
# .. code-block:: python
#
#   import serial
#   ser = serial.Serial('COM4')  # Change this value according to your setup

#%%
# Create an Oximeter instance and initialize recording (the loop below records for 30 seconds)

oxi = Oximeter(serial=ser, sfreq=75, add_channels=4).setup()

#%%
# Create the cardiac feedback sounds (beat + diastoles) played during recording

beat = Sound("C", secs=0.1)
diastole1 = Sound("E", secs=0.1)
diastole2 = Sound("G", secs=0.1)
diastole3 = Sound("Bfl", secs=0.1)

systoleTime1, systoleTime2, systoleTime3 = None, None, None
tstart = time.time()
while time.time() - tstart < 30:

    # Check if there are new data to read
    while oxi.serial.inWaiting() >= 5:

        # Convert bytes into list of int
        paquet = list(oxi.serial.read(5))

        if oxi.check(paquet):  # Data consistency
Ejemplo n.º 12
0
def scene(sound_mock, fileMock):
    """Fixture: an AudioScene whose patched Sound reports a 1 s duration."""
    stub = Sound()
    stub.duration = 1
    sound_mock.return_value = stub
    return AudioScene(Mock(), Mock(), sound_mock, Mock())
Ejemplo n.º 13
0
from tools import sound_freq_sweep, compound_sound
import numpy as np
from psychopy.sound import Sound
from psychopy import visual

p = dict(
    # Display:
    monitor='CRT_NEC_FE992',
    full_screen=True,
    screen_number=1,
    # Sounds:
    correct_sound=Sound(sound_freq_sweep(2000, 2000, .1)),
    incorrect_sound=Sound(sound_freq_sweep(8000, 200, .1)),
    # General:
    n_trials=150,
    fixation_size=0.1,
    rgb=np.array([1., 1., 1.]),
    # Element array:
    sf=4,
    elems_per_row=30,
    elem_size=2.5,
    elem_spacing=1,
    jitter=0.08,
    res=128,
    # Cue:
    cue_size=[2, 2],
    line_width=5,
    # Timing:
    cue_dur=0.2,
    cue_to_ea=0.6,
    #texture_dur =  0.05,
Ejemplo n.º 14
0
                              color=[-1, -1, -1])

Hz = 0
while (Hz < 50 or Hz > 150):
    Hz = win.getActualFrameRate(nIdentical=20, nMaxFrames=80, nWarmUpFrames=10)
    Hz = round(Hz)
    print(Hz)
ifi = 1 / Hz

gap_frames = round(gap_dur / ifi)
grating_frames = round(grating_dur / ifi)

# create sound
medf_s = Sound(800,
               sampleRate=44100,
               secs=grating_frames * ifi,
               stereo=True,
               loops=0)

trial_times = np.array([])

trial_times = win.flip()
for itrial in range(ntrials):
    for gframe in range(int(gap_frames)):
        #print(gframe)
        fixation.draw()
        win.flip()

    for gtframe in range(int(grating_frames)):
        if gtframe == 0:
            if ppt: win.callOnFlip(p_port.setData, int(itrial + 1))
###############################################################################
# .. code-block:: python
#
#   import serial
#   ser = serial.Serial('COM4')  # Change this value according to your setup

#%%
# Create an Oximeter instance and initialize recording (the loop below records for 30 seconds)

oxi = Oximeter(serial=ser, sfreq=75, add_channels=4).setup()

#%%
# Create the cardiac feedback sounds (beat + diastoles) played during recording

beat = Sound('C', secs=0.1)
diastole1 = Sound('E', secs=0.1)
diastole2 = Sound('G', secs=0.1)
diastole3 = Sound('Bfl', secs=0.1)

systoleTime1, systoleTime2, systoleTime3 = None, None, None
tstart = time.time()
while time.time() - tstart < 30:

    # Check if there are new data to read
    while oxi.serial.inWaiting() >= 5:

        # Convert bytes into list of int
        paquet = list(oxi.serial.read(5))

        if oxi.check(paquet):  # Data consistency
Ejemplo n.º 16
0
                           height=0.15,
                           antialias=True,
                           bold=False,
                           italic=False,
                           alignHoriz='center',
                           alignVert='center',
                           fontFiles=(),
                           wrapWidth=None,
                           flipHoriz=False,
                           flipVert=False,
                           languageStyle='LTR',
                           name=None,
                           autoLog=None)

#instantiating the sound stimulus to save time
stimulus = Sound(os.path.join(path_stimuli, stimulus_name + '.wav'))
stop_stim = Sound(stop_wav)
#initializing the index to get the first image in the array
stim_idx = 0

#creation of the duration variables (seconds per task/baseline block)
task_duration, baseline_duration = 20, 20

#total paradigm length: 11 baselines + 10 tasks of 20 s each
end_stimuli = 420  #20*11baseline + 20*10baseline
#baseline blocks start every 40 s (at 1, 41, 81, ...) and last 20 s
baselines_onset = np.arange(1, end_stimuli, 40)
baselines_offset = baselines_onset + 19
#task blocks interleave, each starting 20 s after a baseline onset
task_onset = np.arange(21, end_stimuli - 20, 40)
task_offset = task_onset + 19

###############################################################################
    def __init__(self, tracker, win):
        '''Custom PsychoPy calibration display for an EyeLink tracker.

        tracker: an EyeLink instance (connection)
        win: the PsychoPy window we use for calibration'''

        pylink.EyeLinkCustomDisplay.__init__(self)

        # background and target color
        self._backgroundColor = win.color
        self._foregroundColor = 'black'

        # window to use for calibration
        self._display = win
        # make the mouse cursor invisible
        self._display.mouseVisible = False

        # display width & height
        self._w, self._h = win.size

        # resolution fix for Mac retina displays: halve the reported
        # (backing-store) size to get the logical resolution
        # NOTE(review): the grep only detects built-in Retina panels;
        # external HiDPI displays would not be corrected here
        if 'Darwin' in platform.system():
            sys_cmd = 'system_profiler SPDisplaysDataType | grep Retina'
            is_ret = os.system(sys_cmd)
            if is_ret == 0:  # grep matched, so this is a Retina display
                self._w = int(self._w / 2.0)
                self._h = int(self._h / 2.0)

        # store camera image pixels in an array of unsigned ints
        self._imagebuffer = array.array('I')

        # store the color palette for camera image drawing
        self._pal = None

        # initial size of the camera image
        self._size = (384, 320)

        # initial mouse configuration (cursor hidden)
        self._mouse = event.Mouse(False)
        self.last_mouse_state = -1

        # camera image title; height scales with the camera image size
        self._msgHeight = self._size[1] / 16.0
        self._title = visual.TextStim(self._display,
                                      '',
                                      wrapWidth=self._w,
                                      color=self._foregroundColor)

        # calibration target; size scales with the display width
        self._targetSize = self._w / 64.
        self._tar = visual.Circle(self._display,
                                  size=self._targetSize,
                                  lineColor=self._foregroundColor,
                                  lineWidth=self._targetSize / 2)

        # calibration sounds (beeps)
        self._target_beep = Sound('type.wav', stereo=True)
        self._error_beep = Sound('error.wav', stereo=True)
        self._done_beep = Sound('qbeep.wav', stereo=True)

        # a reference to the tracker connection
        self._tracker = tracker

        # for a clearer view we always enlarge the camera image
        self.imgResize = None
Ejemplo n.º 18
0
    def __init__(self, subject_initials, index_number, tr, start_block,
                 config):
        """Set up a stop-signal fMRI session: audio, screen, design, staircase.

        subject_initials: participant identifier ('pilot' gets special timing)
        index_number: participant/session index passed to the parent session
        tr: repetition time in seconds (only 2 and 3 set a trial duration;
            see NOTE below)
        start_block: block number to start from (allows resuming mid-session)
        config: configuration object queried via config.get(section, key)
        """
        super(StopSignalSession, self).__init__(
            subject_initials,
            index_number,
            tr=tr,
            simulate_mri_trigger=False,
            # NB: DO NOT use this MRI simulation option, but rather another!
            mri_trigger_key=config.get('mri', 'mri_trigger_key'))

        self.config = config
        self.start_block = start_block  # allows for starting at a later block than 1
        self.warmup_trs = config.get('mri', 'warmup_trs')

        # NOTE(review): trial_duration is only assigned for tr == 2,
        # tr == 3, or the 'pilot' subject; any other tr leaves the
        # attribute unset and will fail later -- confirm intended.
        if tr == 2:
            self.trial_duration = 8 - .5
        elif tr == 3:
            self.trial_duration = 9 - 0.5
        if self.subject_initials == 'pilot':
            self.trial_duration = [8.5, 7.5, 8.5, 7.5]

        if config.get('audio', 'engine') == 'psychopy':
            # BEFORE moving on, ensure that the correct audio driver is selected
            from psychopy import prefs
            prefs.general['audioLib'] = config.get('audio', 'backend')
            from psychopy.sound import Sound

            self.bleeper = Sound(secs=0.1,
                                 octave=5,
                                 loops=0,
                                 sampleRate=44100,
                                 name='')
            # self.bleeper.play()

        elif config.get('audio', 'engine') == 'TK':
            self.setup_sound_system()
            self.read_sound_file('sounds/0.wav', '0')

            # to test sound:
            # self.play_sound(sound_index=0)

        # button mapping for left/right responses, read from config
        self.response_button_signs = [
            config.get('input', 'response_button_left'),
            config.get('input', 'response_button_right')
        ]

        screen = self.create_screen(
            engine='psychopy',
            size=config.get('screen', 'size'),
            full_screen=config.get('screen', 'full_screen'),
            background_color=config.get('screen', 'background_color'),
            gamma_scale=config.get('screen', 'gamma_scale'),
            physical_screen_distance=config.get('screen',
                                                'physical_screen_distance'),
            physical_screen_size=config.get('screen', 'physical_screen_size'),
            max_lums=config.get('screen', 'max_lums'),
            wait_blanking=config.get('screen', 'wait_blanking'),
            screen_nr=config.get('screen', 'screen_nr'),
            mouse_visible=config.get('screen', 'mouse_visible'))

        # Try this
        # TODO: think about really including this?
        self.screen.recordFrameIntervals = True

        # negative entries are presumably placeholders replaced per trial
        self.phase_durations = np.array([
            -0.0001,  # wait for scan pulse
            -5,
            1,
            -5
        ])  # the strings will be filled before every trial

        self.load_design()
        self.prepare_objects()
        self.prepare_staircase()
Ejemplo n.º 19
0
from tools import sound_freq_sweep, compound_sound

from psychopy.sound import Sound

p = dict(
    color_target=True,
    monitor='SportsMedicineLab',
    screen_number=0,
    scanner=False,
    eye_motion=True,
    full_screen=False,
    save_fig=True,
    #Feedback sounds
    incorrect_sound=Sound(sound_freq_sweep(8000, 200, .1)),
    correct_sound=Sound(sound_freq_sweep(2000, 2000, .1)),
    no_response_sound=Sound(sound_freq_sweep(500, 700, .1)),
    eye_motion_sound=Sound(compound_sound([200, 400, 800], .2)),
    fixation_size=3.0,
    target_dur=0.1,
    response_dur=1.5,
    feedback_dur=0.3,
    iti=0.24,
    isi=0.3,
    interblock_interval=0.54,
    # block_duration = 12, Enforce this!
    n_trials=5,
    n_blocks=12,
    target_size=4,
    target_x=4,
    target_y=4,
    sf_base=1,
Ejemplo n.º 20
0
                           autoLog=None)

#clock initialization
globalClock = core.Clock()

#%%############################################################################
#                      THESE VARIABLES DEPEND ON                             #
#                      THE EXPERIMENTAL DESIGN                               #
###############################################################################
#prompt the experimenter for the stimulus filename and session number
audio_stim_name = input('Insert the filename of the audio stimulus: ')
nr_NF_session = input('Number of the NF session (1,2,3...): ')

#path of the stimulus
stimulus_path = os.path.join(wdir,
                             'sounds/stimuli/' + audio_stim_name + '.wav')
stimulus = Sound(stimulus_path)
#path of the stop audio file
stop_wav = os.path.join(wdir, 'sounds/stop.wav')
stop_stim = Sound(stop_wav)

#create a new output directory for the FB images, named after the
#stimulus and session (created next to the subject's json file)
outdir = os.path.join(os.path.abspath(os.path.join(sub_json_file, os.pardir)),
                      audio_stim_name + '_output_' + str(nr_NF_session))
if not os.path.exists(outdir):
    os.makedirs(outdir)

#condition timings
#NOTE(review): pickle.load assumes these are trusted local experiment
#files -- never load pickles from untrusted sources
baselines = pickle.load(
    open(os.path.join(wdir, 'prt/intermittent/baselines.pkl'), 'rb'))
tasks = pickle.load(
    open(os.path.join(wdir, 'prt/intermittent/tasks.pkl'), 'rb'))
Ejemplo n.º 21
0
kb_events = None

rise_amnt = 10
t_start = time.time()

flip_bit = 1 #this is just to flip the sign of width change

#-------------------------------
# SETUP SOUND STIMS
freq_factor = 2*np.pi*np.linspace(0,0.5,22050)
freqL = 440
freqR = 440
soundL = np.sin(freq_factor*freqL)
soundR = np.sin(freq_factor*freqR)
s_out = Sound(np.array([soundL,soundR]).T,secs=10)
#
#-------------------------------
t=1
while not kb_events:
    s_out.stop()
    if t==1:
        s_out.play()
        t=0
    
#    soundL = np.sin(freq_factor*(freqL+10))
#    soundR = np.sin(freq_factor*freqR)
#    s_out.setSound = np.array([soundL,soundR]).T
    # Get the current mouse position
    # posDelta is the change in position * since the last call *
    position, posDelta = mouse.getPositionAndDelta()
Ejemplo n.º 22
0
ns = egi.Netstation()
ns.connect('10.10.10.42', 55513)
ns.BeginSession()
#
ns.sync()
#
ns.StartRecording()
#
# sampleDir = "./samples"
#
# sampleFilenames = [path.join(sampleDir, f) for f in listdir(sampleDir)]

# psychopy.voicekey.samples_from_file(sampleFilenames[0])

# trials = TrialHandler(sampleFilenames, 1, method="random")
s = Sound()

win = visual.Window()
msg = visual.TextStim(win, text="Press a key to hear a sound, Q to quit")

s.setSound("./samples/13.wav")
while True:
	msg.draw()
	win.flip()
	k = event.getKeys()
	if len(k)>0:
		if 'q' in k:
			break
		ns.sync()
		s.play()
		ns.send_event('evt1')