Example #1
0
    def test_AdvAudioCapture(self):
        """Exercise AdvAudioCapture end to end: marker tone, recording,
        compress/uncompress, resample, loudness, playback, and the
        ValueError paths when no saved file is set.

        Requires working audio hardware; uses self.tmp as scratch dir.
        """
        filename = os.path.join(self.tmp, 'test_mic.wav')
        # First capture object only exists to receive the marker tone.
        mic = microphone.AdvAudioCapture(autoLog=False)
        tone = sound.Sound(440, secs=.02, autoLog=False)
        mic.setMarker(tone=tone)
        # Re-created with an explicit filename; this is the instance under test.
        mic = microphone.AdvAudioCapture(filename=filename,
                                         saveDir=self.tmp,
                                         autoLog=False)

        mic.record(1, block=True)
        mic.setFile(mic.savedFile)  # same file name
        mic.getMarkerOnset()

        # compress() should swap savedFile over to a FLAC on disk...
        mic.compress()
        assert os.path.exists(mic.savedFile)
        assert mic.savedFile.endswith('.flac')

        # ...and uncompress() should restore a .wav.
        mic.uncompress()
        assert mic.savedFile.endswith('.wav')
        assert os.path.exists(mic.savedFile)

        # Resampling is expected to shrink the file to roughly one third.
        old_size = os.path.getsize(mic.savedFile)
        new_file = mic.resample(keep=False)
        assert old_div(old_size, 3.1) < os.path.getsize(new_file) < old_div(
            old_size, 2.9)
        mic.getLoudness()

        mic.playback()
        mic.playback(loops=2, block=False)
        mic.playback(stop=True)

        # With savedFile unset, playback and loudness must raise ValueError.
        tmp = mic.savedFile
        mic.savedFile = None
        with pytest.raises(ValueError):
            mic.playback()
        with pytest.raises(ValueError):
            mic.getLoudness()
        mic.savedFile = tmp

        mic.resample(keep=False)
        mic.resample(newRate=48000, keep=False)
        # resample must also reject a missing file and a negative rate.
        tmp = mic.savedFile
        mic.savedFile = None
        with pytest.raises(ValueError):
            mic.resample(keep=False)
        mic.savedFile = tmp
        with pytest.raises(ValueError):
            mic.resample(newRate=-1)
Example #2
0
    def test_AudioCapture_basics(self):
        """Basic capture lifecycle: constructor error when no mic is present,
        switchOn with different settings, record/stop/reset, blocking record.
        """
        # Simulate missing hardware: constructor must raise MicrophoneError.
        microphone.haveMic = False
        with pytest.raises(microphone.MicrophoneError):
            microphone.AdvAudioCapture(autoLog=False)
        microphone.haveMic = True

        # switchOn(16000, 1, 2048): sample rate plus two further positional
        # args — presumably output channels and buffer size; confirm against
        # the psychopy.microphone API before relying on this.
        microphone.switchOn(16000, 1, 2048)
        microphone.switchOn(48000)

        mic = microphone.AdvAudioCapture(saveDir=self.tmp, autoLog=False)
        # Re-created pointing at a saveDir that does not exist yet.
        mic = microphone.AdvAudioCapture(saveDir=self.tmp + '_test',
                                         autoLog=False)
        mic.record(.10, block=False)  # returns immediately
        core.wait(.02)
        mic.stop()
        mic.reset()

        # A blocking record should leave a file on disk when it returns.
        mic.record(0.2, block=True)
        assert os.path.isfile(mic.savedFile)
Example #3
0
    def __init__(self, CONF):
        """Initialize the microphone and the per-participant recording dir.

        Creates output/recordings/<participant>_<session> if it does not
        already exist, then switches the mic subsystem on and builds the
        capture object.
        """
        self.CONF = CONF
        recording_dir = os.path.join(
            "output", "recordings",
            CONF["participant"] + "_" + CONF["session"])
        if not os.path.exists(recording_dir):
            os.makedirs(recording_dir)
        self.path = recording_dir

        microphone.switchOn()
        self.mic = microphone.AdvAudioCapture()
Example #4
0
def NART_task(myClock, datafn):
    """Run the NART word-reading task, recording one .wav response per word.

    :param myClock: experiment clock passed through to fixation_screen
    :param datafn: data-file base name; recordings go in datafn + '_wav'

    Pressing space advances to the next word; escape aborts via quitEXP.
    """
    wavDirName = datafn + '_wav'
    if not os.path.isdir(wavDirName):
        os.makedirs(wavDirName)  # to hold .wav files
    microphone.switchOn()
    mic = microphone.AdvAudioCapture(name='mic',
                                     saveDir=wavDirName,
                                     stereo=True)
    import codecs
    # Fix: close the stimulus file after reading — the original called
    # codecs.open(...).read() and never closed the stream, leaking the
    # file handle.
    with codecs.open('Stimuli\\nart_wordlist.txt', 'r',
                     'utf-8') as word_file:
        stimuli_list = word_file.read().split('\n')
    for this in stimuli_list:
        fixation_screen(myClock, waittime=1)
        NART_word.setText(this)
        NART_word.draw()
        win.flip()
        wavfile = mic.record(1200)
        event.waitKeys(keyList=['space'])
        mic.stop()
        if event.getKeys(keyList=['escape']):
            mic.stop()
            quitEXP(True)
Example #5
0

def plotYX(yaxis, xaxis, description=''):
    """Plot *yaxis* against *xaxis* with a grid; *description* becomes the
    title and the y-label shows the standard deviation of *yaxis*.
    """
    pyplot.plot(xaxis, yaxis)
    pyplot.grid(True)
    pyplot.title(description)
    pyplot.ylabel('[std %.1f]' % np.std(yaxis))
    pyplot.draw()
    pyplot.show()  # typically blocks until the plot window is closed


# initial set up: window, a circle stimulus, and the default microphone
win = visual.Window(fullscr=False, units='height')
circle = visual.Circle(win, 0.25, fillColor=1, edges=64)
microphone.switchOn()
mic = microphone.AdvAudioCapture()

# identify the hardware microphone in use:
# pyo reports (names, indices); the default input's name is shown to the
# participant in the start prompt below.
names, idx = sound.backend.pyo.pa_get_input_devices()
inp = sound.backend.pyo.pa_get_default_input()
msg = 'Speaker vol > 0\nAny key to start...\n\n"%s"' % names[idx.index(inp)]

# on-screen text stimuli for the three phases of the demo
instr = visual.TextStim(win, msg, color=-1, height=0.05)
text = visual.TextStim(win,
                       "Any key to see\nthe recording",
                       color=-1,
                       height=0.05)
msg2 = visual.TextStim(win,
                       "Close plot window to continue",
                       color=-1,
                       height=0.05)
Example #6
0
    # NOTE(review): PsychoPy-Builder-generated fragment — presumably the body
    # of a `for thisTrial in trials:` loop; it ends mid-routine.
    currentLoop = trials
    # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
    if thisTrial != None:
        for paramName in thisTrial:
            # binds each trial parameter as a local variable of that name
            exec('{} = thisTrial[paramName]'.format(paramName))

    # ------Prepare to start Routine "trial"-------
    t = 0
    trialClock.reset()  # clock
    frameN = -1
    continueRoutine = True
    routineTimer.add(4.000000)  # routine lasts at most 4 s
    # update component parameters for each repeat
    image_1.setImage(images)
    # fresh mono capture object per trial, recording channel 0
    mic_1 = microphone.AdvAudioCapture(name='mic_1',
                                       saveDir=wavDirName,
                                       stereo=False,
                                       chnl=0)
    # keep track of which components have finished
    trialComponents = [image_1, mic_1]
    for thisComponent in trialComponents:
        thisComponent.tStart = None
        thisComponent.tStop = None
        thisComponent.tStartRefresh = None
        thisComponent.tStopRefresh = None
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED

    # -------Start Routine "trial"-------
    while continueRoutine and routineTimer.getTime() > 0:
        # get current time
        t = trialClock.getTime()
Example #7
0
    # Python 2 fragment (print statements). Sanity-check the stimulus count
    # when not in test mode; bail out of the experiment otherwise.
    if not test == 1:
        if len(speechList) != numTrials:
            print speechList
            print 'Exiting...: Unexpected number of qualified *.mp4 files'
            core.quit()

# normjoin / rms are helpers defined elsewhere in this project —
# presumably path-normalizing join and root-mean-square; confirm there.
tmpSoundFile = normjoin(rootPath, 'temp.wav')
babblePath = normjoin(rootPath, 'Babble')
babbleList = ['babble' + str(f) for f in table['BabbleFile']]
bab1File = normjoin(babblePath, 'babble1.wav')
info, bab1 = scipy.io.wavfile.read(bab1File)
babbleRMS = rms(bab1)  # reference level for scaling the babble noise

# Set up microphone, must be 16000 or 8000 Hz for speech recognition
microphone.switchOn(sampleRate=16000)
mic = microphone.AdvAudioCapture(name='mic', saveDir=dataOutPath, stereo=False)

#Initiate the PsychPy window
win = visual.Window([1920, 1080])
#sound.init(48000,buffer=500)

if not test:
    #Present an example of the talker without noise. No response taken.
    keystext = "Please listen to the example sentence.Press spacebar when ready. "
    text = visual.TextStim(win, keystext, pos=(0, 0), units='pix')
    text.draw()
    win.flip()
    core.wait(0.5)
    k = event.waitKeys()

    #Present an example of the talker without noise. No response taken.
Example #8
0
 # NOTE(review): Builder-generated fragment with unusual one-space
 # indentation (preserved); ends mid-way through the "trialpost" routine.
 #-------Ending Routine "trial"-------
 for thisComponent in trialComponents:
     if hasattr(thisComponent, "setAutoDraw"):
         thisComponent.setAutoDraw(False)
 sound_1.stop() #ensure sound has stopped at end of routine

 # the Routine "trial" was not non-slip safe, so reset the non-slip timer
 routineTimer.reset()

 #------Prepare to start Routine "trialpost"-------
 t = 0
 trialpostClock.reset()  # clock
 frameN = -1
 routineTimer.add(10.000000)  # routine lasts at most 10 s
 # update component parameters for each repeat
 mic = microphone.AdvAudioCapture(name='mic', saveDir=wavDirName, stereo=False)

 # keep track of which components have finished
 trialpostComponents = []
 trialpostComponents.append(speak_instruct)
 trialpostComponents.append(mic)
 for thisComponent in trialpostComponents:
     if hasattr(thisComponent, 'status'):
         thisComponent.status = NOT_STARTED

 #-------Start Routine "trialpost"-------
 continueRoutine = True
 while continueRoutine and routineTimer.getTime() > 0:
     # get current time
     t = trialpostClock.getTime()
     frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
Example #9
0
from psychopy import microphone, core, prefs

# force the pyo audio backend before any sound/mic use
prefs.general['audioLib'] = ['pyo']

microphone.switchOn(sampleRate=44100)
mic = microphone.AdvAudioCapture(stereo=False)

# record 2 s straight to the given path (relative to the working dir);
# presumably blocks for the duration — confirm record()'s default.
mic.record(sec=2, filename='../recordings/test.wav')

core.quit()


Example #10
0
def trial(self, clock, window, shapes, mouse, keys, text_color, wait_time,
          warning_time, exp, count, ser):
    """
    Main speech type function (Python 2 module: uses print statements).
    :param clock: clock used for standardized timing; initialized in the main experimental loop
    :param window: display window
    :param shapes: array of shape objects to be used (not already randomized)
    :param mouse: mouse device
    :param keys: keyboard device; any key press ends the speaking phase early
    :param text_color: color for text
    :param wait_time: max seconds for trial to wait before continuing if trial is not completed
    :param warning_time: num of seconds left to begin countdown
    :param exp: experiment object for adding trial data
    :param count: number of speech trials during this experiment for file naming
    :param ser: serial port that links to XBee for syncing
    :return: status of trial where 0 = completed but incorrect; 1 = completed and correct; 2 = incomplete
    """

    # Default Value Set Up for Timing #
    # Sentinel -1 means "not yet measured" for each timing global.
    global stimulus_beg_time
    stimulus_beg_time = -1
    global in_between_time
    in_between_time = -1
    global total_stimuli_time
    total_stimuli_time = -1

    # Text values
    # countdown number shown below the stimuli during the final seconds
    count_label = visual.TextStim(window,
                                  units='norm',
                                  text=u'',
                                  pos=[0, -0.6],
                                  height=0.2,
                                  color=text_color,
                                  colorSpace='rgb255',
                                  alignHoriz='center',
                                  alignVert='center')

    second_label = visual.TextStim(window,
                                   units='norm',
                                   text=u'Speak color of blocks',
                                   pos=[0, 0.3],
                                   height=0.1,
                                   color=text_color,
                                   colorSpace='rgb255',
                                   alignHoriz='center',
                                   alignVert='center')

    done_label = visual.TextStim(window,
                                 units='norm',
                                 text=u'Done',
                                 pos=[0, -0.25],
                                 height=0.1,
                                 color=text_color,
                                 colorSpace='rgb255',
                                 alignHoriz='center',
                                 alignVert='center')

    done_button = visual.Rect(window,
                              width=0.5,
                              height=0.25,
                              lineColor=(0, 0, 0),
                              lineWidth=2,
                              lineColorSpace='rgb',
                              pos=(0, -0.25))

    next_label = visual.TextStim(window,
                                 units='norm',
                                 text=u'Speech Round',
                                 pos=[0, 0],
                                 height=0.1,
                                 color=text_color,
                                 colorSpace='rgb',
                                 alignHoriz='center',
                                 alignVert='center')

    # stimuli redrawn every frame while the mic is recording
    BLOCK_LIST = [second_label, done_button, done_label]

    # Display round name
    helper.displayNewRound(window, next_label)

    # Microphone Set Up #
    microphone.switchOn(sampleRate=16000)
    name = "speech_exp_%d.wav" % count
    mic = microphone.AdvAudioCapture(filename=name)
    # todo: can edit marker to output as sync signal; played when recording starts
    # marker currently set to not output any sound on onset of recording
    mic.setMarker(tone=5000, secs=0.015, volume=0.0)

    # Block Sequence Display #
    print "%f BEGIN BLOCK SEQUENCE" % (clock.getTime())
    ser.write("Begin Sequence")
    # NOTE(review): redundant — in_between_time was already declared global
    # at the top of this function; harmless but could be removed.
    global in_between_time
    in_between_time = helper.drawSequence(window, shapes, clock)
    ser.write("End Sequence")
    print "%f END BLOCK SEQUENCE" % (clock.getTime())

    # for block interaction #
    self.hub.clearEvents()
    start_time = clock.getTime()
    timeout_counter = 0
    self.hub.clearEvents()

    # store time right when clicking stimuli is presented for reference
    window.callOnFlip(track_speech_time, clock, mouse)
    window.flip()

    # records for length of specified wait time
    mic.record(wait_time, block=False)
    ser.write("Start")
    # one loop iteration per frame; timeout_counter counts frames,
    # assuming a 60 Hz display (see the *60 arithmetic below)
    while mic.recorder.running:
        [s.draw() for s in BLOCK_LIST]
        count_label.draw()
        window.flip()
        timeout_counter += 1

        ## FOR MOUSE-CLICK END: ##
        # buttons, times = mouse.getPressed(getTime=True)
        # if mouse.isPressedIn(done_button, buttons=[0]):
        #     break

        ## FOR KEYBOARD END: ##
        events = keys.getKeys()
        if len(events) != 0:
            break

        # adjust countdown value, to be displayed with the next flip
        if timeout_counter >= (
            (wait_time - warning_time) * 60) and timeout_counter % 60 == 0:
            count_label.setText(((wait_time * 60) - timeout_counter) / 60)

    # turn off microphone and saves the audio file automatically
    ser.write("Finish")
    microphone.switchOff()
    finish_time = clock.getTime()

    # once the round is finished, use previous counters to calculate total time spent and individual click times
    # speech_beg_time is not assigned in this function — presumably set as a
    # global by track_speech_time via the callOnFlip above; confirm there.
    total_stimuli_time = finish_time - speech_beg_time
    print "\n%f" % (finish_time)
    print "%f TOTAL TIME TO FINISH ROUND" % (total_stimuli_time)

    # save data in the experiment file
    exp.addData("stimulus_begin_time", speech_beg_time)
    exp.addData("in_between_time", in_between_time)
    exp.addData("total_stimuli_time", total_stimuli_time)
    exp.addData("time1", start_time)
    exp.addData("time2", finish_time)

    # return status code based on correctness of sequence
    # NOTE(review): only 2, 0, and -1 are returned here; the documented
    # status 1 (completed and correct) never occurs in this function.
    if timeout_counter == wait_time * 60:
        return 2
    if timeout_counter < wait_time * 60:  # assume finished normally by clicking button
        return 0
    return -1
Example #11
0
    # NOTE(review): Builder-generated fragment — routine preparation only;
    # the surrounding loop and the routine body are outside this view.
    # ------Prepare to start Routine "trial"-------
    routineTimer.add(16.000000)  # routine lasts at most 16 s
    # update component parameters for each repeat
    text1.setText(word1)
    sound1.setSound(wrecord1, secs=4.0, hamming=True)
    sound1.setVolume(1, log=False)
    text2.setText(word2)
    sound2.setSound(wrecord2, secs=4.0, hamming=True)
    sound2.setVolume(1, log=False)
    prime_text.setText(prime)
    prime_sound.setSound(precord3, secs=4, hamming=True)
    prime_sound.setVolume(1, log=False)
    target_text.setText(target)
    # mono capture of the participant saying "pan", channel 0
    pan_record = microphone.AdvAudioCapture(name='pan_record',
                                            saveDir=wavDirName,
                                            stereo=False,
                                            chnl=0)
    record_notification.setText('Please say the word: pan')
    # keep track of which components have finished
    trialComponents = [
        text1, sound1, text2, sound2, prime_text, prime_sound, target_text,
        pan_record, record_notification
    ]
    for thisComponent in trialComponents:
        thisComponent.tStart = None
        thisComponent.tStop = None
        thisComponent.tStartRefresh = None
        thisComponent.tStopRefresh = None
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED
    # reset timers
Example #12
0
    # NOTE(review): Builder-generated fragment — presumably inside a
    # `for thisQuestion in ...:` loop; ends mid-way through Routine "Q".
    if thisQuestion != None:
        for paramName in thisQuestion.keys():
            # binds each question parameter as a local (attribute-access
            # style used by older Builder versions)
            exec(paramName + '= thisQuestion.' + paramName)

    # ------Prepare to start Routine "Q"-------
    t = 0
    QClock.reset()  # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    current_question.setText(question)
    go_on_Q = event.BuilderKeyResponse()
    # one recording per question: <participant>.<episode><condition>_<Qnum>.wav
    wavName = wavDirName + '/' + expInfo['participant'] + '.' + expInfo[
        'episode'] + expInfo['condition'] + '_' + str(Qnum) + '.wav'
    record = microphone.AdvAudioCapture(name='record',
                                        filename=wavName,
                                        stereo=False)
    # keep track of which components have finished
    QComponents = [
        current_question, go_on_Q, a_pic, a_text, b_pic, b_text, m_pic, m_text,
        d_pic, d_text, r_pic, r_text, t_pic, t_text, record
    ]
    for thisComponent in QComponents:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED

    # -------Start Routine "Q"-------
    while continueRoutine:
        # get current time
        t = QClock.getTime()
        frameN = frameN + 1  # number of completed frames (so 0 is the first frame)