Example #1
def runNoiseExp(appObj, testMode=False):
    print("runNoiseExp")
    appObj.tabWidget.setCurrentIndex(5)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    cutoffLow = 1e3 * appObj.noiseExp_filterLow_dblSpinBox.value()
    cutoffHigh = 1e3 * appObj.noiseExp_filterHigh_dblSpinBox.value()
    durationMins = appObj.noiseExp_duration_dblSpinBox.value()
    durationSecs = durationMins * 60

    try:
        if not testMode:
            from DAQHardware import DAQHardware
            daq = DAQHardware()

        chanNamesIn = [audioHW.mic_daqChan]
        micVoltsPerPascal = audioHW.micVoltsPerPascal
        # mode = 'chirp'

        # make 1 second of noise
        nOut = int(outputRate * 1)  # sample count must be an integer
        magResp = np.zeros(nOut)
        fIdx1 = int(nOut * cutoffLow / outputRate)
        fIdx2 = int(nOut * cutoffHigh / outputRate)
        magResp[fIdx1:fIdx2] = 1
        phaseResp = 2 * np.pi * np.random.rand(nOut) - np.pi
        sig = magResp * np.exp(-1j * phaseResp)
        spkOut = np.real(np.fft.ifft(sig))
        mx = np.max(spkOut)
        mn = np.min(spkOut)
        # normalize signal to be between -1 and 1
        spkOut = 2 * (spkOut - mn) / (mx - mn) - 1

        maxV = audioHW.speakerOutputRng[1]
        spkOut = maxV * spkOut

        pl = appObj.spCalTest_output
        pl.clear()
        #endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
        npts = len(spkOut)
        endIdx = npts
        t = np.linspace(0, npts / outputRate, npts)
        pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')

        numInputSamples = int(inputRate * 0.1)

        if testMode:
            # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
            pass
        else:
            chanNameOut = audioHW.speakerL_daqChan
            attenLines = audioHW.attenL_daqChan
            attenLinesOther = audioHW.attenR_daqChan

            if not testMode:
                AudioHardware.Attenuator.setLevel(0, attenLines)
                AudioHardware.Attenuator.setLevel(60, attenLinesOther)

            # setup the output task
            daq.setupAnalogOutput([chanNameOut],
                                  audioHW.daqTrigChanIn,
                                  int(outputRate),
                                  spkOut,
                                  isContinuous=True)
            daq.startAnalogOutput()

            # trigger the acquisition by sending a digital pulse
            daq.sendDigTrig(audioHW.daqTrigChanOut)

        tElapsed = 0
        tLast = time.time()
        npts = numInputSamples
        t = np.linspace(0, npts / inputRate, npts)

        while tElapsed < durationSecs:
            if not testMode:
                # setup the input task
                daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                     int(inputRate), numInputSamples)
                daq.startAnalogInput()

                # trigger the acquisition by sending a digital pulse
                daq.sendDigTrig(audioHW.daqTrigChanOut)

                daq.waitDoneInput()

                mic_data = daq.readAnalogInput()
                mic_data = mic_data[0, :]

                daq.stopAnalogInput()
                daq.clearAnalogInput()
            else:
                mic_data = np.random.rand(numInputSamples)

            mic_data = mic_data / micVoltsPerPascal

            pl = appObj.spCalTest_micInput
            pl.clear()
            pl.plot(t, mic_data, pen='b')

            labelStyle = appObj.xLblStyle
            pl.setLabel('bottom', 'Time', 's', **labelStyle)
            labelStyle = appObj.yLblStyle
            pl.setLabel('left', 'Response', 'Pa', **labelStyle)

            # calculate RMS
            nMicPts = len(mic_data)
            micRMS = np.mean(mic_data**2)
            micRMS = 20 * np.log10(micRMS**0.5 / 2e-5)
            appObj.spCalTest_rms_label.setText("%0.3f dB" % micRMS)

            nfft = int(2**np.ceil(np.log(nMicPts * 5) / np.log(2)))
            print("nfft = ", nfft)
            win_fcn = np.hanning(nMicPts)
            micFFT = 2 * np.abs(np.fft.fft(win_fcn * mic_data, nfft)) / nMicPts
            micFFT = 2 * micFFT[0:len(micFFT) // 2]
            micFFT = 20 * np.log10(micFFT / 2e-5)
            freq = np.linspace(0, inputRate / 2, len(micFFT))
            pl = appObj.spCalTest_micFFT
            pl.clear()
            #df = freq[1] - freq[0]
            #print("NoiseExp: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
            pl.plot(freq, micFFT, pen='b')
            labelStyle = appObj.xLblStyle
            pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
            labelStyle = appObj.yLblStyle
            pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)

            QtGui.QApplication.processEvents(
            )  # check for GUI events, such as button presses

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

            t1 = time.time()
            tElapsed += (t1 - tLast)
            tLast = t1

        if not testMode:
            daq.stopAnalogOutput()
            daq.clearAnalogOutput()

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during calibration. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents(
    )  # check for GUI events, such as button presses
    appObj.finishCollection()
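
The stimulus above is band-limited noise built in the frequency domain: a flat magnitude mask over the passband, a uniformly random phase, an inverse FFT, and a rescale to the DAC range. Below is a minimal standalone sketch of that approach; the sample rate and cutoff values are placeholders, not taken from the hardware configuration.

import numpy as np

def make_bandlimited_noise(outputRate=200e3, cutoffLow=4e3, cutoffHigh=40e3):
    # one second of output samples
    nOut = int(outputRate)
    # flat magnitude inside the passband, zero elsewhere
    magResp = np.zeros(nOut)
    fIdx1 = int(nOut * cutoffLow / outputRate)
    fIdx2 = int(nOut * cutoffHigh / outputRate)
    magResp[fIdx1:fIdx2] = 1
    # uniformly random phase in (-pi, pi)
    phaseResp = 2 * np.pi * np.random.rand(nOut) - np.pi
    noise = np.real(np.fft.ifft(magResp * np.exp(-1j * phaseResp)))
    # rescale to the range [-1, 1] before applying the speaker voltage
    mn, mx = noise.min(), noise.max()
    return 2 * (noise - mn) / (mx - mn) - 1

spkOut = make_bandlimited_noise()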
Example #2
def runSpeakerCal(appObj, testMode=False):
    print("runSpeakerCal")
    appObj.tabWidget.setCurrentIndex(0)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        freqArray = appObj.getFrequencyArray()
        
    # numSpk = audioParams.getNumSpeakers()
    numSpk = 1
    cIdx = appObj.speaker_comboBox.currentIndex()
    if cIdx > 0:
        numSpk = 2
    
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    # mode = 'chirp'
    mode = ''
    spCal = None
    # freq_array2 = audioParams.freq[1, :]
    
    try:
        frameNum = 0
        isSaveDirInit = False
        trialDur = appObj.spCal_stimDuration_dblSpinBox.value()*1e-3
        
        freq_array = freqArray
        freq_array2 = freqArray/1.22

        if numSpk == 1:
            freq_array = np.concatenate((freq_array, freq_array2))
            freq_array = np.sort(freq_array)
            freq_array2 = freq_array
        
        spCal = SpeakerCalData(np.vstack((freq_array, freq_array2)))            
        
        for spkNum in range(0, numSpk):
            chanNameOut = audioHW.speakerL_daqChan 
            #attenLines = audioHW.attenL_daqChan
            #attenLinesOther = audioHW.attenR_daqChan
            spkIdx = 0

            attenLvl1 = 0
            attenLvl2 = audioHW.maxAtten
                
            if spkNum == 1:  # second speaker (index 1)
                #chanNameOut = audioHW.speakerR_daqChan
                #attenLines = audioHW.attenR_daqChan
                #attenLinesOther = audioHW.attenL_daqChan
                spkIdx = 1
                attenLvl1 = audioHW.maxAtten
                attenLvl2 = 0
    
            freq_idx = 0


            if not testMode:
                audioHW.setAttenuatorLevel(attenLvl1, attenLvl2, daq)
                
                # daq.sendDigOutCmd(attenLines, attenSig)
                # appObj.oct_hw.SetAttenLevel(0, attenLines)
                
            if mode == 'chirp':
                tChirp = 1    
                f0 = 100
                f1 = 100e3
                k = (f1- f0)/tChirp
                nChirpPts = round(outputRate*tChirp)
                t = np.linspace(0, tChirp, nChirpPts)
                spkOut = np.cos(2*np.pi*(f0*t + (k/2)*t**2))  # linear chirp sweeping from f0 to f1
                
                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                        
                numInputSamples = int(inputRate*len(spkOut)/outputRate) 
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    mic_data = daq.readAnalogInput()
                    mic_data = mic_data[0, :]
                    mic_data_chirp = mic_data/micVoltsPerPascal

                
                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                    
                npts = len(mic_data)
                t = np.linspace(0, npts/inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
                
                # play reference tone
                refFreq = 4e3
                tRef = 50e-3
                
                nRefPts = round(outputRate*tRef)
                t = np.linspace(0, tRef, nRefPts)
                spkOut = np.cos(2*np.pi*refFreq*t)
                
                # apply envelope
                i1 = round(outputRate*1e-3)
                i2 = nRefPts- i1
                env = np.linspace(0, 1, i1)
                spkOut[0:i1] = spkOut[0:i1]*env
                spkOut[i2:] = spkOut[i2:]*(1-env)
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    mic_data = daq.readAnalogInput()
                    mic_data_ref = mic_data/micVoltsPerPascal
                    
                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                
                micData, spCal = processSpkCalDataChirp(mic_data_chirp, mic_data_ref, inputRate, spCal, spkIdx, f0, f1, refFreq)
                    
                pl = appObj.spCal_micFFT
                pl.clear()
                df = micData.fft_freq[1] - micData.fft_freq[0]
                nf = len(micData.fft_freq)
                i1 = int(freq_array[0]*0.9/df)
                i2 = int(freq_array[-1]*1.1/df)
                print("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
                pl.plot(micData.fft_freq[i1:i2], micData.fft_mag[i1:i2], pen='b')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
                
                pl = appObj.spCal_spkResp
                pl.clear()
#                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                pl.plot(freq_array, spCal.magResp[spkIdx, :], pen="b", symbol='o')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
            else:
                for freq in freq_array:
                    print("runSpeakerCal freq=" + repr(freq))
                    spkOut = makeSpeakerCalibrationOutput(freq, audioHW, trialDur)    
                    npts = len(spkOut)
                    t = np.linspace(0, npts/outputRate, npts)
                    
                    pl = appObj.spCal_output
                    pl.clear()
                    endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                    pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                            
                    numInputSamples = int(inputRate*len(spkOut)/outputRate) 
                    
                    if testMode:
                        # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                        pass
                    else:
    
                        # setup the output task
                        daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                        daq.startAnalogOutput()
                        
                        # setup the input task
                        daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                        daq.startAnalogInput()
                    
                        # trigger the acquisition by sending a digital pulse
                        daq.sendDigTrig(audioHW.daqTrigChanOut)
                        
                        mic_data = daq.readAnalogInput()
                        mic_data = mic_data[0, :]
                        mic_data = mic_data/micVoltsPerPascal
    
                    
                    if not testMode:
                        daq.stopAnalogInput()
                        daq.stopAnalogOutput()
                        daq.clearAnalogInput()
                        daq.clearAnalogOutput()
                    
                    npts = len(mic_data)
                    t = np.linspace(0, npts/inputRate, npts)
                    pl = appObj.spCal_micInput
                    pl.clear()
                    pl.plot(t, mic_data, pen='b')
                    
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Response', 'Pa', **labelStyle)
                    
                    micData, spCal = processSpkCalData(mic_data, freq, freq_idx, inputRate, spCal, spkIdx, audioHW)
                    
                    pl = appObj.spCal_micFFT
                    pl.clear()
                    df = micData.fft_freq[1] - micData.fft_freq[0]
                    nf = len(micData.fft_freq)
                    i1 = int(freq_array[0]*0.9/df)
                    i2 = int(freq_array[-1]*1.1/df)
                    print("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
                    pl.plot(micData.fft_freq[i1:i2], micData.fft_mag[i1:i2], pen='b')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
                    
                    pl = appObj.spCal_spkResp
                    pl.clear()
    #                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                    pl.plot(freq_array, spCal.magResp[spkIdx, :], pen="b", symbol='o')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
                    
                    freq_idx += 1
                    
    #                if appObj.getSaveState():
    #                    if not isSaveDirInit:
    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
    #                        isSaveDirInit = True
    #    
    #                    if saveOpts.saveRaw:
    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                        
                    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                    
                    # if done flag, break out of loop
                    if appObj.doneFlag:
                        break
                
                frameNum += 1

                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
                
        if not appObj.doneFlag:
            saveDir = appObj.configPath
            saveSpeakerCal(spCal, saveDir)
            appObj.audioHW.loadSpeakerCalFromProcData(spCal)
            appObj.spCal = spCal            
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during calibration. Check command line output for details")           
        
    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
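
In chirp mode the stimulus is a linear frequency sweep; its phase follows the standard linear-chirp form 2*pi*(f0*t + (k/2)*t**2) with sweep rate k = (f1 - f0)/tChirp. Here is a minimal sketch of that construction; the output rate below is a placeholder, not the project's DAQ rate.

import numpy as np

def make_linear_chirp(outputRate=400e3, f0=100.0, f1=100e3, tChirp=1.0):
    k = (f1 - f0) / tChirp                      # sweep rate in Hz per second
    n = int(round(outputRate * tChirp))
    t = np.linspace(0, tChirp, n)
    # instantaneous frequency is f0 + k*t, so the phase is 2*pi*(f0*t + k*t**2/2)
    return t, np.cos(2 * np.pi * (f0 * t + (k / 2) * t ** 2))

t, spkOut = make_linear_chirp()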


    
Example #3
def runDPOAE(appObj, testMode=False):
    print("runDPOAE")
    
    try:
        appObj.tabWidget.setCurrentIndex(2)
        appObj.doneFlag = False
        appObj.isCollecting = True
        # trigRate = octfpga.GetTriggerRate()
        audioHW = appObj.audioHW
        bioamp = appObj.bioamp
        outputRate = audioHW.DAQOutputRate
        inputRate = audioHW.DAQInputRate
    
        # freq_array2 = audioParams.freq[1, :]
        freqArray = appObj.getFrequencyArray()
        
        if testMode:
            testDataDir = os.path.join(appObj.basePath, 'exampledata', 'DPOAE')
    #        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
    #        f = open(filePath, 'rb')
    #        audioParams = pickle.load(f)
    #        f.close()
        else:
            # freqArray = appObj.getFrequencyArray()
            i1 = appObj.DPOAE_freqLow_comboBox.currentIndex()
            i2 = appObj.DPOAE_freqHigh_comboBox.currentIndex()
            print("runDPOAE: i1= ", i1, "i2= ", i2)
    
            ampLow = appObj.DPOAE_ampLow_spinBox.value()
            ampHigh = appObj.DPOAE_ampHigh_spinBox.value()
            ampDelta = appObj.DPOAE_ampDelta_spinBox.value()
            
            # ampArray = np.arange(ampLow, ampHigh, ampDelta)
            #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
            #ampArray = np.linspace(ampLow, ampHigh, numSteps)
            ampArray = np.arange(ampLow, ampHigh, ampDelta)
            if ampArray[-1] != ampHigh:
                ampArray = np.hstack((ampArray, ampHigh))
            
            freqArray = freqArray[i1:i2+1]
    
        # numSpk = audioParams.getNumSpeakers()
        if not testMode:
            from DAQHardware import DAQHardware
            daq = DAQHardware()
    
        chanNamesIn= [ audioHW.mic_daqChan, bioamp.daqChan]
        micVoltsPerPascal = audioHW.micVoltsPerPascal
        trialDur = appObj.DPOAE_stimDuration_dblSpinBox.value() * 1e-3
        # nReps = appObj.DPOAEtrialReps_spinBox.value()
    
        # set input rate to a multiple of the highest output frequency, a little above Nyquist so the stimulus frequency sits closer to the center of the analysis band
        inputRate = 4*freqArray[-1]
        inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
        
        frameNum = 0
        isSaveDirInit = False

        attenLines1 = audioHW.attenL_daqChan
        attenLines2 = audioHW.attenR_daqChan
        
        freq_idx = 0
        DPOAEdata = None
        numSpk = appObj.speaker_comboBox.currentIndex()+1
        chanNameOut = audioHW.speakerL_daqChan 
        if numSpk > 1:
            chanNameOut = [audioHW.speakerL_daqChan, audioHW.speakerR_daqChan ]
        print("runDPOAE numSpk=", numSpk)
        
        for freq in freqArray:
            sp1, sp2 = makeDPOAEOutput(freq, trialDur, audioHW)
            # spkOut = np.tile(spkOut_trial, nReps)
            
            npts = len(sp1)
            tOut = np.linspace(0, npts/outputRate, npts)
            print("runDPOAE npts=%d len(spkOut)= %d len(tOut)= %d" % (npts, len(sp1), len(tOut)))
            amp_idx = 0
            # ptsPerRep = inputRate
            
            for amp in ampArray:
                print("runDPOAE freq=" + repr(freq), " amp= ", + amp, " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                vOut1, attenLvl1 = audioHW.getCalibratedOutputVoltageAndAttenLevel(freq, amp, 0)
                spkNum = numSpk - 1
                vOut2, attenLvl2 = audioHW.getCalibratedOutputVoltageAndAttenLevel(freq/1.22, amp, spkNum)
                if vOut1 > 0 and vOut2 > 0:
                    # attenSig = AudioHardware.makeLM1972AttenSig(0)
                    if not testMode:
                        if numSpk > 1:
                            audioHW.setAttenuatorLevel(attenLvl1, attenLvl2, daq)
                        else:
                            if attenLvl1 > attenLvl2:
                                dbDiff = attenLvl1 - attenLvl2
                                attenLvl1 = attenLvl2
                                vOut2 = vOut2*(10**(dbDiff/20))
                            elif attenLvl1 < attenLvl2:
                                dbDiff = attenLvl2 - attenLvl1
                                attenLvl2 = attenLvl1
                                vOut1 = vOut1*(10**(dbDiff/20))
                                
                            audioHW.setAttenuatorLevel(attenLvl1, audioHW.maxAtten, daq)
                            
                        # daq.sendDigOutDPOAEd(attenLines, attenSig)
                        # appObj.oct_hw.SetAttenLevel(0, attenLines)
                    
                    pl = appObj.DPOAE_output
                    pl.clear()
                    endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                    #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                    pl.plot(tOut, sp1 + sp2, pen='b')
                    
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Output', 'V', **labelStyle)
                    
                    numInputSamples = int(inputRate*len(sp1)/outputRate)
                    
                    if testMode:
                        # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                        pass
                    else:
        
                        # setup the output task
                        if numSpk > 1:
                            spkOut = np.vstack((vOut1*sp1, vOut2*sp2))
                        else:
                            spkOut = vOut1*sp1 + vOut2*sp2
                            
                        daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                        daq.startAnalogOutput()
                        
                        # setup the input task
                        daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                        daq.startAnalogInput()
                    
                        # trigger the acquisition by sending a digital pulse
                        daq.sendDigTrig(audioHW.daqTrigChanOut)
                        
                        dataIn = daq.readAnalogInput()
                        mic_data = dataIn[0, :]
                        
                        mic_data = mic_data/micVoltsPerPascal
                    
                        daq.stopAnalogInput()
                        daq.stopAnalogOutput()
                        daq.clearAnalogInput()
                        daq.clearAnalogOutput()
                    
                    npts = len(mic_data)
                    t = np.linspace(0, npts/inputRate, npts)
                    pl = appObj.spCal_micInput
                    pl.clear()
                    pl.plot(t, mic_data, pen='b')
                    
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Response', 'Pa', **labelStyle)
        
                    DPOAEptData, DPOAEdata = processDPOAEData(mic_data, freq, freq_idx, amp_idx, freqArray, ampArray, inputRate, DPOAEdata)
    
                    print("runDPOAE: plotting data")
                    plotDPOAEdata(appObj, DPOAEptData, DPOAEdata)
                    
                QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                
                # if done flag, break out of loop
                if appObj.doneFlag:
                    break
                
                frameNum += 1
                amp_idx += 1
                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
            
            freq_idx += 1


        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.DPOAEnumber
        name = 'DPOAE'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        
        saveOpts.saveMicData = appObj.DPOAE_saveMicData_checkBox.isChecked()
        saveOpts.saveMicFFT = appObj.DPOAE_saveMicFFT_checkBox.isChecked()
        saveDir = appObj.saveDir_lineEdit.text()
        
        plotName = 'DPOAE %d %s %s' % (number, timeStr, saveOpts.note)
        plotFilePath = saveDPOAEDataFig(DPOAEdata, trialDur, saveDir, plotName, timeStr)
        
        reply = QtGui.QMessageBox.question(appObj, 'Save', "Keep data?" , QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes)
        if reply == QtGui.QMessageBox.Yes:
            excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number, timeStr, note)
            saveDPOAEDataXLS(DPOAEdata, trialDur, excelWS, saveOpts)
            #saveDPOAEData(DPOAEdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)
            
            saveDPOAEDataPickle(DPOAEdata, trialDur, plotName, saveOpts, timeStr)
            appObj.DPOAEnumber += 1                
            
        else:
            os.remove(plotFilePath)
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during collection. Check command line output for details")           
        
    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
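
The input rate used for DPOAE acquisition is derived from the output rate rather than fixed: it aims for roughly four times the highest primary frequency and then snaps to the nearest rate that divides the DAQ output rate evenly, so the input and output buffers span the same duration. A short sketch of that calculation follows; the function names are illustrative, not part of the project API.

import numpy as np

def pick_input_rate(outputRate, fMax):
    # target about 4x the highest stimulus frequency, comfortably above Nyquist
    desired = 4 * fMax
    # snap to the closest rate that evenly divides the output rate
    return outputRate / int(np.floor(outputRate / desired))

def matching_input_samples(inputRate, outputRate, nOutputSamples):
    # number of input samples covering the same duration as the output buffer
    return int(inputRate * nOutputSamples / outputRate)

# e.g. a 400 kHz output rate with a 32 kHz top primary gives a 133.33 kHz input rate
print(pick_input_rate(400e3, 32e3))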
Example #4
File: ABR.py Project: udayragakiran/PyCMP
def runABR(appObj, testMode=False):
    print("runABR")
    
    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    ABRparams = ABRParams(appObj)
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.ABR_freqLow_comboBox.currentIndex()
        i2 = appObj.ABR_freqHigh_comboBox.currentIndex()
        print("runABR: i1= ", i1, "i2= ", i2)
        ampLow = appObj.ABRampLow_spinBox.value()
        ampHigh = appObj.ABRampHigh_spinBox.value()
        ampDelta = appObj.ABRampDelta_spinBox.value()
        
        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        if ampLow == ampHigh:
            ampArray = np.array([ampLow])
        else:
            ampArray = np.arange(ampLow, ampHigh, ampDelta)
            if ampArray[-1] != ampHigh:
                ampArray = np.hstack((ampArray, ampHigh))
        
        freqArray = freqArray[i1:i2+1]
    
    if ABRparams.click:
        freqArray = freqArray[0:1]  # only use a single frequency
        clickRMS = appObj.ABRclick_RMS
        
    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    
    

    # set input rate to three times the highest output frequency, a little more than Nyquist
    
#    inputRate = 3*freqArray[-1]
#    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
#    inputRate = np.max((inputRate, 6e3))   # input rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
#    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
#    print("runABR: inputRate(final)= ", inputRate)
    
    try:
        frameNum = 0
        numFrames = len(freqArray)*len(ampArray)
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan 
        attenLines = audioHW.attenL_daqChan
        
        freq_idx = 0
        ABRdata = None
        appObj.status_label.setText("Running")
        appObj.progressBar.setValue(0)
        
        for freq in freqArray:
            spkOut_trial = makeABROutput(freq, ABRparams, audioHW)
            npts = len(spkOut_trial)
            spkOut = np.tile(spkOut_trial, ABRparams.nReps)
            # invert every other trial, necessary for ABR/CAP output 
            for n in range(1, ABRparams.nReps, 2):
                idx1 = n*npts
                idx2 = (n+1)*npts
                spkOut[idx1:idx2] = -spkOut[idx1:idx2]
#            plt.figure(5)
#            plt.clf()
#            plt.plot(spkOut)
            tOut = np.linspace(0, npts/outputRate, npts)
            print("runABR npts=%d len(spkOut_trial)= %d len(tOut)= %d" % (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = int(inputRate*ABRparams.trialDur)
            
            for amp in ampArray:
                print("runABR freq=" + repr(freq), " amp= ", + amp, " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                if ABRparams.click:
                    clickRMS = appObj.ABRclick_RMS
                    attenLvl = 0
                    vOut = 10**((amp - clickRMS)/20)
                    minV = audioHW.speakerOutputRng[0]
                    if vOut < minV:
                        attenLvl = int(round(20*np.log10(minV/vOut)))
                        vOut = minV
                else:
                    vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(freq, amp, 0)
                
                print("runABR vOut= ", vOut, " atenLvl=", attenLvl)
                
                if vOut > audioHW.speakerOutputRng[1]:
                    print("runABR vOut= ", vOut, "  out of range")
                    continue
                elif attenLvl > audioHW.maxAtten:
                    print("runABR attenLvl= ", attenLvl, "  gerater than maximum attenuation")
                    continue
                    
                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    # daq.sendDigOutABRd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)
                
                pl = appObj.ABR_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)
                
                numInputSamples = ABRparams.nReps*int(inputRate*len(spkOut_trial)/outputRate)
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
    
                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), vOut*spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    timeout = numInputSamples/inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]
                    
                    mic_data = mic_data/micVoltsPerPascal
                    bioamp_data = bioamp_data/bioamp.gain
                
                    daq.waitDoneInput()
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                    
                    daq.waitDoneOutput(stopAndClear=True)
                
#                npts = len(mic_data)
#                t = np.linspace(0, npts/inputRate, npts)
#                pl = appObj.ABR_micInput
#                pl.clear()
#                pl.plot(t, mic_data, pen='b')
#                
#                labelStyle = appObj.xLblStyle
#                pl.setLabel('bottom', 'Time', 's', **labelStyle)
#                labelStyle = appObj.yLblStyle
#                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
    
    # def processABRData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, ABRdataIn):            
                ABRptData, ABRdata = processABRData(mic_data, bioamp_data, freq, freq_idx, amp_idx, freqArray, ampArray, inputRate, ABRdata, ABRparams)

                print("runABR: plotting data")
                plotABRdata(appObj, ABRptData, ABRdata)
                
    #                if appObj.getSaveState():
    #                    if not isSaveDirInit:
    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
    #                        isSaveDirInit = True
    #    
    #                    if saveOpts.saveRaw:
    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                idx1 = round(inputRate*ABRparams.stimOffset)
                idx2 = idx1 + round(inputRate*ABRparams.stimDur)
                
                mic_data = mic_data[idx1:idx2] 
                rms = np.mean(mic_data ** 2) ** 0.5
                rms = 20*np.log10(rms/2e-5)
                
                appObj.ABR_rms_label.setText("%0.1f dB" % rms)                    
                
                QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                
                # if done flag, break out of loop
                if appObj.doneFlag:
                    break
                
                frameNum += 1
                amp_idx += 1
                appObj.progressBar.setValue(int(100*frameNum/numFrames))  # progress bar expects an integer percentage
                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
            
            freq_idx += 1


        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.ABRnumber
        name = 'ABR'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number, timeStr, note)
    
        appObj.ABRnumber += 1                
        #saveOpts.saveTracings = appObj.ABR_saveTracings_checkBox.isChecked()
        saveOpts.saveTracings = True
        saveDir = appObj.saveDir_lineEdit.text()
        saveABRDataXLS(ABRdata, ABRparams, excelWS, saveOpts)
        #saveABRData(ABRdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)
        
        plotName = 'ABR %d %s %s' % (number, timeStr, saveOpts.note)
        saveABRDataFig(ABRdata, ABRparams, saveDir, plotName, timeStr)
        saveABRDataPickle(ABRdata, ABRparams, plotName, saveOpts, timeStr)
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during collection. Check command line output for details")           
        
    # update the audio hardware speaker calibration                     
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
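
Two details of the ABR routine are worth isolating: the stimulus polarity is inverted on every other repetition, and the recording is later cut into per-repetition epochs and averaged, which tends to cancel the stimulus artifact and cochlear microphonic while preserving the neural response. A small sketch of both steps follows, assuming the epoch length divides the recording evenly; the function names are illustrative, not part of the project.

import numpy as np

def alternate_polarity(stim_trial, nReps):
    # tile the single-trial stimulus and invert every other repetition
    stim = np.tile(np.asarray(stim_trial, dtype=float), nReps)
    npts = len(stim_trial)
    for n in range(1, nReps, 2):
        stim[n * npts:(n + 1) * npts] *= -1
    return stim

def average_epochs(resp, nReps):
    # reshape the continuous recording into nReps epochs and average them
    ptsPerRep = len(resp) // nReps
    epochs = np.reshape(resp[:nReps * ptsPerRep], (nReps, ptsPerRep))
    return np.mean(epochs, axis=0)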
Example #5
File: ABR.py Project: udayragakiran/PyCMP
def calibrateClick(appObj, testMode=False):
    print("ABR.calibrateClick")
    
    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        pass

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    ABRparams = ABRParams(appObj)
    ABRparams.click = True
    ABRparams.nReps = 20
    print("ABR.calibrateClick ABRparams=", ABRparams.__dict__)
    # set input rate to three times the highest output frequency, a little more than Nyquist
    
#    inputRate = 3*freqArray[-1]
#    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
#    inputRate = np.max((inputRate, 6e3))   # input rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
#    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
#    print("runABR: inputRate(final)= ", inputRate)
    
    try:
        chanNameOut = audioHW.speakerL_daqChan 
        attenLines = audioHW.attenL_daqChan
        
        spkOut_trial = makeABROutput(4e3, ABRparams, audioHW)
        spkOut = np.tile(spkOut_trial, ABRparams.nReps)
        npts = len(spkOut_trial)
        tOut = np.linspace(0, npts/outputRate, npts)
            
        # attenSig = AudioHardware.makeLM1972AttenSig(0)
        if not testMode:
            AudioHardware.Attenuator.setLevel(0, attenLines)
                
        pl = appObj.ABR_output
        pl.clear()
        endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
        #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
        pl.plot(tOut, spkOut_trial, pen='b')
                
        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Output', 'V', **labelStyle)
                
        numInputSamples = int(inputRate*len(spkOut)/outputRate)
        
        if testMode:
            # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
            pass
        else:
            # setup the output task
            daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
            daq.startAnalogOutput()
            
            # setup the input task
            daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
            daq.startAnalogInput()
        
            # trigger the acquisition by sending a digital pulse
            daq.sendDigTrig(audioHW.daqTrigChanOut)
            
            timeout = numInputSamples/inputRate + 2
            dataIn = daq.readAnalogInput(timeout)
            mic_data = dataIn[0, :]
            
            mic_data = mic_data/micVoltsPerPascal
        
            daq.waitDoneInput()
            daq.stopAnalogInput()
            daq.clearAnalogInput()
            
            daq.waitDoneOutput(stopAndClear=True)
        
        print("ABR.calibrateClick: plotting data")
        npts = len(mic_data)
        
        # reshape and average the mic data
        ptsPerRep = npts // ABRparams.nReps
        mic_data = np.reshape(mic_data, (ABRparams.nReps, ptsPerRep))
        mic_data = np.mean(mic_data, 0)
        
        # plot mic data
        npts = len(mic_data)
        t = np.linspace(0, npts/inputRate, npts)
        pl = appObj.ABR_micInput
        pl.clear()
        pl.plot(t, mic_data, pen='b')
        
        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Response', 'Pa', **labelStyle)
        
        idx1 = round(inputRate*ABRparams.stimOffset)
        idx2 = idx1 + round(inputRate*ABRparams.stimDur)
        
        mic_data = mic_data[idx1:idx2] 
        # apply high pass filter to get rid of LF components
#        (b, a) = scipy.signal.butter(5, 100/inputRate, 'high')
#        mic_data = scipy.signal.lfilter(b, a, mic_data) 

        rms = np.mean(mic_data ** 2) ** 0.5
        rms = 20*np.log10(rms/2e-5)
        appObj.ABRclick_RMS = rms
        
        appObj.ABR_rms_label.setText("%0.1f dB" % rms)
        print("ABR.calibrateClick: RMS= ", rms)
        
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during collection. Check command line output for details")           
        
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
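
The click RMS readout above is a plain dB SPL conversion: the microphone signal, already scaled to pascals via micVoltsPerPascal, is reduced to its RMS value and referenced to 20 µPa. A minimal sketch of that conversion:

import numpy as np

def pa_to_db_spl(pressure_pa):
    # RMS sound pressure level re 20 uPa
    rms = np.sqrt(np.mean(np.asarray(pressure_pa) ** 2))
    return 20 * np.log10(rms / 20e-6)

# a tone with 1 Pa RMS amplitude is about 94 dB SPL
t = np.linspace(0, 0.05, 5000)
print(pa_to_db_spl(np.sqrt(2) * np.sin(2 * np.pi * 1e3 * t)))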
Example #6
def run(appObj, testMode=False):
    print("ReadMicBioAmp.run")
    appObj.tabWidget.setCurrentIndex(1)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    # outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate
    
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    bioampGain = bioamp.gain
    
    firstIter = True
    while not appObj.doneFlag:
        try:
            if testMode:
                # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                continue
            else:
                # inputTime = 100e-3
                inputTime = 1e-3*appObj.readMicBioamp_duration_dblSpinBox.value()
                numInputSamples = round(inputRate*inputTime)
                
                # setup the input task
                daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                daq.startAnalogInput()
            
                # trigger the acquisition by sending a digital pulse
                daq.sendDigTrig(audioHW.daqTrigChanOut)
                
                data = daq.readAnalogInput()
                print("data.shape= ", data.shape)
                mic_data = data[0, :]
                bioamp_data = data[1, :]
                #mic_data = data[:, 0]
                #bioamp_data = data[:, 1]
                
                mic_data = mic_data/micVoltsPerPascal
                bioamp_data = bioamp_data/bioampGain
    
                daq.stopAnalogInput()
                daq.clearAnalogInput()
            
            npts = len(mic_data)
            t = np.linspace(0, npts/inputRate, npts)
            
            pl = appObj.inputs_micPlot
            if firstIter:
                pl.clear()
                
                micPI = pl.plot(t, mic_data, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
            else:
                data = np.vstack((t, mic_data))
                micPI.setData(data.transpose())
            
            pl = appObj.inputs_bioampPlot
            if firstIter:
                pl.clear()
                bioampPI = pl.plot(t, bioamp_data, pen='b')
    
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'V', **labelStyle)
            else:
                data = np.vstack((t, bioamp_data))
                bioampPI.setData(data.transpose())
            
            numfftpts = npts*2
            win_fcn = 2*np.hanning(npts)
            mic_fft = np.fft.fft(win_fcn*mic_data, numfftpts)
            endIdx = int(np.ceil(numfftpts/2))  # slice index must be an integer
            mic_fft = mic_fft[0:endIdx]
            mic_fft_mag = 2*np.abs(mic_fft)/numfftpts
            
            fftrms_corr = 1/(np.sqrt(2))
            mic_fft_mag = fftrms_corr*mic_fft_mag 
            mic_fft_mag_log = 20*np.log10(mic_fft_mag/20e-6 )  # 20e-6 pa
            
            mic_freq = np.linspace(0, inputRate/2, endIdx)
            
            pl = appObj.inputs_micFFTPlot
            if firstIter:
                pl.clear()
                micFFTPI = pl.plot(mic_freq, mic_fft_mag_log, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
            else:
                data = np.vstack((mic_freq, mic_fft_mag_log))
                micFFTPI.setData(data.transpose())
                
            Wn = [300, 3000]
            Wn = np.array(Wn)/(inputRate/2)  # scipy expects cutoffs normalized to the Nyquist frequency
            #Wn = [0.001, 0.01]
#            (b, a) = scipy.signal.butter(5, Wn=Wn, btype='bandpass')
            (b, a) = scipy.signal.iirfilter(2, Wn,  btype='bandpass', ftype='bessel')

            #b = scipy.signal.firwin(21, Wn)
            #a = [1.0]
            bioamp_filt = scipy.signal.lfilter(b, a, bioamp_data) 

            print("bioamp_data.shape= ", bioamp_data.shape, " t.shape=", t.shape, " Wn=", Wn)
            print("b= ", b)
            print("a= ", a)

            
            if firstIter:
                pl = appObj.inputs_bioampFilteredPlot
                pl.clear()
                bioampFFTPI = pl.plot(t, bioamp_filt, pen='b')
    
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'V', **labelStyle)
                
            else:
                #data = np.vstack((t, bioamp_filt))
                bioampFFTPI.setData(t, bioamp_filt)
                
            firstIter = False
            
        except Exception as ex:
            traceback.print_exc(file=sys.stdout)
            QtGui.QMessageBox.critical (appObj, "Error", "Error. Check command line output for details")
            appObj.doneFlag = True
            
    
        QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    
    # update the audio hardware speaker calibration                     
    appObj.finishCollection()
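
The mic spectrum shown above uses a Hann window with an amplitude-correction factor of 2, zero-pads to twice the record length, keeps the single-sided half of the spectrum, applies a 1/sqrt(2) peak-to-RMS correction, and converts to dB SPL re 20 µPa. Below is a sketch of the same pipeline as a standalone function; a small floor is added here (not present in the original) to avoid taking the log of zero.

import numpy as np

def mic_spectrum_db_spl(mic_pa, inputRate):
    npts = len(mic_pa)
    nfft = 2 * npts                              # zero-pad for finer bin spacing
    win = 2 * np.hanning(npts)                   # factor 2 compensates the Hann amplitude loss
    spec = np.fft.fft(win * np.asarray(mic_pa), nfft)
    half = nfft // 2
    mag = 2 * np.abs(spec[:half]) / nfft         # single-sided amplitude spectrum
    mag_rms = mag / np.sqrt(2)                   # peak amplitude -> RMS
    freq = np.linspace(0, inputRate / 2, half)
    return freq, 20 * np.log10(np.maximum(mag_rms, 1e-12) / 20e-6)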
Example #7
def runDPOAE(appObj, testMode=False):
    print("runDPOAE")

    try:
        appObj.tabWidget.setCurrentIndex(2)
        appObj.doneFlag = False
        appObj.isCollecting = True
        # trigRate = octfpga.GetTriggerRate()
        audioHW = appObj.audioHW
        bioamp = appObj.bioamp
        outputRate = audioHW.DAQOutputRate
        inputRate = audioHW.DAQInputRate

        # freq_array2 = audioParams.freq[1, :]
        freqArray = appObj.getFrequencyArray()

        if testMode:
            testDataDir = os.path.join(appObj.basePath, 'exampledata', 'DPOAE')
    #        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
    #        f = open(filePath, 'rb')
    #        audioParams = pickle.load(f)
    #        f.close()
        else:
            # freqArray = appObj.getFrequencyArray()
            i1 = appObj.DPOAE_freqLow_comboBox.currentIndex()
            i2 = appObj.DPOAE_freqHigh_comboBox.currentIndex()
            print("runDPOAE: i1= ", i1, "i2= ", i2)

            ampLow = appObj.DPOAE_ampLow_spinBox.value()
            ampHigh = appObj.DPOAE_ampHigh_spinBox.value()
            ampDelta = appObj.DPOAE_ampDelta_spinBox.value()

            # ampArray = np.arange(ampLow, ampHigh, ampDelta)
            #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
            #ampArray = np.linspace(ampLow, ampHigh, numSteps)
            ampArray = np.arange(ampLow, ampHigh, ampDelta)
            if ampArray[-1] != ampHigh:
                ampArray = np.hstack((ampArray, ampHigh))

            freqArray = freqArray[i1:i2 + 1]

        # numSpk = audioParams.getNumSpeakers()
        if not testMode:
            from DAQHardware import DAQHardware
            daq = DAQHardware()

        chanNamesIn = [audioHW.mic_daqChan, bioamp.daqChan]
        micVoltsPerPascal = audioHW.micVoltsPerPascal
        trialDur = appObj.DPOAE_stimDuration_dblSpinBox.value() * 1e-3
        # nReps = appObj.DPOAEtrialReps_spinBox.value()

        # set input rate to a multiple of the highest output frequency, a little above Nyquist so the stimulus frequency sits closer to the center of the analysis band
        inputRate = 4 * freqArray[-1]
        inputRate = outputRate / int(
            np.floor(outputRate / inputRate)
        )  # pick closest input rate that evenly divides output rate

        frameNum = 0
        isSaveDirInit = False

        attenLines1 = audioHW.attenL_daqChan
        attenLines2 = audioHW.attenR_daqChan

        freq_idx = 0
        DPOAEdata = None
        numSpk = appObj.speaker_comboBox.currentIndex() + 1
        chanNameOut = audioHW.speakerL_daqChan
        if numSpk > 1:
            chanNameOut = [audioHW.speakerL_daqChan, audioHW.speakerR_daqChan]
        print("runDPOAE numSpk=", numSpk)

        for freq in freqArray:
            sp1, sp2 = makeDPOAEOutput(freq, trialDur, audioHW)
            # spkOut = np.tile(spkOut_trial, nReps)

            npts = len(sp1)
            tOut = np.linspace(0, npts / outputRate, npts)
            print("runDPOAE npts=%d len(spkOut)= %d len(tOut)= %d" %
                  (npts, len(sp1), len(tOut)))
            amp_idx = 0
            # ptsPerRep = inputRate

            for amp in ampArray:
                print("runDPOAE freq=" + repr(freq), " amp= ", +amp,
                      " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                vOut1, attenLvl1 = audioHW.getCalibratedOutputVoltageAndAttenLevel(
                    freq, amp, 0)
                spkNum = numSpk - 1
                vOut2, attenLvl2 = audioHW.getCalibratedOutputVoltageAndAttenLevel(
                    freq / 1.22, amp, spkNum)
                if vOut1 > 0 and vOut2 > 0:
                    # attenSig = AudioHardware.makeLM1972AttenSig(0)
                    if not testMode:
                        if numSpk > 1:
                            audioHW.setAttenuatorLevel(attenLvl1, attenLvl2,
                                                       daq)
                        else:
                            if attenLvl1 > attenLvl2:
                                dbDiff = attenLvl1 - attenLvl2
                                attenLvl1 = attenLvl2
                                vOut2 = vOut2 * (10**(dbDiff / 20))
                            elif attenLvl1 < attenLvl2:
                                dbDiff = attenLvl2 - attenLvl1
                                attenLvl2 = attenLvl1
                                vOut1 = vOut1 * (10**(dbDiff / 20))

                            audioHW.setAttenuatorLevel(attenLvl1,
                                                       audioHW.maxAtten, daq)

                        # daq.sendDigOutDPOAEd(attenLines, attenSig)
                        # appObj.oct_hw.SetAttenLevel(0, attenLines)

                    pl = appObj.DPOAE_output
                    pl.clear()
                    endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                    #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                    pl.plot(tOut, sp1 + sp2, pen='b')

                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Output', 'V', **labelStyle)

                    numInputSamples = int(inputRate * len(sp1) / outputRate)

                    if testMode:
                        # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                        pass
                    else:

                        # setup the output task
                        if numSpk > 1:
                            spkOut = np.vstack((vOut1 * sp1, vOut2 * sp2))
                        else:
                            spkOut = vOut1 * sp1 + vOut2 * sp2

                        daq.setupAnalogOutput([chanNameOut],
                                              audioHW.daqTrigChanIn,
                                              int(outputRate), spkOut)
                        daq.startAnalogOutput()

                        # setup the input task
                        daq.setupAnalogInput(chanNamesIn,
                                             audioHW.daqTrigChanIn,
                                             int(inputRate), numInputSamples)
                        daq.startAnalogInput()

                        # trigger the acquisition by sending a digital pulse
                        daq.sendDigTrig(audioHW.daqTrigChanOut)

                        dataIn = daq.readAnalogInput()
                        mic_data = dataIn[0, :]

                        mic_data = mic_data / micVoltsPerPascal

                        daq.stopAnalogInput()
                        daq.stopAnalogOutput()
                        daq.clearAnalogInput()
                        daq.clearAnalogOutput()

                    npts = len(mic_data)
                    t = np.linspace(0, npts / inputRate, npts)
                    pl = appObj.spCal_micInput
                    pl.clear()
                    pl.plot(t, mic_data, pen='b')

                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                    DPOAEptData, DPOAEdata = processDPOAEData(
                        mic_data, freq, freq_idx, amp_idx, freqArray, ampArray,
                        inputRate, DPOAEdata)

                    print("runDPOAE: plotting data")
                    plotDPOAEdata(appObj, DPOAEptData, DPOAEdata)

                QtGui.QApplication.processEvents(
                )  # check for GUI events, such as button presses

                # if done flag, break out of loop
                if appObj.doneFlag:
                    break

                frameNum += 1
                amp_idx += 1

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

            freq_idx += 1

        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.DPOAEnumber
        name = 'DPOAE'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')

        saveOpts.saveMicData = appObj.DPOAE_saveMicData_checkBox.isChecked()
        saveOpts.saveMicFFT = appObj.DPOAE_saveMicFFT_checkBox.isChecked()
        saveDir = appObj.saveDir_lineEdit.text()

        plotName = 'DPOAE %d %s %s' % (number, timeStr, saveOpts.note)
        plotFilePath = saveDPOAEDataFig(DPOAEdata, trialDur, saveDir, plotName,
                                        timeStr)

        reply = QtGui.QMessageBox.question(
            appObj, 'Save', "Keep data?",
            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
            QtGui.QMessageBox.Yes)
        if reply == QtGui.QMessageBox.Yes:
            excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number,
                                                     timeStr, note)
            saveDPOAEDataXLS(DPOAEdata, trialDur, excelWS, saveOpts)
            #saveDPOAEData(DPOAEdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)

            saveDPOAEDataPickle(DPOAEdata, trialDur, plotName, saveOpts,
                                timeStr)
            appObj.DPOAEnumber += 1

        else:
            os.remove(plotFilePath)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents(
    )  # check for GUI events, such as button presses
    appObj.finishCollection()
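
The loop above drives two primaries, one at freq / 1.22 and one at freq, i.e. an f2/f1 ratio of 1.22 with freq playing the role of f2. The distortion product of interest for such a pair is normally 2*f1 - f2. The sketch below shows one way the expected DP frequency and its nearest FFT bins could be located; the helper name and the plain rFFT are illustrative assumptions and are not taken from processDPOAEData.

import numpy as np

def dpoae_bins(f2, mic_data, inputRate):
    # Illustrative helper (not part of PyCMP): locate the two primaries and
    # the 2*f1 - f2 cubic distortion product in a single-sided spectrum.
    f1 = f2 / 1.22                      # primary ratio used in the loop above
    f_dp = 2 * f1 - f2                  # expected distortion-product frequency
    spec = np.abs(np.fft.rfft(mic_data))
    freqs = np.fft.rfftfreq(len(mic_data), d=1.0 / inputRate)
    bins = {name: int(np.argmin(np.abs(freqs - f)))
            for name, f in (('f1', f1), ('f2', f2), ('2f1-f2', f_dp))}
    return f_dp, bins, spec
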
Example #8
def runNoiseExp(appObj, testMode=False):
    print("runNoiseExp")
    appObj.tabWidget.setCurrentIndex(5)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate
    
    cutoffLow = 1e3*appObj.noiseExp_filterLow_dblSpinBox.value()
    cutoffHigh = 1e3*appObj.noiseExp_filterHigh_dblSpinBox.value()
    durationMins = appObj.noiseExp_duration_dblSpinBox.value()
    durationSecs = durationMins*60
    
    try:
        if not testMode:
            from DAQHardware import DAQHardware
            daq = DAQHardware()
    
        chanNamesIn= [ audioHW.mic_daqChan]
        micVoltsPerPascal = audioHW.micVoltsPerPascal
        # mode = 'chirp'
        
        # make 1 second of noise         
        nOut = outputRate*1  
        magResp = np.zeros(nOut)
        fIdx1 = int(nOut*cutoffLow/outputRate)
        fIdx2 = int(nOut*cutoffHigh/outputRate)
        magResp[fIdx1:fIdx2] = 1
        phaseResp = 2*np.pi*np.random.rand(nOut) - np.pi
        sig = magResp*np.exp(-1j*phaseResp)
        spkOut = np.real(np.fft.ifft(sig))
        mx = np.max(spkOut)
        mn = np.min(spkOut)
        # normalize the signal to be between -1 and 1
        spkOut = 2*(spkOut - mn)/(mx - mn) - 1
        
        maxV = audioHW.speakerOutputRng[1]
        spkOut = maxV*spkOut
        
        pl = appObj.spCalTest_output
        pl.clear()
        #endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
        npts = len(spkOut)
        endIdx = npts
        t = np.linspace(0, npts/outputRate, npts)
        pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                
        numInputSamples = int(inputRate*0.1)
            
        if testMode:
            # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
            pass
        else:
            chanNameOut = audioHW.speakerL_daqChan 
            attenLines = audioHW.attenL_daqChan
            attenLinesOther = audioHW.attenR_daqChan
                        
            if not testMode:
                AudioHardware.Attenuator.setLevel(0, attenLines)
                AudioHardware.Attenuator.setLevel(60, attenLinesOther)
    
            # setup the output task
            daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut, isContinuous=True)
            daq.startAnalogOutput()
    
            # trigger the acquisition by sending a digital pulse
            daq.sendDigTrig(audioHW.daqTrigChanOut)
        
        tElapsed = 0
        tLast = time.time()
        npts = numInputSamples
        t = np.linspace(0, npts/inputRate, npts)
    
        while tElapsed < durationSecs:
            if not testMode:
                # setup the input task
                daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                daq.startAnalogInput()
            
                # trigger the acquisition by sending a digital pulse
                daq.sendDigTrig(audioHW.daqTrigChanOut)
                
                daq.waitDoneInput()
    
                mic_data = daq.readAnalogInput()
                mic_data = mic_data[0, :]
                
                daq.stopAnalogInput()
                daq.clearAnalogInput()
            else:
                mic_data = np.random.rand(numInputSamples)
                
            mic_data = mic_data/micVoltsPerPascal
            
            pl = appObj.spCalTest_micInput
            pl.clear()
            pl.plot(t, mic_data, pen='b')
            
            labelStyle = appObj.xLblStyle
            pl.setLabel('bottom', 'Time', 's', **labelStyle)
            labelStyle = appObj.yLblStyle
            pl.setLabel('left', 'Response', 'Pa', **labelStyle)

            # calculate RMS
            nMicPts = len(mic_data)
            micRMS = np.mean(mic_data**2)
            micRMS = 20*np.log10(micRMS**0.5/2e-5)
            appObj.spCalTest_rms_label.setText("%0.3f dB" % micRMS)            
            
            nfft = int(2**np.ceil(np.log(nMicPts*5)/np.log(2)))
            print("nfft = ", nfft)
            win_fcn = np.hanning(nMicPts)
            micFFT = 2*np.abs(np.fft.fft(win_fcn*mic_data, nfft))/nMicPts
            micFFT = 2*micFFT[0:len(micFFT) // 2]
            micFFT = 20*np.log10(micFFT/2e-5)
            freq = np.linspace(0, inputRate/2, len(micFFT))
            pl = appObj.spCalTest_micFFT
            pl.clear()
            #df = freq[1] - freq[0]
            #print("NoiseExp: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
            pl.plot(freq, micFFT, pen='b')
            labelStyle = appObj.xLblStyle
            pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
            labelStyle = appObj.yLblStyle
            pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)
            
            QtGui.QApplication.processEvents() # check for GUI events, such as button presses
            
            # if done flag, break out of loop
            if appObj.doneFlag:
                break      
            
            t1 = time.time()
            tElapsed += (t1 - tLast)
            tLast = t1
    
        
        if not testMode:
            daq.stopAnalogOutput()
            daq.clearAnalogOutput()
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during calibration. Check command line output for details")           
        
    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
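
Because the random-phase spectrum built above is not Hermitian-symmetric, runNoiseExp keeps only the real part of the inverse FFT. An alternative sketch that builds a single-sided spectrum and lets np.fft.irfft enforce the symmetry directly is shown below; the function name is illustrative, and this is not how the example itself does it.

import numpy as np

def make_bandlimited_noise(outputRate, cutoffLow, cutoffHigh, durationSec=1.0):
    # Build a single-sided magnitude spectrum that is flat between the cutoffs,
    # give each bin a random phase, and let irfft produce a real waveform.
    nOut = int(outputRate * durationSec)
    freqs = np.fft.rfftfreq(nOut, d=1.0 / outputRate)
    mag = ((freqs >= cutoffLow) & (freqs < cutoffHigh)).astype(float)
    phase = np.random.uniform(-np.pi, np.pi, len(freqs))
    sig = np.fft.irfft(mag * np.exp(1j * phase), n=nOut)
    # rescale to the range [-1, 1], as the example does before applying maxV
    return 2 * (sig - sig.min()) / (sig.max() - sig.min()) - 1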


    
Example #9
File: CM.py  Project: udayragakiran/PyCMP
def runCM(appObj, testMode=False):
    print("runCM")

    appObj.tabWidget.setCurrentIndex(4)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.CM_freqLow_comboBox.currentIndex()
        i2 = appObj.CM_freqHigh_comboBox.currentIndex()
        print("runCM: i1= ", i1, "i2= ", i2)

        ampLow = appObj.CMampLow_spinBox.value()
        ampHigh = appObj.CMampHigh_spinBox.value()
        ampDelta = appObj.CMampDelta_spinBox.value()

        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        ampArray = np.arange(ampLow, ampHigh, ampDelta)
        if ampArray[-1] != ampHigh:
            ampArray = np.hstack((ampArray, ampHigh))

        freqArray = freqArray[i1:i2 + 1]

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    trialDur = appObj.CMstimDuration_dblSpinBox.value() * 1e-3
    stimOffset = appObj.CMstimOffset_dblSpinBox.value() * 1e-3
    nReps = appObj.CMtrialReps_spinBox.value()

    # set input rate to three times the highest output frequency (disabled; see the commented-out lines below)

    #inputRate = 3*freqArray[-1]
    # inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate

    try:
        frameNum = 0
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan
        attenLines = audioHW.attenL_daqChan

        freq_idx = 0
        CMdata = None

        for freq in freqArray:
            spkOut_trial = makeCMOutput(freq, trialDur, stimOffset, audioHW)
            spkOut = np.tile(spkOut_trial, nReps)

            npts = len(spkOut_trial)
            tOut = np.linspace(0, npts / outputRate, npts)
            print("runCM npts=%d len(spkOut_trial)= %d len(tOut)= %d" %
                  (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = inputRate

            for amp in ampArray:
                print("runCM freq=" + repr(freq), " amp= ", +amp,
                      " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(
                    freq, amp, 0)

                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    # AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    audioHW.setAttenuatorLevel(attenLvl, audioHW.maxAtten, daq)
                    # daq.sendDigOutCmd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)

                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)

                numInputSamples = nReps * int(
                    inputRate * len(spkOut_trial) / outputRate)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:

                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), vOut * spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    timeout = numInputSamples / inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]

                    mic_data = mic_data / micVoltsPerPascal
                    bioamp_data = bioamp_data / bioamp.gain

                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                npts = len(mic_data)
                t = np.linspace(0, npts / inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                # def processCMData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, CMdataIn):
                CMptData, CMdata = processCMData(mic_data, bioamp_data, nReps,
                                                 freq, freq_idx, amp_idx,
                                                 freqArray, ampArray,
                                                 inputRate, CMdata)

                print("runCM: plotting data")
                plotCMdata(appObj, CMptData, CMdata)

                #                if appObj.getSaveState():
                #                    if not isSaveDirInit:
                #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                #                        isSaveDirInit = True
                #
                #                    if saveOpts.saveRaw:
                #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)

                QtGui.QApplication.processEvents(
                )  # check for GUI events, such as button presses

                # if done flag, break out of loop
                if appObj.doneFlag:
                    break

                frameNum += 1
                amp_idx += 1

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

            freq_idx += 1

        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.CMnumber
        name = 'CM'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number,
                                                 timeStr, note)

        appObj.CMnumber += 1
        saveOpts.saveTracings = appObj.CM_saveTracings_checkBox.isChecked()
        saveDir = appObj.saveDir_lineEdit.text()
        saveCMDataXLS(CMdata, trialDur, nReps, excelWS, saveOpts)
        #saveCMData(CMdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)

        plotName = 'CM %d %s %s' % (number, timeStr, saveOpts.note)
        saveCMDataFig(CMdata, trialDur, nReps, saveDir, plotName, timeStr)
        saveCMDataPickle(CMdata, trialDur, nReps, plotName, saveOpts, timeStr)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents(
    )  # check for GUI events, such as button presses
    appObj.finishCollection()
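
Both runCM above and the ABR routines below build their level sweep with np.arange and then append the upper endpoint when arange stops short of it. A small sketch of that pattern as a reusable helper is shown here; the helper name is an illustrative assumption, and the single-level branch mirrors the explicit check in runABR.

import numpy as np

def make_amp_array(ampLow, ampHigh, ampDelta):
    # np.arange excludes the stop value, so append ampHigh when it was missed
    if ampLow == ampHigh:
        return np.array([ampLow])
    ampArray = np.arange(ampLow, ampHigh, ampDelta)
    if ampArray[-1] != ampHigh:
        ampArray = np.hstack((ampArray, ampHigh))
    return ampArray

# e.g. make_amp_array(10, 80, 10) -> array([10, 20, 30, 40, 50, 60, 70, 80])
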
Example #10
File: ABR.py  Project: udayragakiran/PyCMP
def calibrateClick(appObj, testMode=False):
    print("ABR.calibrateClick")

    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')


#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        pass

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    ABRparams = ABRParams(appObj)
    ABRparams.click = True
    ABRparams.nReps = 20
    print("ABR.calibrateClick ABRparams=", ABRparams.__dict__)
    # set input rate to three times the highest output frequency (disabled; see the commented-out lines below)

    #    inputRate = 3*freqArray[-1]
    #    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
    #    inputRate = np.max((inputRate, 6e3))   # input rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
    #    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
    #    print("runABR: inputRate(final)= ", inputRate)

    try:
        chanNameOut = audioHW.speakerL_daqChan
        attenLines = audioHW.attenL_daqChan

        spkOut_trial = makeABROutput(4e3, ABRparams, audioHW)
        spkOut = np.tile(spkOut_trial, ABRparams.nReps)
        npts = len(spkOut_trial)
        tOut = np.linspace(0, npts / outputRate, npts)

        # attenSig = AudioHardware.makeLM1972AttenSig(0)
        if not testMode:
            AudioHardware.Attenuator.setLevel(0, attenLines)

        pl = appObj.ABR_output
        pl.clear()
        endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
        #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
        pl.plot(tOut, spkOut_trial, pen='b')

        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Output', 'V', **labelStyle)

        numInputSamples = int(inputRate * len(spkOut) / outputRate)

        if testMode:
            # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
            pass
        else:
            # setup the output task
            daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                  int(outputRate), spkOut)
            daq.startAnalogOutput()

            # setup the input task
            daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                 int(inputRate), numInputSamples)
            daq.startAnalogInput()

            # trigger the acquisition by sending a digital pulse
            daq.sendDigTrig(audioHW.daqTrigChanOut)

            timeout = numInputSamples / inputRate + 2
            dataIn = daq.readAnalogInput(timeout)
            mic_data = dataIn[0, :]

            mic_data = mic_data / micVoltsPerPascal

            daq.waitDoneInput()
            daq.stopAnalogInput()
            daq.clearAnalogInput()

            daq.waitDoneOutput(stopAndClear=True)

        print("ABR.calibrateClick: plotting data")
        npts = len(mic_data)

        # reshape and average the mic data
        ptsPerRep = npts // ABRparams.nReps
        mic_data = np.reshape(mic_data, (ABRparams.nReps, ptsPerRep))
        mic_data = np.mean(mic_data, 0)

        # plot mic data
        npts = len(mic_data)
        t = np.linspace(0, npts / inputRate, npts)
        pl = appObj.ABR_micInput
        pl.clear()
        pl.plot(t, mic_data, pen='b')

        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Response', 'Pa', **labelStyle)

        idx1 = round(inputRate * ABRparams.stimOffset)
        idx2 = idx1 + round(inputRate * ABRparams.stimDur)

        mic_data = mic_data[idx1:idx2]
        # apply high pass filter to get rid of LF components
        #        (b, a) = scipy.signal.butter(5, 100/inputRate, 'high')
        #        mic_data = scipy.signal.lfilter(b, a, mic_data)

        rms = np.mean(mic_data**2)**0.5
        rms = 20 * np.log10(rms / 2e-5)
        appObj.ABRclick_RMS = rms

        appObj.ABR_rms_label.setText("%0.1f dB" % rms)
        print("ABR.calibrateClick: RMS= ", rms)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    appObj.isCollecting = False
    QtGui.QApplication.processEvents(
    )  # check for GUI events, such as button presses
    appObj.finishCollection()
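
The click calibration above reduces the averaged microphone trace to a single number by converting its RMS pressure to dB SPL relative to 20 uPa, the same conversion used for the RMS labels elsewhere in these examples. A minimal standalone sketch (the helper name is illustrative):

import numpy as np

def pascals_to_db_spl(mic_pa):
    # RMS of a pressure waveform in pascals, expressed in dB SPL re 20 uPa
    rms = np.sqrt(np.mean(np.asarray(mic_pa) ** 2))
    return 20 * np.log10(rms / 2e-5)

# e.g. a 1 Pa RMS tone corresponds to roughly 94 dB SPL
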
Example #11
File: ABR.py  Project: udayragakiran/PyCMP
def runABR(appObj, testMode=False):
    print("runABR")

    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    ABRparams = ABRParams(appObj)

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.ABR_freqLow_comboBox.currentIndex()
        i2 = appObj.ABR_freqHigh_comboBox.currentIndex()
        print("runABR: i1= ", i1, "i2= ", i2)
        ampLow = appObj.ABRampLow_spinBox.value()
        ampHigh = appObj.ABRampHigh_spinBox.value()
        ampDelta = appObj.ABRampDelta_spinBox.value()

        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        if ampLow == ampHigh:
            ampArray = np.array([ampLow])
        else:
            ampArray = np.arange(ampLow, ampHigh, ampDelta)
            if ampArray[-1] != ampHigh:
                ampArray = np.hstack((ampArray, ampHigh))

        freqArray = freqArray[i1:i2 + 1]

    if ABRparams.click:
        freqArray = freqArray[0:1]  # only use a single frequency
        clickRMS = appObj.ABRclick_RMS

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal

    # set input rate to three times the highest output frequency (disabled; see the commented-out lines below)

    #    inputRate = 3*freqArray[-1]
    #    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
    #    inputRate = np.max((inputRate, 6e3))   # input rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
    #    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
    #    print("runABR: inputRate(final)= ", inputRate)

    try:
        frameNum = 0
        numFrames = len(freqArray) * len(ampArray)
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan
        attenLines = audioHW.attenL_daqChan

        freq_idx = 0
        ABRdata = None
        appObj.status_label.setText("Running")
        appObj.progressBar.setValue(0)

        for freq in freqArray:
            spkOut_trial = makeABROutput(freq, ABRparams, audioHW)
            npts = len(spkOut_trial)
            spkOut = np.tile(spkOut_trial, ABRparams.nReps)
            # invert every other trial, necessary for ABR/CAP output
            for n in range(1, ABRparams.nReps, 2):
                idx1 = n * npts
                idx2 = (n + 1) * npts
                spkOut[idx1:idx2] = -spkOut[idx1:idx2]
#            plt.figure(5)
#            plt.clf()
#            plt.plot(spkOut)
            tOut = np.linspace(0, npts / outputRate, npts)
            print("runABR npts=%d len(spkOut_trial)= %d len(tOut)= %d" %
                  (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = int(inputRate * ABRparams.trialDur)

            for amp in ampArray:
                print("runABR freq=" + repr(freq), " amp= ", +amp,
                      " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                if ABRparams.click:
                    clickRMS = appObj.ABRclick_RMS
                    attenLvl = 0
                    vOut = 10**((amp - clickRMS) / 20)
                    minV = audioHW.speakerOutputRng[0]
                    if vOut < minV:
                        attenLvl = int(round(20 * np.log10(minV / vOut)))
                        vOut = minV
                else:
                    vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(
                        freq, amp, 0)

                print("runABR vOut= ", vOut, " atenLvl=", attenLvl)

                if vOut > audioHW.speakerOutputRng[1]:
                    print("runABR vOut= ", vOut, "  out of range")
                    continue
                elif attenLvl > audioHW.maxAtten:
                    print("runABR attenLvl= ", attenLvl,
                          "  gerater than maximum attenuation")
                    continue

                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    # daq.sendDigOutABRd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)

                pl = appObj.ABR_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)

                numInputSamples = ABRparams.nReps * int(
                    inputRate * len(spkOut_trial) / outputRate)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:

                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), vOut * spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    timeout = numInputSamples / inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]

                    mic_data = mic_data / micVoltsPerPascal
                    bioamp_data = bioamp_data / bioamp.gain

                    daq.waitDoneInput()
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                    daq.waitDoneOutput(stopAndClear=True)


#                npts = len(mic_data)
#                t = np.linspace(0, npts/inputRate, npts)
#                pl = appObj.ABR_micInput
#                pl.clear()
#                pl.plot(t, mic_data, pen='b')
#
#                labelStyle = appObj.xLblStyle
#                pl.setLabel('bottom', 'Time', 's', **labelStyle)
#                labelStyle = appObj.yLblStyle
#                pl.setLabel('left', 'Response', 'Pa', **labelStyle)

# def processABRData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, ABRdataIn):
                ABRptData, ABRdata = processABRData(mic_data, bioamp_data,
                                                    freq, freq_idx, amp_idx,
                                                    freqArray, ampArray,
                                                    inputRate, ABRdata,
                                                    ABRparams)

                print("runABR: plotting data")
                plotABRdata(appObj, ABRptData, ABRdata)

                #                if appObj.getSaveState():
                #                    if not isSaveDirInit:
                #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                #                        isSaveDirInit = True
                #
                #                    if saveOpts.saveRaw:
                #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                idx1 = round(inputRate * ABRparams.stimOffset)
                idx2 = idx1 + round(inputRate * ABRparams.stimDur)

                mic_data = mic_data[idx1:idx2]
                rms = np.mean(mic_data**2)**0.5
                rms = 20 * np.log10(rms / 2e-5)

                appObj.ABR_rms_label.setText("%0.1f dB" % rms)

                QtGui.QApplication.processEvents(
                )  # check for GUI events, such as button presses

                # if done flag, break out of loop
                if appObj.doneFlag:
                    break

                frameNum += 1
                amp_idx += 1
                appObj.progressBar.setValue(int(100 * frameNum / numFrames))

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

            freq_idx += 1

        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.ABRnumber
        name = 'ABR'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number,
                                                 timeStr, note)

        appObj.ABRnumber += 1
        #saveOpts.saveTracings = appObj.ABR_saveTracings_checkBox.isChecked()
        saveOpts.saveTracings = True
        saveDir = appObj.saveDir_lineEdit.text()
        saveABRDataXLS(ABRdata, ABRparams, excelWS, saveOpts)
        #saveABRData(ABRdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)

        plotName = 'ABR %d %s %s' % (number, timeStr, saveOpts.note)
        saveABRDataFig(ABRdata, ABRparams, saveDir, plotName, timeStr)
        saveABRDataPickle(ABRdata, ABRparams, plotName, saveOpts, timeStr)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents(
    )  # check for GUI events, such as button presses
    appObj.finishCollection()
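
runABR above inverts every other repetition of the stimulus before concatenating the reps. Averaging responses to alternating-polarity stimuli is a standard way to suppress the stimulus artifact and the cochlear microphonic while keeping the onset-locked ABR. processABRData is not shown in these examples, so the sketch below is an assumption about that intent rather than its actual implementation; it simply folds the concatenated recording into one trace per repetition and averages.

import numpy as np

def average_reps(bioamp_data, nReps):
    # Reshape a concatenated recording into (nReps, ptsPerRep) and average.
    # With alternating stimulus polarity, components that follow the stimulus
    # waveform tend to cancel while the onset-locked response adds.
    ptsPerRep = len(bioamp_data) // nReps
    trials = np.reshape(bioamp_data[:nReps * ptsPerRep], (nReps, ptsPerRep))
    return np.mean(trials, axis=0)
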
Example #12
def runSpeakerCal(appObj, testMode=False):
    DebugLog.log("runSpeakerCal")
    appObj.tabWidget.setCurrentIndex(1)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
        f = open(filePath, 'rb')
        audioParams = pickle.load(f)
        f.close()
    else:
        audioParams = appObj.getAudioParams()
    numSpk = audioParams.getNumSpeakers()
    
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal


    spCal = None
    freq_array2 = audioParams.freq[1, :]
    try:
        frameNum = 0
        isSaveDirInit = False
        saveOpts = appObj.getSaveOpts()
        for spkNum in range(0, numSpk):
            chanNameOut = audioHW.speakerL_daqChan 
            attenLines = audioHW.attenL_daqChan
            spkIdx = 0
                
            if (numSpk == 1 and audioParams.speakerSel == Speaker.RIGHT) or spkNum == 1:  # second speaker (0-based loop index)
                chanNameOut = audioHW.speakerR_daqChan
                attenLines = audioHW.attenR_daqChan
                spkIdx = 1
    
            freq_array = audioParams.freq[spkIdx, :]
            if (audioParams.stimType == AudioStimType.TWO_TONE_DP) and (numSpk == 1):
                freq_array = np.concatenate((freq_array, freq_array2))
                freq_array = np.sort(freq_array)
                freq_array2 = freq_array
                
            if spCal is None:
                spCal = SpeakerCalData(np.vstack((freq_array, freq_array2)))
                
            DebugLog.log("runSpeakerCal freq_array=" + repr(freq_array))
            freq_idx = 0

            attenSig = AudioHardware.makeLM1972AttenSig(0)
            
            if not testMode:
                # daq.sendDigOutCmd(attenLines, attenSig)
                appObj.oct_hw.SetAttenLevel(0, attenLines)
            
            for freq in freq_array:
                DebugLog.log("runSpeakerCal freq=" + repr(freq))
                spkOut = makeSpeakerCalibrationOutput(freq, audioHW, audioParams)    
                npts = len(spkOut)
                t = np.linspace(0, npts/outputRate, npts)
                
                pl = appObj.plot_spkOut
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                        
                numInputSamples = int(inputRate*len(spkOut)/outputRate) 
                
                if testMode:
                    mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                else:

                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    mic_data = daq.readAnalogInput()
                    mic_data = mic_data/micVoltsPerPascal

                
                if not testMode:
                    daq.stopAnalogInput()
                    daq.stopAnalogOutput()
                    daq.clearAnalogInput()
                    daq.clearAnalogOutput()
                
                npts = len(mic_data)
                t = np.linspace(0, npts/inputRate, npts)
                pl = appObj.plot_micRaw
                pl.clear()
                pl.plot(t, mic_data, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
                
                micData, spCal = processSpkCalData(mic_data, freq*1000, freq_idx, audioParams, inputRate, spCal, spkIdx)
                
                pl = appObj.plot_micFFT
                pl.clear()
                df = micData.fft_freq[1] - micData.fft_freq[0]
                nf = len(micData.fft_freq)
                i1 = int(1000*freq_array[0]*0.9/df)
                i2 = int(1000*freq_array[-1]*1.1/df)
                DebugLog.log("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
                pl.plot(micData.fft_freq[i1:i2], micData.fft_mag[i1:i2], pen='b')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)
                
                pl = appObj.plot_micMagResp
                pl.clear()
#                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                pl.plot(freq_array, spCal.magResp[spkIdx, :], pen="b", symbol='o')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)
                
                freq_idx += 1
                
                if appObj.getSaveState():
                    if not isSaveDirInit:
                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                        isSaveDirInit = True
    
                    if saveOpts.saveRaw:
                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                    
                QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                
                # if done flag, break out of loop
                if appObj.doneFlag:
                    break
                
                frameNum += 1

                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
                
        if not appObj.doneFlag:
            saveDir = appObj.settingsPath
            saveSpeakerCal(spCal, saveDir)
            appObj.audioHW.loadSpeakerCalFromProcData(spCal)
            appObj.spCal = spCal            
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during scan. Check command line output for details")           
        
    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
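
The micFFT panels in these calibration routines show a single-sided, windowed magnitude spectrum converted to dB SPL; the noise example above spells the arithmetic out, with one factor of 2 for the single-sided spectrum and another compensating the Hann window's amplitude loss. A minimal sketch of that conversion, with an illustrative function name and the same scaling as the noise example:

import numpy as np

def mic_fft_db_spl(mic_pa, inputRate, nfft=None):
    # Single-sided amplitude spectrum of a pressure signal, in dB SPL re 20 uPa
    n = len(mic_pa)
    if nfft is None:
        nfft = int(2 ** np.ceil(np.log2(n)))   # next power of two
    win = np.hanning(n)
    spec = 2 * np.abs(np.fft.fft(win * mic_pa, nfft)) / n
    spec = 2 * spec[0:len(spec) // 2]          # keep positive frequencies
    freqs = np.linspace(0, inputRate / 2, len(spec))
    return freqs, 20 * np.log10(spec / 2e-5)
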


    
Example #13
def runSpeakerCal(appObj, testMode=False):
    print("runSpeakerCal")
    appObj.tabWidget.setCurrentIndex(0)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        freqArray = appObj.getFrequencyArray()

    # numSpk = audioParams.getNumSpeakers()
    numSpk = 1
    cIdx = appObj.speaker_comboBox.currentIndex()
    if cIdx > 0:
        numSpk = 2

    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    # mode = 'chirp'
    mode = ''
    spCal = None
    # freq_array2 = audioParams.freq[1, :]

    try:
        frameNum = 0
        isSaveDirInit = False
        trialDur = appObj.spCal_stimDuration_dblSpinBox.value() * 1e-3

        freq_array = freqArray
        freq_array2 = freqArray / 1.22

        if numSpk == 1:
            freq_array = np.concatenate((freq_array, freq_array2))
            freq_array = np.sort(freq_array)
            freq_array2 = freq_array

        spCal = SpeakerCalData(np.vstack((freq_array, freq_array2)))

        for spkNum in range(0, numSpk):
            chanNameOut = audioHW.speakerL_daqChan
            #attenLines = audioHW.attenL_daqChan
            #attenLinesOther = audioHW.attenR_daqChan
            spkIdx = 0

            attenLvl1 = 0
            attenLvl2 = audioHW.maxAtten

            if spkNum == 1:  # second speaker (0-based loop index)
                #chanNameOut = audioHW.speakerR_daqChan
                #attenLines = audioHW.attenR_daqChan
                #attenLinesOther = audioHW.attenL_daqChan
                spkIdx = 1
                attenLvl1 = audioHW.maxAtten
                attenLvl2 = 0

            freq_idx = 0

            if not testMode:
                audioHW.setAttenuatorLevel(attenLvl1, attenLvl2, daq)

                # daq.sendDigOutCmd(attenLines, attenSig)
                # appObj.oct_hw.SetAttenLevel(0, attenLines)

            if mode == 'chirp':
                tChirp = 1
                f0 = 100
                f1 = 100e3
                k = (f1 - f0) / tChirp
                nChirpPts = round(outputRate * tChirp)
                t = np.linspace(0, tChirp, nChirpPts)
                spkOut = np.cos(2 * np.pi * (f0 * t + (k / 2) * t**2))  # linear chirp from f0 to f1

                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')

                numInputSamples = int(inputRate * len(spkOut) / outputRate)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    mic_data = daq.readAnalogInput()
                    mic_data = mic_data[0, :]
                    mic_data_chirp = mic_data / micVoltsPerPascal

                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                npts = len(mic_data)
                t = np.linspace(0, npts / inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                # play a reference tone
                refFreq = 4e3
                tRef = 50e-3

                nRefPts = round(outputRate * tRef)
                t = np.linspace(0, tRef, nRefPts)
                spkOut = np.cos(2 * np.pi * refFreq * t)

                # apply envelope
                i1 = round(outputRate * 1e-3)
                i2 = nRefPts - i1
                env = np.linspace(0, 1, i1)
                spkOut[0:i1] = spkOut[0:i1] * env
                spkOut[i2:] = spkOut[i2:] * (1 - env)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    mic_data = daq.readAnalogInput()
                    mic_data_ref = mic_data / micVoltsPerPascal

                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                micData, spCal = processSpkCalDataChirp(
                    mic_data_chirp, mic_data_ref, inputRate, spCal, spkIdx, f0,
                    f1, refFreq)

                pl = appObj.spCal_micFFT
                pl.clear()
                df = micData.fft_freq[1] - micData.fft_freq[0]
                nf = len(micData.fft_freq)
                i1 = int(freq_array[0] * 0.9 / df)
                i2 = int(freq_array[-1] * 1.1 / df)
                print("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" %
                      (df, i1, i2, nf))
                pl.plot(micData.fft_freq[i1:i2],
                        micData.fft_mag[i1:i2],
                        pen='b')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)

                pl = appObj.spCal_spkResp
                pl.clear()
                #                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                pl.plot(freq_array,
                        spCal.magResp[spkIdx, :],
                        pen="b",
                        symbol='o')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)
            else:
                for freq in freq_array:
                    print("runSpeakerCal freq=" + repr(freq))
                    spkOut = makeSpeakerCalibrationOutput(
                        freq, audioHW, trialDur)
                    npts = len(spkOut)
                    t = np.linspace(0, npts / outputRate, npts)

                    pl = appObj.spCal_output
                    pl.clear()
                    endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                    pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')

                    numInputSamples = int(inputRate * len(spkOut) / outputRate)

                    if testMode:
                        # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                        pass
                    else:

                        # setup the output task
                        daq.setupAnalogOutput([chanNameOut],
                                              audioHW.daqTrigChanIn,
                                              int(outputRate), spkOut)
                        daq.startAnalogOutput()

                        # setup the input task
                        daq.setupAnalogInput(chanNamesIn,
                                             audioHW.daqTrigChanIn,
                                             int(inputRate), numInputSamples)
                        daq.startAnalogInput()

                        # trigger the acquisition by sending a digital pulse
                        daq.sendDigTrig(audioHW.daqTrigChanOut)

                        mic_data = daq.readAnalogInput()
                        mic_data = mic_data[0, :]
                        mic_data = mic_data / micVoltsPerPascal

                    if not testMode:
                        daq.stopAnalogInput()
                        daq.stopAnalogOutput()
                        daq.clearAnalogInput()
                        daq.clearAnalogOutput()

                    npts = len(mic_data)
                    t = np.linspace(0, npts / inputRate, npts)
                    pl = appObj.spCal_micInput
                    pl.clear()
                    pl.plot(t, mic_data, pen='b')

                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                    micData, spCal = processSpkCalData(mic_data, freq,
                                                       freq_idx, inputRate,
                                                       spCal, spkIdx, audioHW)

                    pl = appObj.spCal_micFFT
                    pl.clear()
                    df = micData.fft_freq[1] - micData.fft_freq[0]
                    nf = len(micData.fft_freq)
                    i1 = int(freq_array[0] * 0.9 / df)
                    i2 = int(freq_array[-1] * 1.1 / df)
                    print(
                        "SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" %
                        (df, i1, i2, nf))
                    pl.plot(micData.fft_freq[i1:i2],
                            micData.fft_mag[i1:i2],
                            pen='b')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)

                    pl = appObj.spCal_spkResp
                    pl.clear()
                    #                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                    pl.plot(freq_array,
                            spCal.magResp[spkIdx, :],
                            pen="b",
                            symbol='o')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)

                    freq_idx += 1

                    #                if appObj.getSaveState():
                    #                    if not isSaveDirInit:
                    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                    #                        isSaveDirInit = True
                    #
                    #                    if saveOpts.saveRaw:
                    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)

                    QtGui.QApplication.processEvents(
                    )  # check for GUI events, such as button presses

                    # if done flag, break out of loop
                    if appObj.doneFlag:
                        break

                frameNum += 1

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

        if not appObj.doneFlag:
            saveDir = appObj.configPath
            saveSpeakerCal(spCal, saveDir)
            appObj.audioHW.loadSpeakerCalFromProcData(spCal)
            appObj.spCal = spCal

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during calibration. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents(
    )  # check for GUI events, such as button presses
    appObj.finishCollection()
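
The 'chirp' branch in the calibration above sweeps from f0 = 100 Hz to f1 = 100 kHz over tChirp seconds. For a linear chirp with rate k = (f1 - f0) / tChirp the phase is 2*pi*(f0*t + (k/2)*t**2), so the instantaneous frequency runs from f0 at t = 0 to f1 at t = tChirp. A minimal standalone sketch (function name illustrative):

import numpy as np

def make_linear_chirp(outputRate, f0, f1, tChirp):
    # Linear chirp: instantaneous frequency f(t) = f0 + k*t rises from f0 to f1,
    # so the phase is the integral 2*pi*(f0*t + (k/2)*t**2).
    k = (f1 - f0) / tChirp
    n = int(round(outputRate * tChirp))
    t = np.linspace(0, tChirp, n)
    return np.cos(2 * np.pi * (f0 * t + (k / 2) * t ** 2))
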
Example #14
File: CM.py  Project: udayragakiran/PyCMP
def runCM(appObj, testMode=False):
    print("runCM")
    
    appObj.tabWidget.setCurrentIndex(4)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.CM_freqLow_comboBox.currentIndex()
        i2 = appObj.CM_freqHigh_comboBox.currentIndex()
        print("runCM: i1= ", i1, "i2= ", i2)

        ampLow = appObj.CMampLow_spinBox.value()
        ampHigh = appObj.CMampHigh_spinBox.value()
        ampDelta = appObj.CMampDelta_spinBox.value()
        
        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        ampArray = np.arange(ampLow, ampHigh, ampDelta)
        if ampArray[-1] != ampHigh:
            ampArray = np.hstack((ampArray, ampHigh))
        
        freqArray = freqArray[i1:i2+1]

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    trialDur = appObj.CMstimDuration_dblSpinBox.value() * 1e-3
    stimOffset = appObj.CMstimOffset_dblSpinBox.value() * 1e-3
    nReps = appObj.CMtrialReps_spinBox.value()

    # set input rate to three times the highest output frequency (disabled; see the commented-out lines below)
    
    #inputRate = 3*freqArray[-1]
    # inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
    
    
    try:
        frameNum = 0
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan 
        attenLines = audioHW.attenL_daqChan
        
        freq_idx = 0
        CMdata = None
        
        for freq in freqArray:
            spkOut_trial = makeCMOutput(freq, trialDur, stimOffset, audioHW)
            spkOut = np.tile(spkOut_trial, nReps)
            
            npts = len(spkOut_trial)
            tOut = np.linspace(0, npts/outputRate, npts)
            print("runCM npts=%d len(spkOut_trial)= %d len(tOut)= %d" % (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = inputRate
            
            for amp in ampArray:
                print("runCM freq=" + repr(freq), " amp= ", + amp, " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(freq, amp, 0)
                
                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    # AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    audioHW.setAttenuatorLevel(attenLvl, audioHW.maxAtten, daq)
                    # daq.sendDigOutCmd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)
                
                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)
                        
                
                numInputSamples = nReps*int(inputRate*len(spkOut_trial)/outputRate)
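                # numInputSamples covers the full stimulus: nReps trials of
                # len(spkOut_trial)/outputRate seconds each, sampled at inputRate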
                
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
    
                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), vOut*spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    timeout = numInputSamples/inputRate + 2  # full acquisition time plus a 2 s safety margin
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]
                    
                    mic_data = mic_data/micVoltsPerPascal
                    bioamp_data = bioamp_data/bioamp.gain
                
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                
                npts = len(mic_data)
                t = np.linspace(0, npts/inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
    
    # def processCMData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, CMdataIn):            
                CMptData, CMdata = processCMData(mic_data, bioamp_data, nReps, freq, freq_idx, amp_idx, freqArray, ampArray, inputRate, CMdata)

                print("runCM: plotting data")
                plotCMdata(appObj, CMptData, CMdata)
                
    #                if appObj.getSaveState():
    #                    if not isSaveDirInit:
    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
    #                        isSaveDirInit = True
    #    
    #                    if saveOpts.saveRaw:
    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                    
                QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                
                # if done flag, break out of loop
                if appObj.doneFlag:
                    break
                
                frameNum += 1
                amp_idx += 1
                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
            
            freq_idx += 1


        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.CMnumber
        name = 'CM'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number, timeStr, note)
    
        appObj.CMnumber += 1                
        saveOpts.saveTracings = appObj.CM_saveTracings_checkBox.isChecked()
        saveDir = appObj.saveDir_lineEdit.text()
        saveCMDataXLS(CMdata, trialDur, nReps, excelWS, saveOpts)
        #saveCMData(CMdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)
        
        plotName = 'CM %d %s %s' % (number, timeStr, saveOpts.note)
        saveCMDataFig(CMdata, trialDur, nReps, saveDir, plotName, timeStr)
        saveCMDataPickle(CMdata, trialDur, nReps, plotName, saveOpts, timeStr)
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")
        
    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()