Example #1
def calibrateClick(appObj, testMode=False):
    print("ABR.calibrateClick")
    
    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        pass

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    ABRparams = ABRParams(appObj)
    ABRparams.click = True
    ABRparams.nReps = 20
    print("ABR.calibrateClick ABRparams=", ABRparams.__dict__)
    # set the input rate to three times the highest output frequency (see the commented-out lines below)
    
#    inputRate = 3*freqArray[-1]
#    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
#    inputRate = np.max((inputRate, 6e3))   # input rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
#    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
#    print("runABR: inputRate(final)= ", inputRate)
    
    try:
        chanNameOut = audioHW.speakerL_daqChan 
        attenLines = audioHW.attenL_daqChan
        
        spkOut_trial = makeABROutput(4e3, ABRparams, audioHW)
        spkOut = np.tile(spkOut_trial, ABRparams.nReps)
        npts = len(spkOut_trial)
        tOut = np.linspace(0, npts/outputRate, npts)
            
        # attenSig = AudioHardware.makeLM1972AttenSig(0)
        if not testMode:
            AudioHardware.Attenuator.setLevel(0, attenLines)
                
        pl = appObj.ABR_output
        pl.clear()
        endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
        #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
        pl.plot(tOut, spkOut_trial, pen='b')
                
        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Output', 'V', **labelStyle)
                
        numInputSamples = int(inputRate*len(spkOut)/outputRate)
        
        if testMode:
            # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
            pass
        else:
            # setup the output task
            daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
            daq.startAnalogOutput()
            
            # setup the input task
            daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
            daq.startAnalogInput()
        
            # trigger the acquisition by sending a digital pulse
            daq.sendDigTrig(audioHW.daqTrigChanOut)
            
            timeout = numInputSamples/inputRate + 2
            dataIn = daq.readAnalogInput(timeout)
            mic_data = dataIn[0, :]
            
            mic_data = mic_data/micVoltsPerPascal
        
            daq.waitDoneInput()
            daq.stopAnalogInput()
            daq.clearAnalogInput()
            
            daq.waitDoneOutput(stopAndClear=True)
        
        print("ABR.calibrateClick: plotting data")
        npts = len(mic_data)
        
        # reshape and average the mic data
        ptsPerRep = npts // ABRparams.nReps
        mic_data = np.reshape(mic_data, (ABRparams.nReps, ptsPerRep))
        mic_data = np.mean(mic_data, 0)
        
        # plot mic data
        npts = len(mic_data)
        t = np.linspace(0, npts/inputRate, npts)
        pl = appObj.ABR_micInput
        pl.clear()
        pl.plot(t, mic_data, pen='b')
        
        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Response', 'Pa', **labelStyle)
        
        idx1 = round(inputRate*ABRparams.stimOffset)
        idx2 = idx1 + round(inputRate*ABRparams.stimDur)
        
        mic_data = mic_data[idx1:idx2] 
        # apply high pass filter to get rid of LF components
#        (b, a) = scipy.signal.butter(5, 100/inputRate, 'high')
#        mic_data = scipy.signal.lfilter(b, a, mic_data) 

        rms = np.mean(mic_data ** 2) ** 0.5
        rms = 20*np.log10(rms/2e-5)
        appObj.ABRclick_RMS = rms
        
        appObj.ABR_rms_label.setText("%0.1f dB" % rms)
        print("ABR.calibrateClick: RMS= ", rms)
        
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(appObj, "Error", "Error during collection. Check command line output for details")
        
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
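
The click calibration above reduces the recorded microphone trace to a single number: the repetitions are averaged and the resulting RMS pressure is expressed in dB SPL re 20 µPa. A minimal, self-contained sketch of just that arithmetic, using made-up numbers in place of the DAQ acquisition:

import numpy as np

# hypothetical recording: 20 repetitions of a 1000-sample trial, already in pascals
nReps, ptsPerRep = 20, 1000
rng = np.random.default_rng(0)
mic_data = 0.02 * rng.standard_normal(nReps * ptsPerRep)

# reshape to (reps, samples) and average across reps to suppress uncorrelated noise
mic_data = np.reshape(mic_data, (nReps, ptsPerRep)).mean(axis=0)

# RMS pressure converted to dB SPL relative to 20 micropascals
rms_pa = np.sqrt(np.mean(mic_data ** 2))
rms_db_spl = 20 * np.log10(rms_pa / 2e-5)
print("click level = %0.1f dB SPL" % rms_db_spl)
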
Example #2
def runSpeakerCal(appObj, testMode=False):
    print("runSpeakerCal")
    appObj.tabWidget.setCurrentIndex(0)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        freqArray = appObj.getFrequencyArray()
        
    # numSpk = audioParams.getNumSpeakers()
    numSpk = 1
    cIdx = appObj.speaker_comboBox.currentIndex()
    if cIdx > 0:
        numSpk = 2
    
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    # mode = 'chirp'
    mode = ''
    spCal = None
    # freq_array2 = audioParams.freq[1, :]
    
    try:
        frameNum = 0
        isSaveDirInit = False
        trialDur = appObj.spCal_stimDuration_dblSpinBox.value()*1e-3
        
        freq_array = freqArray
        freq_array2 = freqArray/1.22

        if numSpk == 1:
            freq_array = np.concatenate((freq_array, freq_array2))
            freq_array = np.sort(freq_array)
            freq_array2 = freq_array
        
        spCal = SpeakerCalData(np.vstack((freq_array, freq_array2)))            
        
        for spkNum in range(0, numSpk):
            chanNameOut = audioHW.speakerL_daqChan 
            #attenLines = audioHW.attenL_daqChan
            #attenLinesOther = audioHW.attenR_daqChan
            spkIdx = 0

            attenLvl1 = 0
            attenLvl2 = audioHW.maxAtten
                
            if spkNum == 1:
                #chanNameOut = audioHW.speakerR_daqChan
                #attenLines = audioHW.attenR_daqChan
                #attenLinesOther = audioHW.attenL_daqChan
                spkIdx = 1
                attenLvl1 = audioHW.maxAtten
                attenLvl2 = 0
    
            freq_idx = 0


            if not testMode:
                audioHW.setAttenuatorLevel(attenLvl1, attenLvl2, daq)
                
                # daq.sendDigOutCmd(attenLines, attenSig)
                # appObj.oct_hw.SetAttenLevel(0, attenLines)
                
            if mode == 'chirp':
                tChirp = 1    
                f0 = 100
                f1 = 100e3
                k = (f1- f0)/tChirp
                nChirpPts = round(outputRate*tChirp)
                t = np.linspace(0, tChirp, nChirpPts)
                spkOut = np.cos(2*np.pi*(f0*t + (k/2)*t**2))
                
                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                        
                numInputSamples = int(inputRate*len(spkOut)/outputRate) 
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    mic_data = daq.readAnalogInput()
                    mic_data = mic_data[0, :]
                    mic_data_chirp = mic_data/micVoltsPerPascal

                
                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                    
                npts = len(mic_data)
                t = np.linspace(0, npts/inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
                
                # play reference tone
                refFreq = 4e3
                tRef = 50e-3
                
                nRefPts = round(outputRate*tRef)
                t = np.linspace(0, tRef, nRefPts)
                spkOut = np.cos(2*np.pi*refFreq*t)
                
                # apply envelope
                i1 = round(outputRate*1e-3)
                i2 = nRefPts- i1
                env = np.linspace(0, 1, i1)
                spkOut[0:i1] = spkOut[0:i1]*env
                spkOut[i2:] = spkOut[i2:]*(1-env)
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    mic_data = daq.readAnalogInput()
                    mic_data_ref = mic_data/micVoltsPerPascal
                    
                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                
                micData, spCal = processSpkCalDataChirp(mic_data_chirp, mic_data_ref, inputRate, spCal, spkIdx, f0, f1, refFreq)
                    
                pl = appObj.spCal_micFFT
                pl.clear()
                df = micData.fft_freq[1] - micData.fft_freq[0]
                nf = len(micData.fft_freq)
                i1 = int(freq_array[0]*0.9/df)
                i2 = int(freq_array[-1]*1.1/df)
                print("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
                pl.plot(micData.fft_freq[i1:i2], micData.fft_mag[i1:i2], pen='b')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
                
                pl = appObj.spCal_spkResp
                pl.clear()
#                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                pl.plot(freq_array, spCal.magResp[spkIdx, :], pen="b", symbol='o')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
            else:
                for freq in freq_array:
                    print("runSpeakerCal freq=" + repr(freq))
                    spkOut = makeSpeakerCalibrationOutput(freq, audioHW, trialDur)    
                    npts = len(spkOut)
                    t = np.linspace(0, npts/outputRate, npts)
                    
                    pl = appObj.spCal_output
                    pl.clear()
                    endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                    pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                            
                    numInputSamples = int(inputRate*len(spkOut)/outputRate) 
                    
                    if testMode:
                        # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                        pass
                    else:
    
                        # setup the output task
                        daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                        daq.startAnalogOutput()
                        
                        # setup the input task
                        daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                        daq.startAnalogInput()
                    
                        # trigger the acquisition by sending a digital pulse
                        daq.sendDigTrig(audioHW.daqTrigChanOut)
                        
                        mic_data = daq.readAnalogInput()
                        mic_data = mic_data[0, :]
                        mic_data = mic_data/micVoltsPerPascal
    
                    
                    if not testMode:
                        daq.stopAnalogInput()
                        daq.stopAnalogOutput()
                        daq.clearAnalogInput()
                        daq.clearAnalogOutput()
                    
                    npts = len(mic_data)
                    t = np.linspace(0, npts/inputRate, npts)
                    pl = appObj.spCal_micInput
                    pl.clear()
                    pl.plot(t, mic_data, pen='b')
                    
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Response', 'Pa', **labelStyle)
                    
                    micData, spCal = processSpkCalData(mic_data, freq, freq_idx, inputRate, spCal, spkIdx, audioHW)
                    
                    pl = appObj.spCal_micFFT
                    pl.clear()
                    df = micData.fft_freq[1] - micData.fft_freq[0]
                    nf = len(micData.fft_freq)
                    i1 = int(freq_array[0]*0.9/df)
                    i2 = int(freq_array[-1]*1.1/df)
                    print("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
                    pl.plot(micData.fft_freq[i1:i2], micData.fft_mag[i1:i2], pen='b')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
                    
                    pl = appObj.spCal_spkResp
                    pl.clear()
    #                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                    pl.plot(freq_array, spCal.magResp[spkIdx, :], pen="b", symbol='o')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
                    
                    freq_idx += 1
                    
    #                if appObj.getSaveState():
    #                    if not isSaveDirInit:
    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
    #                        isSaveDirInit = True
    #    
    #                    if saveOpts.saveRaw:
    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                        
                    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                    
                    # if done flag, break out of loop
                    if appObj.doneFlag:
                        break
                
                frameNum += 1

                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
                
        if not appObj.doneFlag:
            saveDir = appObj.configPath
            saveSpeakerCal(spCal, saveDir)
            appObj.audioHW.loadSpeakerCalFromProcData(spCal)
            appObj.spCal = spCal            
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(appObj, "Error", "Error during calibration. Check command line output for details")
        
    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
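
The 'chirp' branch of runSpeakerCal synthesizes a linear frequency sweep from f0 to f1 over tChirp seconds; the instantaneous frequency f(t) = f0 + k*t integrates to the phase used in the cosine. A short sketch of that synthesis on its own, with illustrative values for the sample rate and sweep limits:

import numpy as np
import scipy.signal

fs = 200e3                            # illustrative DAQ output rate, samples/s
tChirp, f0, f1 = 1.0, 100.0, 100e3    # sweep 100 Hz -> 100 kHz over 1 s
k = (f1 - f0) / tChirp                # sweep rate, Hz/s

t = np.arange(int(round(fs * tChirp))) / fs
# phase = 2*pi * integral of (f0 + k*t) dt = 2*pi*(f0*t + k*t**2/2)
spkOut = np.cos(2 * np.pi * (f0 * t + (k / 2) * t ** 2))

# the same linear sweep via SciPy's helper (cosine phase convention)
spkOut_scipy = scipy.signal.chirp(t, f0=f0, t1=tChirp, f1=f1, method='linear', phi=0)
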


    
Example #3
def runCM(appObj, testMode=False):
    print("runCM")

    appObj.tabWidget.setCurrentIndex(4)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.CM_freqLow_comboBox.currentIndex()
        i2 = appObj.CM_freqHigh_comboBox.currentIndex()
        print("runCM: i1= ", i1, "i2= ", i2)

        ampLow = appObj.CMampLow_spinBox.value()
        ampHigh = appObj.CMampHigh_spinBox.value()
        ampDelta = appObj.CMampDelta_spinBox.value()

        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        ampArray = np.arange(ampLow, ampHigh, ampDelta)
        if ampArray[-1] != ampHigh:
            ampArray = np.hstack((ampArray, ampHigh))

        freqArray = freqArray[i1:i2 + 1]

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    trialDur = appObj.CMstimDuration_dblSpinBox.value() * 1e-3
    stimOffset = appObj.CMstimOffset_dblSpinBox.value() * 1e-3
    nReps = appObj.CMtrialReps_spinBox.value()

    # set the input rate to three times the highest output frequency (see the commented-out lines below)

    #inputRate = 3*freqArray[-1]
    # inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate

    try:
        frameNum = 0
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan
        attenLines = audioHW.attenL_daqChan

        freq_idx = 0
        CMdata = None

        for freq in freqArray:
            spkOut_trial = makeCMOutput(freq, trialDur, stimOffset, audioHW)
            spkOut = np.tile(spkOut_trial, nReps)

            npts = len(spkOut_trial)
            tOut = np.linspace(0, npts / outputRate, npts)
            print("runCM npts=%d len(spkOut_trial)= %d len(tOut)= %d" %
                  (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = inputRate

            for amp in ampArray:
                print("runCM freq=" + repr(freq), " amp= ", +amp,
                      " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(
                    freq, amp, 0)

                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    # AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    audioHW.setAttenuatorLevel(attenLvl, audioHW.maxAtten, daq)
                    # daq.sendDigOutCmd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)

                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)

                numInputSamples = nReps * int(
                    inputRate * len(spkOut_trial) / outputRate)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:

                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), vOut * spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    timeout = numInputSamples / inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]

                    mic_data = mic_data / micVoltsPerPascal
                    bioamp_data = bioamp_data / bioamp.gain

                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                npts = len(mic_data)
                t = np.linspace(0, npts / inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                # def processCMData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, CMdataIn):
                CMptData, CMdata = processCMData(mic_data, bioamp_data, nReps,
                                                 freq, freq_idx, amp_idx,
                                                 freqArray, ampArray,
                                                 inputRate, CMdata)

                print("runCM: plotting data")
                plotCMdata(appObj, CMptData, CMdata)

                #                if appObj.getSaveState():
                #                    if not isSaveDirInit:
                #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                #                        isSaveDirInit = True
                #
                #                    if saveOpts.saveRaw:
                #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)

                QtGui.QApplication.processEvents()  # check for GUI events, such as button presses

                # if done flag, break out of loop
                if appObj.doneFlag:
                    break

                frameNum += 1
                amp_idx += 1

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

            freq_idx += 1

        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.CMnumber
        name = 'CM'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number,
                                                 timeStr, note)

        appObj.CMnumber += 1
        saveOpts.saveTracings = appObj.CM_saveTracings_checkBox.isChecked()
        saveDir = appObj.saveDir_lineEdit.text()
        saveCMDataXLS(CMdata, trialDur, nReps, excelWS, saveOpts)
        #saveCMData(CMdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)

        plotName = 'CM %d %s %s' % (number, timeStr, saveOpts.note)
        saveCMDataFig(CMdata, trialDur, nReps, saveDir, plotName, timeStr)
        saveCMDataPickle(CMdata, trialDur, nReps, plotName, saveOpts, timeStr)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents()  # check for GUI events, such as button presses
    appObj.finishCollection()
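
runCM steps the stimulus level from ampLow to ampHigh in ampDelta increments and appends the endpoint when np.arange stops short of it. A standalone sketch of that level-array construction with illustrative values:

import numpy as np

ampLow, ampHigh, ampDelta = 10.0, 80.0, 15.0      # illustrative dB sweep

ampArray = np.arange(ampLow, ampHigh, ampDelta)   # np.arange excludes the stop value
if ampArray[-1] != ampHigh:
    ampArray = np.hstack((ampArray, ampHigh))     # make sure the top level is tested

print(ampArray)   # [10. 25. 40. 55. 70. 80.]

Note that runABR (below) additionally guards the ampLow == ampHigh case, where np.arange returns an empty array and ampArray[-1] would raise an IndexError; runCM as written does not.
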
Example #4
def runABR(appObj, testMode=False):
    print("runABR")
    
    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    ABRparams = ABRParams(appObj)
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.ABR_freqLow_comboBox.currentIndex()
        i2 = appObj.ABR_freqHigh_comboBox.currentIndex()
        print("runABR: i1= ", i1, "i2= ", i2)
        ampLow = appObj.ABRampLow_spinBox.value()
        ampHigh = appObj.ABRampHigh_spinBox.value()
        ampDelta = appObj.ABRampDelta_spinBox.value()
        
        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        if ampLow == ampHigh:
            ampArray = np.array([ampLow])
        else:
            ampArray = np.arange(ampLow, ampHigh, ampDelta)
            if ampArray[-1] != ampHigh:
                ampArray = np.hstack((ampArray, ampHigh))
        
        freqArray = freqArray[i1:i2+1]
    
    if ABRparams.click:
        freqArray = freqArray[0:1]  # only use a single frequency
        clickRMS = appObj.ABRclick_RMS
        
    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    
    

    # set the input rate to three times the highest output frequency (see the commented-out lines below)
    
#    inputRate = 3*freqArray[-1]
#    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
#    inputRate = np.max((inputRate, 6e3))   # input rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
#    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
#    print("runABR: inputRate(final)= ", inputRate)
    
    try:
        frameNum = 0
        numFrames = len(freqArray)*len(ampArray)
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan 
        attenLines = audioHW.attenL_daqChan
        
        freq_idx = 0
        ABRdata = None
        appObj.status_label.setText("Running")
        appObj.progressBar.setValue(0)
        
        for freq in freqArray:
            spkOut_trial = makeABROutput(freq, ABRparams, audioHW)
            npts = len(spkOut_trial)
            spkOut = np.tile(spkOut_trial, ABRparams.nReps)
            # invert every other trial, necessary for ABR/CAP output 
            for n in range(1, ABRparams.nReps, 2):
                idx1 = n*npts
                idx2 = (n+1)*npts
                spkOut[idx1:idx2] = -spkOut[idx1:idx2]
#            plt.figure(5)
#            plt.clf()
#            plt.plot(spkOut)
            tOut = np.linspace(0, npts/outputRate, npts)
            print("runABR npts=%d len(spkOut_trial)= %d len(tOut)= %d" % (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = int(inputRate*ABRparams.trialDur)
            
            for amp in ampArray:
                print("runABR freq=" + repr(freq), " amp= ", + amp, " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                if ABRparams.click:
                    clickRMS = appObj.ABRclick_RMS
                    attenLvl = 0
                    vOut = 10**((amp - clickRMS)/20)
                    minV = audioHW.speakerOutputRng[0]
                    if vOut < minV:
                        attenLvl = int(round(20*np.log10(minV/vOut)))
                        vOut = minV
                else:
                    vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(freq, amp, 0)
                
                print("runABR vOut= ", vOut, " atenLvl=", attenLvl)
                
                if vOut > audioHW.speakerOutputRng[1]:
                    print("runABR vOut= ", vOut, "  out of range")
                    continue
                elif attenLvl > audioHW.maxAtten:
                    print("runABR attenLvl= ", attenLvl, "  gerater than maximum attenuation")
                    continue
                    
                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    # daq.sendDigOutABRd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)
                
                pl = appObj.ABR_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)
                
                numInputSamples = ABRparams.nReps*int(inputRate*len(spkOut_trial)/outputRate)
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
    
                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), vOut*spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    timeout = numInputSamples/inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]
                    
                    mic_data = mic_data/micVoltsPerPascal
                    bioamp_data = bioamp_data/bioamp.gain
                
                    daq.waitDoneInput()
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                    
                    daq.waitDoneOutput(stopAndClear=True)
                
#                npts = len(mic_data)
#                t = np.linspace(0, npts/inputRate, npts)
#                pl = appObj.ABR_micInput
#                pl.clear()
#                pl.plot(t, mic_data, pen='b')
#                
#                labelStyle = appObj.xLblStyle
#                pl.setLabel('bottom', 'Time', 's', **labelStyle)
#                labelStyle = appObj.yLblStyle
#                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
    
    # def processABRData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, ABRdataIn):            
                ABRptData, ABRdata = processABRData(mic_data, bioamp_data, freq, freq_idx, amp_idx, freqArray, ampArray, inputRate, ABRdata, ABRparams)

                print("runABR: plotting data")
                plotABRdata(appObj, ABRptData, ABRdata)
                
    #                if appObj.getSaveState():
    #                    if not isSaveDirInit:
    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
    #                        isSaveDirInit = True
    #    
    #                    if saveOpts.saveRaw:
    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                idx1 = round(inputRate*ABRparams.stimOffset)
                idx2 = idx1 + round(inputRate*ABRparams.stimDur)
                
                mic_data = mic_data[idx1:idx2] 
                rms = np.mean(mic_data ** 2) ** 0.5
                rms = 20*np.log10(rms/2e-5)
                
                appObj.ABR_rms_label.setText("%0.1f dB" % rms)                    
                
                QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                
                # if done flag, break out of loop
                if appObj.doneFlag:
                    break
                
                frameNum += 1
                amp_idx += 1
                appObj.progressBar.setValue(frameNum/numFrames)
                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
            
            freq_idx += 1


        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.ABRnumber
        name = 'ABR'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number, timeStr, note)
    
        appObj.ABRnumber += 1                
        #saveOpts.saveTracings = appObj.ABR_saveTracings_checkBox.isChecked()
        saveOpts.saveTracings = True
        saveDir = appObj.saveDir_lineEdit.text()
        saveABRDataXLS(ABRdata, ABRparams, excelWS, saveOpts)
        #saveABRData(ABRdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)
        
        plotName = 'ABR %d %s %s' % (number, timeStr, saveOpts.note)
        saveABRDataFig(ABRdata, ABRparams, saveDir, plotName, timeStr)
        saveABRDataPickle(ABRdata, ABRparams, plotName, saveOpts, timeStr)
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(appObj, "Error", "Error during collection. Check command line output for details")
        
    # update the audio hardware speaker calibration                     
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
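
runABR tiles one stimulus trial nReps times and inverts every other repetition before sending it to the DAQ; averaging the responses to alternating-polarity stimuli cancels the stimulus artifact and the cochlear microphonic while leaving the neural (ABR/CAP) component. A minimal sketch of just the inversion step, with an arbitrary placeholder trial:

import numpy as np

nReps = 6
spkOut_trial = np.sin(2 * np.pi * np.linspace(0.0, 1.0, 100))   # placeholder trial waveform
npts = len(spkOut_trial)

spkOut = np.tile(spkOut_trial, nReps)
# invert the odd-numbered repetitions (1, 3, 5, ...)
for n in range(1, nReps, 2):
    spkOut[n * npts:(n + 1) * npts] *= -1
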
Example #5
def calibrateScanMirror(appObj):
    DebugLog.log("calibrateScanMirror")    
    appObj.tabWidget.setCurrentIndex(7)
    appObj.doneFlag = False
    appObj.isCollecting = True
    appObj.JSOsaveDispersion_pushButton.setEnabled(True)
    appObj.JSOloadDispersion_pushButton.setEnabled(False)

    if not appObj.oct_hw.IsOCTTestingMode():     # prepare to get new data            
        from DAQHardware import DAQHardware
        daq = DAQHardware()
    audioHW=appObj.audioHW
    mirrorDriver = appObj.mirrorDriver    
    chanNames = [mirrorDriver.X_daqChan, mirrorDriver.Y_daqChan]
    trigChan = audioHW.daqTrigChanIn   #use the audio trigger to start the scan
    outputRate = mirrorDriver.DAQoutputRate

    while not appObj.doneFlag:        # keep running until the button is turned off
        scanParams = appObj.getScanParams()
        # create scan pattern to drive the mirrors
        mode=appObj.scanShape_comboBox.currentIndex()
        print('mode',mode)
        if mode==0:   # create a spiral scan using fast (resonant) scanning
            Vmaxx=mirrorDriver.voltRange[1] # maximum voltage for MEMS mirror for x-axis
            Vmaxy=mirrorDriver.voltRange[1] # maximum voltage for MEMS mirror for y-axis
            xAdjust = 1    
            yAdjust = scanParams.skewResonant
            phaseShift = scanParams.phaseAdjust
            fr = mirrorDriver.resonantFreq  # angular scan rate (frequency of one rotation - resonant frequency)
            fv = scanParams.volScanFreq     # volume scan frequency; one period scans in and then out, i.e. two volumes
            DebugLog.log("freq of one rotation (fr)= %d; scan frequency (fv)= %d" % (fr, fv))
            diameter = scanParams.length
            voltsPerMM = mirrorDriver.voltsPerMillimeterResonant
            A1=(Vmaxx/2)/xAdjust
            A2=(Vmaxy/2)/yAdjust
            A3=voltsPerMM*diameter/2 
            A=np.min([A1,A2,A3])           
            fs=mirrorDriver.DAQoutputRate   # galvo output sampling rate
            t=np.arange(0,np.around(fs/fv))*1/fs  # t is the array of times for the DAQ output to the mirrors
            r=1/2*(1-np.cos(2*np.pi*fv*t))            
            x=xAdjust*A*r*np.cos(2*np.pi*fr*t) # x and y are the coordinates of the laser at each point in time
            y=yAdjust*A*r*np.sin(2*np.pi*fr*t+phaseShift*np.pi/180)
            mirrorOut= np.vstack((x,y))
            
        elif mode==1:   # create a square scan using slow parameters
            Vmaxx=mirrorDriver.voltRange[1] # maximum voltage for MEMS mirror for x-axis
            Vmaxy=mirrorDriver.voltRange[1] # maximum voltage for MEMS mirror for y-axis
            xAdjust = 1    
            yAdjust = scanParams.skewNonResonant
            diameter = scanParams.length
            voltsPerMMX = mirrorDriver.voltsPerMillimeter*xAdjust
            voltsPerMMY = mirrorDriver.voltsPerMillimeter*yAdjust
            if ((diameter/2)*voltsPerMMX)>Vmaxx:
                diameter=2*Vmaxx/voltsPerMMX
            if ((diameter/2)*voltsPerMMY)>Vmaxy:
                diameter=2*Vmaxy/voltsPerMMY
            freq = appObj.cal_freq_dblSpinBox.value()
            if freq>mirrorDriver.LPFcutoff:  # can't go faster than the maximum scan rate
                freq = mirrorDriver.LPFcutoff
                appObj.cal_freq_dblSpinBox.setValue(mirrorDriver.LPFcutoff)
            fs=mirrorDriver.DAQoutputRate   # galvo output sampling rate
            t1=np.arange(0,np.around(fs/freq))*1/fs  
            n=int(np.around(t1.shape[0]/4))-1   # number of points in each 4th of the cycle (reduce by 1 to make it easy to shorten t1)
            t=t1[0:4*n]  # t is the array of times for the DAQ output to the mirrors
            cornerX=(diameter/2)*voltsPerMMX     # voltage at each corner of the square            
            cornerY=(diameter/2)*voltsPerMMY     # voltage at each corner of the square            
            
            # x and y are the coordinates of the laser at each point in time
            x=np.zeros(t.shape)            
            y=np.zeros(t.shape)            
            x[0:n]=np.linspace(-cornerX,cornerX,n)
            y[0:n]=-cornerY
            x[n:2*n]=cornerX
            y[n:2*n]=np.linspace(-cornerY,cornerY,n)
            x[2*n:3*n]=np.linspace(cornerX,-cornerX,n)
            y[2*n:3*n]=cornerY
            x[3*n:4*n]=-cornerX
            y[3*n:4*n]=np.linspace(cornerY,-cornerY,n)
            mirrorOut1= np.vstack((x,y))
            if mirrorDriver.MEMS:
                mirrorOut=scipy.signal.filtfilt(mirrorDriver.b_filt,mirrorDriver.a_filt,mirrorOut1)           
            else:
                mirrorOut=mirrorOut1    

        # plot mirror commands to GUI 
        pl = appObj.JSOmisc_plot1
        npts = mirrorOut.shape[1]
        t = np.linspace(0, npts/outputRate, npts)
        pl.clear()
        pl.plot(t, mirrorOut[0, :], pen='b')  
        pl.plot(t, mirrorOut[1, :], pen='r')  
        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Output', 'V', **labelStyle)
    
        pl2=appObj.JSOmisc_plot2
        pl2.clear()
        pl2.plot(mirrorOut[0, :],mirrorOut[1, :], pen='b')
        labelStyle = appObj.xLblStyle
        pl2.setLabel('bottom', 'X galvo', 'V', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl2.setLabel('left', 'Y galvo', 'V', **labelStyle)
     
        if not appObj.oct_hw.IsDAQTestingMode():
            # setup the analog output DAQ device
            daq.setupAnalogOutput(chanNames, trigChan, outputRate, mirrorOut.transpose())        
            daq.startAnalogOutput()
            
            #start trigger and wait for output to finish 
            daq.sendDigTrig(audioHW.daqTrigChanOut)
            daq.waitDoneOutput(timeout=3, stopAndClear=True)
            
            QtGui.QApplication.processEvents() # check for GUI events
        else:
            appObj.doneFlag = True      # just run one time through if in test mode
            appObj.CalibrateScanMirror_pushButton.setChecked(False)
                  
    # when testing is over, set the mirror position to (0,0)
    if not appObj.oct_hw.IsDAQTestingMode():
        chanNames = [mirrorDriver.X_daqChan, mirrorDriver.Y_daqChan]
        data = np.zeros(2)
        daq.writeValues(chanNames, data)
    
    appObj.JSOsaveDispersion_pushButton.setEnabled(False)    
    appObj.JSOloadDispersion_pushButton.setEnabled(True)        
    appObj.isCollecting = False
    appObj.finishCollection()
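
The spiral branch (mode 0) of calibrateScanMirror multiplies a slowly ramped radius by fast sine/cosine rotation, so the beam spirals out to the full scan diameter and back over one volume-scan period. A compact sketch of the waveform math with illustrative rates and amplitude (the skew and phase adjustments taken from scanParams are omitted here):

import numpy as np

fs = 100e3    # illustrative DAQ output rate, samples/s
fr = 2e3      # fast (resonant) rotation frequency, Hz
fv = 20.0     # volume scan frequency, Hz
A = 1.0       # peak mirror drive amplitude, V

t = np.arange(int(round(fs / fv))) / fs        # one volume-scan period
r = 0.5 * (1 - np.cos(2 * np.pi * fv * t))     # radius ramps 0 -> 1 -> 0
x = A * r * np.cos(2 * np.pi * fr * t)         # X mirror command
y = A * r * np.sin(2 * np.pi * fr * t)         # Y mirror command
mirrorOut = np.vstack((x, y))                  # (2, N) array for the two DAQ channels
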
Example #6
def calibrateClick(appObj, testMode=False):
    print("ABR.calibrateClick")

    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')


#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        pass

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    ABRparams = ABRParams(appObj)
    ABRparams.click = True
    ABRparams.nReps = 20
    print("ABR.calibrateClick ABRparams=", ABRparams.__dict__)
    # set the input rate to three times the highest output frequency (see the commented-out lines below)

    #    inputRate = 3*freqArray[-1]
    #    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
    #    inputRate = np.max((inputRate, 6e3))   # input rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
    #    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
    #    print("runABR: inputRate(final)= ", inputRate)

    try:
        chanNameOut = audioHW.speakerL_daqChan
        attenLines = audioHW.attenL_daqChan

        spkOut_trial = makeABROutput(4e3, ABRparams, audioHW)
        spkOut = np.tile(spkOut_trial, ABRparams.nReps)
        npts = len(spkOut_trial)
        tOut = np.linspace(0, npts / outputRate, npts)

        # attenSig = AudioHardware.makeLM1972AttenSig(0)
        if not testMode:
            AudioHardware.Attenuator.setLevel(0, attenLines)

        pl = appObj.ABR_output
        pl.clear()
        endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
        #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
        pl.plot(tOut, spkOut_trial, pen='b')

        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Output', 'V', **labelStyle)

        numInputSamples = int(inputRate * len(spkOut) / outputRate)

        if testMode:
            # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
            pass
        else:
            # setup the output task
            daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                  int(outputRate), spkOut)
            daq.startAnalogOutput()

            # setup the input task
            daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                 int(inputRate), numInputSamples)
            daq.startAnalogInput()

            # trigger the acquisition by sending a digital pulse
            daq.sendDigTrig(audioHW.daqTrigChanOut)

            timeout = numInputSamples / inputRate + 2
            dataIn = daq.readAnalogInput(timeout)
            mic_data = dataIn[0, :]

            mic_data = mic_data / micVoltsPerPascal

            daq.waitDoneInput()
            daq.stopAnalogInput()
            daq.clearAnalogInput()

            daq.waitDoneOutput(stopAndClear=True)

        print("ABR.calibrateClick: plotting data")
        npts = len(mic_data)

        # reshape and average the mic data
        ptsPerRep = npts // ABRparams.nReps
        mic_data = np.reshape(mic_data, (ABRparams.nReps, ptsPerRep))
        mic_data = np.mean(mic_data, 0)

        # plot mic data
        npts = len(mic_data)
        t = np.linspace(0, npts / inputRate, npts)
        pl = appObj.ABR_micInput
        pl.clear()
        pl.plot(t, mic_data, pen='b')

        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Response', 'Pa', **labelStyle)

        idx1 = round(inputRate * ABRparams.stimOffset)
        idx2 = idx1 + round(inputRate * ABRparams.stimDur)

        mic_data = mic_data[idx1:idx2]
        # apply high pass filter to get rid of LF components
        #        (b, a) = scipy.signal.butter(5, 100/inputRate, 'high')
        #        mic_data = scipy.signal.lfilter(b, a, mic_data)

        rms = np.mean(mic_data**2)**0.5
        rms = 20 * np.log10(rms / 2e-5)
        appObj.ABRclick_RMS = rms

        appObj.ABR_rms_label.setText("%0.1f dB" % rms)
        print("ABR.calibrateClick: RMS= ", rms)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    appObj.isCollecting = False
    QtGui.QApplication.processEvents()  # check for GUI events, such as button presses
    appObj.finishCollection()
Example #7
def runABR(appObj, testMode=False):
    print("runABR")

    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    ABRparams = ABRParams(appObj)

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.ABR_freqLow_comboBox.currentIndex()
        i2 = appObj.ABR_freqHigh_comboBox.currentIndex()
        print("runABR: i1= ", i1, "i2= ", i2)
        ampLow = appObj.ABRampLow_spinBox.value()
        ampHigh = appObj.ABRampHigh_spinBox.value()
        ampDelta = appObj.ABRampDelta_spinBox.value()

        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        if ampLow == ampHigh:
            ampArray = np.array([ampLow])
        else:
            ampArray = np.arange(ampLow, ampHigh, ampDelta)
            if ampArray[-1] != ampHigh:
                ampArray = np.hstack((ampArray, ampHigh))

        freqArray = freqArray[i1:i2 + 1]

    if ABRparams.click:
        freqArray = freqArray[0:1]  # only use a single frequency
        clickRMS = appObj.ABRclick_RMS

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal

    # set the input rate to three times the highest output frequency (see the commented-out lines below)

    #    inputRate = 3*freqArray[-1]
    #    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
    #    inputRate = np.max((inputRate, 6e3))   # input rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
    #    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
    #    print("runABR: inputRate(final)= ", inputRate)

    try:
        frameNum = 0
        numFrames = len(freqArray) * len(ampArray)
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan
        attenLines = audioHW.attenL_daqChan

        freq_idx = 0
        ABRdata = None
        appObj.status_label.setText("Running")
        appObj.progressBar.setValue(0)

        for freq in freqArray:
            spkOut_trial = makeABROutput(freq, ABRparams, audioHW)
            npts = len(spkOut_trial)
            spkOut = np.tile(spkOut_trial, ABRparams.nReps)
            # invert every other trial, necessary for ABR/CAP output
            for n in range(1, ABRparams.nReps, 2):
                idx1 = n * npts
                idx2 = (n + 1) * npts
                spkOut[idx1:idx2] = -spkOut[idx1:idx2]
#            plt.figure(5)
#            plt.clf()
#            plt.plot(spkOut)
            tOut = np.linspace(0, npts / outputRate, npts)
            print("runABR npts=%d len(spkOut_trial)= %d len(tOut)= %d" %
                  (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = int(inputRate * ABRparams.trialDur)

            for amp in ampArray:
                print("runABR freq=" + repr(freq), " amp= ", +amp,
                      " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                if ABRparams.click:
                    clickRMS = appObj.ABRclick_RMS
                    attenLvl = 0
                    vOut = 10**((amp - clickRMS) / 20)
                    minV = audioHW.speakerOutputRng[0]
                    if vOut < minV:
                        attenLvl = int(round(20 * np.log10(minV / vOut)))
                        vOut = minV
                else:
                    vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(
                        freq, amp, 0)

                print("runABR vOut= ", vOut, " atenLvl=", attenLvl)

                if vOut > audioHW.speakerOutputRng[1]:
                    print("runABR vOut= ", vOut, "  out of range")
                    continue
                elif attenLvl > audioHW.maxAtten:
                    print("runABR attenLvl= ", attenLvl,
                          "  gerater than maximum attenuation")
                    continue

                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    # daq.sendDigOutABRd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)

                pl = appObj.ABR_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)

                numInputSamples = ABRparams.nReps * int(
                    inputRate * len(spkOut_trial) / outputRate)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:

                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), vOut * spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    timeout = numInputSamples / inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]

                    mic_data = mic_data / micVoltsPerPascal
                    bioamp_data = bioamp_data / bioamp.gain

                    daq.waitDoneInput()
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                    daq.waitDoneOutput(stopAndClear=True)


#                npts = len(mic_data)
#                t = np.linspace(0, npts/inputRate, npts)
#                pl = appObj.ABR_micInput
#                pl.clear()
#                pl.plot(t, mic_data, pen='b')
#
#                labelStyle = appObj.xLblStyle
#                pl.setLabel('bottom', 'Time', 's', **labelStyle)
#                labelStyle = appObj.yLblStyle
#                pl.setLabel('left', 'Response', 'Pa', **labelStyle)

# def processABRData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, ABRdataIn):
                ABRptData, ABRdata = processABRData(mic_data, bioamp_data,
                                                    freq, freq_idx, amp_idx,
                                                    freqArray, ampArray,
                                                    inputRate, ABRdata,
                                                    ABRparams)

                print("runABR: plotting data")
                plotABRdata(appObj, ABRptData, ABRdata)

                #                if appObj.getSaveState():
                #                    if not isSaveDirInit:
                #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                #                        isSaveDirInit = True
                #
                #                    if saveOpts.saveRaw:
                #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
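                # compute the mic RMS over the stimulus window (stimOffset to
                # stimOffset + stimDur) and express it in dB SPL re 20 uPa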
                idx1 = round(inputRate * ABRparams.stimOffset)
                idx2 = idx1 + round(inputRate * ABRparams.stimDur)

                mic_data = mic_data[idx1:idx2]
                rms = np.mean(mic_data**2)**0.5
                rms = 20 * np.log10(rms / 2e-5)

                appObj.ABR_rms_label.setText("%0.1f dB" % rms)

                QtGui.QApplication.processEvents()  # check for GUI events, such as button presses

                # if done flag, break out of loop
                if appObj.doneFlag:
                    break

                frameNum += 1
                amp_idx += 1
                appObj.progressBar.setValue(frameNum / numFrames)

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

            freq_idx += 1

        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.ABRnumber
        name = 'ABR'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number,
                                                 timeStr, note)

        appObj.ABRnumber += 1
        #saveOpts.saveTracings = appObj.ABR_saveTracings_checkBox.isChecked()
        saveOpts.saveTracings = True
        saveDir = appObj.saveDir_lineEdit.text()
        saveABRDataXLS(ABRdata, ABRparams, excelWS, saveOpts)
        #saveABRData(ABRdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)

        plotName = 'ABR %d %s %s' % (number, timeStr, saveOpts.note)
        saveABRDataFig(ABRdata, ABRparams, saveDir, plotName, timeStr)
        saveABRDataPickle(ABRdata, ABRparams, plotName, saveOpts, timeStr)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    # collection finished; re-enable the GUI
    appObj.isCollecting = False
    QtGui.QApplication.processEvents()  # check for GUI events, such as button presses
    appObj.finishCollection()
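
# A minimal sketch (not the processABRData implementation above, which is not shown here)
# of how nReps back-to-back trial recordings can be split and averaged to pull an evoked
# response out of noise; the rate, trial length, and data are illustrative only.
import numpy as np

inputRate = 50e3                                   # Hz, illustrative
nReps = 20
ptsPerTrial = int(inputRate * 10e-3)               # 10 ms per trial, illustrative
recording = np.random.randn(nReps * ptsPerTrial)   # stand-in for the recorded bioamp trace

trials = recording[:nReps * ptsPerTrial].reshape(nReps, ptsPerTrial)
avgResponse = trials.mean(axis=0)                  # uncorrelated noise drops roughly as 1/sqrt(nReps)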
Example #8
def runSpeakerCal(appObj, testMode=False):
    print("runSpeakerCal")
    appObj.tabWidget.setCurrentIndex(0)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        freqArray = appObj.getFrequencyArray()

    # numSpk = audioParams.getNumSpeakers()
    numSpk = 1
    cIdx = appObj.speaker_comboBox.currentIndex()
    if cIdx > 0:
        numSpk = 2

    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    # mode = 'chirp'
    mode = ''
    spCal = None
    # freq_array2 = audioParams.freq[1, :]

    try:
        frameNum = 0
        isSaveDirInit = False
        trialDur = appObj.spCal_stimDuration_dblSpinBox.value() * 1e-3

        freq_array = freqArray
        freq_array2 = freqArray / 1.22
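        # second frequency row, offset from the first by a fixed ratio (1.22);
        # presumably used for the second speaker's calibration points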

        if numSpk == 1:
            freq_array = np.concatenate((freq_array, freq_array2))
            freq_array = np.sort(freq_array)
            freq_array2 = freq_array

        spCal = SpeakerCalData(np.vstack((freq_array, freq_array2)))
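        # SpeakerCalData holds one row of calibration frequencies (and responses) per speaker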

        for spkNum in range(0, numSpk):
            chanNameOut = audioHW.speakerL_daqChan
            #attenLines = audioHW.attenL_daqChan
            #attenLinesOther = audioHW.attenR_daqChan
            spkIdx = 0

            attenLvl1 = 0
            attenLvl2 = audioHW.maxAtten

            if spkNum == 1:
                #chanNameOut = audioHW.speakerR_daqChan
                #attenLines = audioHW.attenR_daqChan
                #attenLinesOther = audioHW.attenL_daqChan
                spkIdx = 1
                attenLvl1 = audioHW.maxAtten
                attenLvl2 = 0

            freq_idx = 0

            if not testMode:
                audioHW.setAttenuatorLevel(attenLvl1, attenLvl2, daq)

                # daq.sendDigOutCmd(attenLines, attenSig)
                # appObj.oct_hw.SetAttenLevel(0, attenLines)

            if mode == 'chirp':
                tChirp = 1
                f0 = 100
                f1 = 100e3
                k = (f1 - f0) / tChirp
                nChirpPts = round(outputRate * tChirp)
                t = np.linspace(0, tChirp, nChirpPts)
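                # linear chirp: instantaneous frequency f(t) = f0 + k*t, so the phase
                # argument is 2*pi*(f0*t + (k/2)*t**2)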
                spkOut = np.cos(2 * np.pi * (f0 * t + (k / 2) * t**2))

                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')

                numInputSamples = int(inputRate * len(spkOut) / outputRate)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    mic_data = daq.readAnalogInput()
                    mic_data = mic_data[0, :]
                    mic_data_chirp = mic_data / micVoltsPerPascal

                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                npts = len(mic_data)
                t = np.linspace(0, npts / inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                # play reference tone
                refFreq = 4e3
                tRef = 50e-3

                nRefPts = round(outputRate * tRef)
                t = np.linspace(0, tRef, nRefPts)
                spkOut = np.cos(2 * np.pi * refFreq * t)

                # apply a 1 ms linear onset/offset ramp to avoid clicks at the tone edges
                i1 = round(outputRate * 1e-3)
                i2 = nRefPts - i1
                env = np.linspace(0, 1, i1)
                spkOut[0:i1] = spkOut[0:i1] * env
                spkOut[i2:] = spkOut[i2:] * (1 - env)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    mic_data = daq.readAnalogInput()
                    mic_data = mic_data[0, :]
                    mic_data_ref = mic_data / micVoltsPerPascal

                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                micData, spCal = processSpkCalDataChirp(
                    mic_data_chirp, mic_data_ref, inputRate, spCal, spkIdx, f0,
                    f1, refFreq)

                pl = appObj.spCal_micFFT
                pl.clear()
                df = micData.fft_freq[1] - micData.fft_freq[0]
                nf = len(micData.fft_freq)
                i1 = int(freq_array[0] * 0.9 / df)
                i2 = int(freq_array[-1] * 1.1 / df)
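                # plot the spectrum only between 0.9x the lowest and 1.1x the highest
                # calibration frequency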
                print("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" %
                      (df, i1, i2, nf))
                pl.plot(micData.fft_freq[i1:i2],
                        micData.fft_mag[i1:i2],
                        pen='b')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)

                pl = appObj.spCal_spkResp
                pl.clear()
                #                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                pl.plot(freq_array,
                        spCal.magResp[spkIdx, :],
                        pen="b",
                        symbol='o')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
            else:
                for freq in freq_array:
                    print("runSpeakerCal freq=" + repr(freq))
                    spkOut = makeSpeakerCalibrationOutput(
                        freq, audioHW, trialDur)
                    npts = len(spkOut)
                    t = np.linspace(0, npts / outputRate, npts)

                    pl = appObj.spCal_output
                    pl.clear()
                    endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                    pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')

                    numInputSamples = int(inputRate * len(spkOut) / outputRate)

                    if testMode:
                        # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                        pass
                    else:

                        # setup the output task
                        daq.setupAnalogOutput([chanNameOut],
                                              audioHW.daqTrigChanIn,
                                              int(outputRate), spkOut)
                        daq.startAnalogOutput()

                        # setup the input task
                        daq.setupAnalogInput(chanNamesIn,
                                             audioHW.daqTrigChanIn,
                                             int(inputRate), numInputSamples)
                        daq.startAnalogInput()

                        # trigger the acquisition by sending a digital pulse
                        daq.sendDigTrig(audioHW.daqTrigChanOut)

                        mic_data = daq.readAnalogInput()
                        mic_data = mic_data[0, :]
                        mic_data = mic_data / micVoltsPerPascal

                    if not testMode:
                        daq.stopAnalogInput()
                        daq.stopAnalogOutput()
                        daq.clearAnalogInput()
                        daq.clearAnalogOutput()

                    npts = len(mic_data)
                    t = np.linspace(0, npts / inputRate, npts)
                    pl = appObj.spCal_micInput
                    pl.clear()
                    pl.plot(t, mic_data, pen='b')

                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                    micData, spCal = processSpkCalData(mic_data, freq,
                                                       freq_idx, inputRate,
                                                       spCal, spkIdx, audioHW)

                    pl = appObj.spCal_micFFT
                    pl.clear()
                    df = micData.fft_freq[1] - micData.fft_freq[0]
                    nf = len(micData.fft_freq)
                    i1 = int(freq_array[0] * 0.9 / df)
                    i2 = int(freq_array[-1] * 1.1 / df)
                    print(
                        "SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" %
                        (df, i1, i2, nf))
                    pl.plot(micData.fft_freq[i1:i2],
                            micData.fft_mag[i1:i2],
                            pen='b')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)

                    pl = appObj.spCal_spkResp
                    pl.clear()
                    #                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                    pl.plot(freq_array,
                            spCal.magResp[spkIdx, :],
                            pen="b",
                            symbol='o')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)

                    freq_idx += 1

                    #                if appObj.getSaveState():
                    #                    if not isSaveDirInit:
                    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                    #                        isSaveDirInit = True
                    #
                    #                    if saveOpts.saveRaw:
                    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)

                    QtGui.QApplication.processEvents()  # check for GUI events, such as button presses

                    # if done flag, break out of loop
                    if appObj.doneFlag:
                        break

                frameNum += 1

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

        if not appObj.doneFlag:
            saveDir = appObj.configPath
            saveSpeakerCal(spCal, saveDir)
            appObj.audioHW.loadSpeakerCalFromProcData(spCal)
            appObj.spCal = spCal

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during calibration. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents()  # check for GUI events, such as button presses
    appObj.finishCollection()
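
# A minimal, self-contained sketch of the linear chirp used in the 'chirp' branch of
# runSpeakerCal above; the sample rate is illustrative only.
import numpy as np

fs = 200e3                                # Hz, illustrative output rate
f0, f1, tChirp = 100.0, 100e3, 1.0        # sweep f0 -> f1 over tChirp seconds
k = (f1 - f0) / tChirp                    # sweep rate in Hz/s
t = np.linspace(0, tChirp, round(fs * tChirp))
chirp = np.cos(2 * np.pi * (f0 * t + (k / 2) * t ** 2))   # phase = integral of f(t) = f0 + k*t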
Example #9
def runCM(appObj, testMode=False):
    print("runCM")
    
    appObj.tabWidget.setCurrentIndex(4)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.CM_freqLow_comboBox.currentIndex()
        i2 = appObj.CM_freqHigh_comboBox.currentIndex()
        print("runCM: i1= ", i1, "i2= ", i2)

        ampLow = appObj.CMampLow_spinBox.value()
        ampHigh = appObj.CMampHigh_spinBox.value()
        ampDelta = appObj.CMampDelta_spinBox.value()
        
        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        ampArray = np.arange(ampLow, ampHigh, ampDelta)
        if ampArray[-1] != ampHigh:
            ampArray = np.hstack((ampArray, ampHigh))
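        # np.arange excludes the stop value, so append ampHigh if it was not hit exactly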
        
        freqArray = freqArray[i1:i2+1]

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    trialDur = appObj.CMstimDuration_dblSpinBox.value() * 1e-3
    stimOffset = appObj.CMstimOffset_dblSpinBox.value() * 1e-3
    nReps = appObj.CMtrialReps_spinBox.value()

    # set input rate to three times the highest output frequency
    
    #inputRate = 3*freqArray[-1]
    # inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
    
    
    try:
        frameNum = 0
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan 
        attenLines = audioHW.attenL_daqChan
        
        freq_idx = 0
        CMdata = None
        
        for freq in freqArray:
            spkOut_trial = makeCMOutput(freq, trialDur, stimOffset, audioHW)
            spkOut = np.tile(spkOut_trial, nReps)
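            # concatenate nReps identical trials so the DAQ plays them back-to-back
            # in one output buffer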
            
            npts = len(spkOut_trial)
            tOut = np.linspace(0, npts/outputRate, npts)
            print("runCM npts=%d len(spkOut_trial)= %d len(tOut)= %d" % (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = inputRate
            
            for amp in ampArray:
                print("runCM freq=" + repr(freq), " amp= ", amp, " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(freq, amp, 0)
                
                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    # AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    audioHW.setAttenuatorLevel(attenLvl, audioHW.maxAtten, daq)
                    # daq.sendDigOutCmd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)
                
                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)
                        
                
                numInputSamples = nReps*int(inputRate*len(spkOut_trial)/outputRate)
                
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
    
                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), vOut*spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    timeout = numInputSamples/inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]
                    
                    mic_data = mic_data/micVoltsPerPascal
                    bioamp_data = bioamp_data/bioamp.gain
                
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                
                npts = len(mic_data)
                t = np.linspace(0, npts/inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
    
    # def processCMData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, CMdataIn):            
                CMptData, CMdata = processCMData(mic_data, bioamp_data, nReps, freq, freq_idx, amp_idx, freqArray, ampArray, inputRate, CMdata)

                print("runCM: plotting data")
                plotCMdata(appObj, CMptData, CMdata)
                
    #                if appObj.getSaveState():
    #                    if not isSaveDirInit:
    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
    #                        isSaveDirInit = True
    #    
    #                    if saveOpts.saveRaw:
    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                    
                QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                
                # if done flag, break out of loop
                if appObj.doneFlag:
                    break
                
                frameNum += 1
                amp_idx += 1
                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
            
            freq_idx += 1


        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.CMnumber
        name = 'CM'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number, timeStr, note)
    
        appObj.CMnumber += 1                
        saveOpts.saveTracings = appObj.CM_saveTracings_checkBox.isChecked()
        saveDir = appObj.saveDir_lineEdit.text()
        saveCMDataXLS(CMdata, trialDur, nReps, excelWS, saveOpts)
        #saveCMData(CMdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)
        
        plotName = 'CM %d %s %s' % (number, timeStr, saveOpts.note)
        saveCMDataFig(CMdata, trialDur, nReps, saveDir, plotName, timeStr)
        saveCMDataPickle(CMdata, trialDur, nReps, plotName, saveOpts, timeStr)
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(appObj, "Error", "Error during collection. Check command line output for details")
        
    # collection finished; re-enable the GUI
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()