def runSpeakerCal(appObj, testMode=False):
    print("runSpeakerCal")
    appObj.tabWidget.setCurrentIndex(0)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        freqArray = appObj.getFrequencyArray()
        
    # numSpk = audioParams.getNumSpeakers()
    numSpk = 1
    cIdx = appObj.speaker_comboBox.currentIndex()
    if cIdx > 0:
        numSpk = 2
    
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    # mode = 'chirp'
    mode = ''
    spCal = None
    # freq_array2 = audioParams.freq[1, :]
    
    try:
        frameNum = 0
        isSaveDirInit = False
        trialDur = appObj.spCal_stimDuration_dblSpinBox.value()*1e-3
        
        freq_array = freqArray
        freq_array2 = freqArray/1.22
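        # NOTE (added): the 1.22 divisor is presumably the standard DPOAE f2/f1 primary
        # ratio, so the second grid covers the lower primary (f1 = f2/1.22) that the
        # second speaker plays during two-tone stimulation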

        if numSpk == 1:
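            # with only one speaker, both frequency grids are played by the same
            # transducer, so merge them into a single sorted calibration grid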
            freq_array = np.concatenate((freq_array, freq_array2))
            freq_array = np.sort(freq_array)
            freq_array2 = freq_array
        
        spCal = SpeakerCalData(np.vstack((freq_array, freq_array2)))            
        
        for spkNum in range(0, numSpk):
            chanNameOut = audioHW.speakerL_daqChan 
            #attenLines = audioHW.attenL_daqChan
            #attenLinesOther = audioHW.attenR_daqChan
            spkIdx = 0

            attenLvl1 = 0
            attenLvl2 = audioHW.maxAtten
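            # speaker under test gets 0 dB attenuation; the other channel is fully attenuated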
                
            if spkNum == 1:   # second speaker is index 1 in this zero-based loop
                #chanNameOut = audioHW.speakerR_daqChan
                #attenLines = audioHW.attenR_daqChan
                #attenLinesOther = audioHW.attenL_daqChan
                spkIdx = 1
                attenLvl1 = audioHW.maxAtten
                attenLvl2 = 0
    
            freq_idx = 0


            if not testMode:
                audioHW.setAttenuatorLevel(attenLvl1, attenLvl2, daq)
                
                # daq.sendDigOutCmd(attenLines, attenSig)
                # appObj.oct_hw.SetAttenLevel(0, attenLines)
                
            if mode == 'chirp':
                tChirp = 1    
                f0 = 100
                f1 = 100e3
                k = (f1- f0)/tChirp
                nChirpPts = round(outputRate*tChirp)
                t = np.linspace(0, tChirp, nChirpPts)
                spkOut = np.cos(2*np.pi*(f0*t + (k/2)*t**2))   # linear chirp sweeping from f0 to f1
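                # (added) an equivalent sweep could also be generated with
                # scipy.signal.chirp(t, f0=f0, t1=tChirp, f1=f1, method='linear')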
                
                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                        
                numInputSamples = int(inputRate*len(spkOut)/outputRate) 
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    mic_data = daq.readAnalogInput()
                    mic_data = mic_data[0, :]
                    mic_data_chirp = mic_data/micVoltsPerPascal

                
                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                    
                npts = len(mic_data)
                t = np.linspace(0, npts/inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
                
                # play reference tone
                refFreq = 4e3
                tRef = 50e-3
                
                nRefPts = round(outputRate*tRef)
                t = np.linspace(0, tRef, nRefPts)
                spkOut = np.cos(2*np.pi*refFreq*t)
                
                # apply envelope
                i1 = round(outputRate*1e-3)
                i2 = nRefPts- i1
                env = np.linspace(0, 1, i1)
                spkOut[0:i1] = spkOut[0:i1]*env
                spkOut[i2:] = spkOut[i2:]*(1-env)
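                # the 1 ms linear on/off ramps above taper the reference tone so an abrupt
                # onset/offset does not add a broadband click to the measurement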
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    mic_data = daq.readAnalogInput()
                    mic_data_ref = mic_data/micVoltsPerPascal
                    
                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                
                micData, spCal = processSpkCalDataChirp(mic_data_chirp, mic_data_ref, inputRate, spCal, spkIdx, f0, f1, refFreq)
                    
                pl = appObj.spCal_micFFT
                pl.clear()
                df = micData.fft_freq[1] - micData.fft_freq[0]
                nf = len(micData.fft_freq)
                i1 = int(freq_array[0]*0.9/df)
                i2 = int(freq_array[-1]*1.1/df)
                print("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
                pl.plot(micData.fft_freq[i1:i2], micData.fft_mag[i1:i2], pen='b')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
                
                pl = appObj.spCal_spkResp
                pl.clear()
#                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                pl.plot(freq_array, spCal.magResp[spkIdx, :], pen="b", symbol='o')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
            else:
                for freq in freq_array:
                    print("runSpeakerCal freq=" + repr(freq))
                    spkOut = makeSpeakerCalibrationOutput(freq, audioHW, trialDur)    
                    npts = len(spkOut)
                    t = np.linspace(0, npts/outputRate, npts)
                    
                    pl = appObj.spCal_output
                    pl.clear()
                    endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                    pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                            
                    numInputSamples = int(inputRate*len(spkOut)/outputRate) 
                    
                    if testMode:
                        # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                        pass
                    else:
    
                        # setup the output task
                        daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                        daq.startAnalogOutput()
                        
                        # setup the input task
                        daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                        daq.startAnalogInput()
                    
                        # trigger the acquisition by sending a digital pulse
                        daq.sendDigTrig(audioHW.daqTrigChanOut)
                        
                        mic_data = daq.readAnalogInput()
                        mic_data = mic_data[0, :]
                        mic_data = mic_data/micVoltsPerPascal
    
                    
                    if not testMode:
                        daq.stopAnalogInput()
                        daq.stopAnalogOutput()
                        daq.clearAnalogInput()
                        daq.clearAnalogOutput()
                    
                    npts = len(mic_data)
                    t = np.linspace(0, npts/inputRate, npts)
                    pl = appObj.spCal_micInput
                    pl.clear()
                    pl.plot(t, mic_data, pen='b')
                    
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Response', 'Pa', **labelStyle)
                    
                    micData, spCal = processSpkCalData(mic_data, freq, freq_idx, inputRate, spCal, spkIdx, audioHW)
                    
                    pl = appObj.spCal_micFFT
                    pl.clear()
                    df = micData.fft_freq[1] - micData.fft_freq[0]
                    nf = len(micData.fft_freq)
                    i1 = int(freq_array[0]*0.9/df)
                    i2 = int(freq_array[-1]*1.1/df)
                    print("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
                    pl.plot(micData.fft_freq[i1:i2], micData.fft_mag[i1:i2], pen='b')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
                    
                    pl = appObj.spCal_spkResp
                    pl.clear()
    #                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                    pl.plot(freq_array, spCal.magResp[spkIdx, :], pen="b", symbol='o')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
                    
                    freq_idx += 1
                    
    #                if appObj.getSaveState():
    #                    if not isSaveDirInit:
    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
    #                        isSaveDirInit = True
    #    
    #                    if saveOpts.saveRaw:
    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                        
                    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                    
                    # if done flag, break out of loop
                    if appObj.doneFlag:
                        break
                
                frameNum += 1

                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
                
        if not appObj.doneFlag:
            saveDir = appObj.configPath
            saveSpeakerCal(spCal, saveDir)
            appObj.audioHW.loadSpeakerCalFromProcData(spCal)
            appObj.spCal = spCal            
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during calibration. Check command line output for details")           
        
    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()


    
Example #2
def runABR(appObj, testMode=False):
    print("runABR")
    
    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    ABRparams = ABRParams(appObj)
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.ABR_freqLow_comboBox.currentIndex()
        i2 = appObj.ABR_freqHigh_comboBox.currentIndex()
        print("runABR: i1= ", i1, "i2= ", i2)
        ampLow = appObj.ABRampLow_spinBox.value()
        ampHigh = appObj.ABRampHigh_spinBox.value()
        ampDelta = appObj.ABRampDelta_spinBox.value()
        
        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        if ampLow == ampHigh:
            ampArray = np.array([ampLow])
        else:
            ampArray = np.arange(ampLow, ampHigh, ampDelta)
            if ampArray[-1] != ampHigh:
                ampArray = np.hstack((ampArray, ampHigh))
        
        freqArray = freqArray[i1:i2+1]
    
    if ABRparams.click:
        freqArray = freqArray[0:1]  # only use a single frequency
        clickRMS = appObj.ABRclick_RMS
        
    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    
    

    # set the input rate to three times the highest output frequency, allowing some margin above the Nyquist minimum
    
#    inputRate = 3*freqArray[-1]
#    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
#    inputRate = np.max((inputRate, 6e3))   # input rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
#    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
#    print("runABR: inputRate(final)= ", inputRate)
    
    try:
        frameNum = 0
        numFrames = len(freqArray)*len(ampArray)
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan 
        attenLines = audioHW.attenL_daqChan
        
        freq_idx = 0
        ABRdata = None
        appObj.status_label.setText("Running")
        appObj.progressBar.setValue(0)
        
        for freq in freqArray:
            spkOut_trial = makeABROutput(freq, ABRparams, audioHW)
            npts = len(spkOut_trial)
            spkOut = np.tile(spkOut_trial, ABRparams.nReps)
            # invert every other trial, necessary for ABR/CAP output 
            for n in range(1, ABRparams.nReps, 2):
                idx1 = n*npts
                idx2 = (n+1)*npts
                spkOut[idx1:idx2] = -spkOut[idx1:idx2]
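            # (added note) inverting every other repetition makes the stimulus artifact
            # and cochlear microphonic cancel in the averaged response, while the neural
            # ABR/CAP component, which does not invert with stimulus polarity, survives.
            # An equivalent vectorized construction (sketch, not used here) would be:
            #   sign = np.where(np.arange(ABRparams.nReps) % 2 == 0, 1.0, -1.0)
            #   spkOut = (sign[:, np.newaxis] * spkOut_trial).ravel()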
#            plt.figure(5)
#            plt.clf()
#            plt.plot(spkOut)
            tOut = np.linspace(0, npts/outputRate, npts)
            print("runABR npts=%d len(spkOut_trial)= %d len(tOut)= %d" % (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = int(inputRate*ABRparams.trialDur)
            
            for amp in ampArray:
                print("runABR freq=" + repr(freq), " amp= ", + amp, " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                if ABRparams.click:
                    clickRMS = appObj.ABRclick_RMS
                    attenLvl = 0
                    vOut = 10**((amp - clickRMS)/20)
                    minV = audioHW.speakerOutputRng[0]
                    if vOut < minV:
                        attenLvl = int(round(20*np.log10(minV/vOut)))
                        vOut = minV
                else:
                    vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(freq, amp, 0)
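                # (added, hedged) clickRMS appears to be the dB SPL measured for a unit-
                # amplitude click (see calibrateClick), so 10**((amp - clickRMS)/20) scales
                # the output voltage to the requested level; if that voltage falls below the
                # speaker's usable range, the remaining reduction is moved into the attenuator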
                
                print("runABR vOut= ", vOut, " atenLvl=", attenLvl)
                
                if vOut > audioHW.speakerOutputRng[1]:
                    print("runABR vOut= ", vOut, "  out of range")
                    continue
                elif attenLvl > audioHW.maxAtten:
                    print("runABR attenLvl= ", attenLvl, "  gerater than maximum attenuation")
                    continue
                    
                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    # daq.sendDigOutABRd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)
                
                pl = appObj.ABR_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)
                
                numInputSamples = ABRparams.nReps*int(inputRate*len(spkOut_trial)/outputRate)
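                # the input and output tasks run at different sample rates, so convert the
                # per-trial output length to input samples before multiplying by nReps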
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
    
                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), vOut*spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    timeout = numInputSamples/inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]
                    
                    mic_data = mic_data/micVoltsPerPascal
                    bioamp_data = bioamp_data/bioamp.gain
                
                    daq.waitDoneInput()
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                    
                    daq.waitDoneOutput(stopAndClear=True)
                
#                npts = len(mic_data)
#                t = np.linspace(0, npts/inputRate, npts)
#                pl = appObj.ABR_micInput
#                pl.clear()
#                pl.plot(t, mic_data, pen='b')
#                
#                labelStyle = appObj.xLblStyle
#                pl.setLabel('bottom', 'Time', 's', **labelStyle)
#                labelStyle = appObj.yLblStyle
#                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
    
    # def processABRData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, ABRdataIn):            
                ABRptData, ABRdata = processABRData(mic_data, bioamp_data, freq, freq_idx, amp_idx, freqArray, ampArray, inputRate, ABRdata, ABRparams)

                print("runABR: plotting data")
                plotABRdata(appObj, ABRptData, ABRdata)
                
    #                if appObj.getSaveState():
    #                    if not isSaveDirInit:
    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
    #                        isSaveDirInit = True
    #    
    #                    if saveOpts.saveRaw:
    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                idx1 = round(inputRate*ABRparams.stimOffset)
                idx2 = idx1 + round(inputRate*ABRparams.stimDur)
                
                mic_data = mic_data[idx1:idx2] 
                rms = np.mean(mic_data ** 2) ** 0.5
                rms = 20*np.log10(rms/2e-5)
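                # 2e-5 Pa (20 µPa) is the standard reference pressure for dB SPL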
                
                appObj.ABR_rms_label.setText("%0.1f dB" % rms)                    
                
                QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                
                # if done flag, break out of loop
                if appObj.doneFlag:
                    break
                
                frameNum += 1
                amp_idx += 1
                appObj.progressBar.setValue(int(100*frameNum/numFrames))  # assumes the progress bar's default 0-100 range
                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
            
            freq_idx += 1


        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.ABRnumber
        name = 'ABR'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number, timeStr, note)
    
        appObj.ABRnumber += 1                
        #saveOpts.saveTracings = appObj.ABR_saveTracings_checkBox.isChecked()
        saveOpts.saveTracings = True
        saveDir = appObj.saveDir_lineEdit.text()
        saveABRDataXLS(ABRdata, ABRparams, excelWS, saveOpts)
        #saveABRData(ABRdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)
        
        plotName = 'ABR %d %s %s' % (number, timeStr, saveOpts.note)
        saveABRDataFig(ABRdata, ABRparams, saveDir, plotName, timeStr)
        saveABRDataPickle(ABRdata, ABRparams, plotName, saveOpts, timeStr)
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during collection. Check command line output for details")           
        
    # update the audio hardware speaker calibration                     
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
Example #3
def calibrateClick(appObj, testMode=False):
    print("ABR.calibrateClick")
    
    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        pass

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    ABRparams = ABRParams(appObj)
    ABRparams.click = True
    ABRparams.nReps = 20
    print("ABR.calibrateClick ABRparams=", ABRparams.__dict__)
    # set the input rate to three times the highest output frequency, allowing some margin above the Nyquist minimum
    
#    inputRate = 3*freqArray[-1]
#    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
#    inputRate = np.max((inputRate, 6e3))   # input rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
#    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
#    print("runABR: inputRate(final)= ", inputRate)
    
    try:
        chanNameOut = audioHW.speakerL_daqChan 
        attenLines = audioHW.attenL_daqChan
        
        spkOut_trial = makeABROutput(4e3, ABRparams, audioHW)
        spkOut = np.tile(spkOut_trial, ABRparams.nReps)
        npts = len(spkOut_trial)
        tOut = np.linspace(0, npts/outputRate, npts)
            
        # attenSig = AudioHardware.makeLM1972AttenSig(0)
        if not testMode:
            AudioHardware.Attenuator.setLevel(0, attenLines)
                
        pl = appObj.ABR_output
        pl.clear()
        endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
        #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
        pl.plot(tOut, spkOut_trial, pen='b')
                
        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Output', 'V', **labelStyle)
                
        numInputSamples = int(inputRate*len(spkOut)/outputRate)
        
        if testMode:
            # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
            pass
        else:
            # setup the output task
            daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
            daq.startAnalogOutput()
            
            # setup the input task
            daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
            daq.startAnalogInput()
        
            # trigger the acquisition by sending a digital pulse
            daq.sendDigTrig(audioHW.daqTrigChanOut)
            
            timeout = numInputSamples/inputRate + 2
            dataIn = daq.readAnalogInput(timeout)
            mic_data = dataIn[0, :]
            
            mic_data = mic_data/micVoltsPerPascal
        
            daq.waitDoneInput()
            daq.stopAnalogInput()
            daq.clearAnalogInput()
            
            daq.waitDoneOutput(stopAndClear=True)
        
        print("ABR.calibrateClick: plotting data")
        npts = len(mic_data)
        
        # reshape and average the mic data
        ptsPerRep = npts // ABRparams.nReps
        mic_data = np.reshape(mic_data, (ABRparams.nReps, ptsPerRep))
        mic_data = np.mean(mic_data, 0)
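        # averaging across the nReps repetitions suppresses uncorrelated noise by roughly
        # sqrt(nReps); the reshape assumes the record length is an integer multiple of nReps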
        
        # plot mic data
        npts = len(mic_data)
        t = np.linspace(0, npts/inputRate, npts)
        pl = appObj.ABR_micInput
        pl.clear()
        pl.plot(t, mic_data, pen='b')
        
        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Response', 'Pa', **labelStyle)
        
        idx1 = round(inputRate*ABRparams.stimOffset)
        idx2 = idx1 + round(inputRate*ABRparams.stimDur)
        
        mic_data = mic_data[idx1:idx2] 
        # apply high pass filter to get rid of LF components
#        (b, a) = scipy.signal.butter(5, 100/inputRate, 'high')
#        mic_data = scipy.signal.lfilter(b, a, mic_data) 

        rms = np.mean(mic_data ** 2) ** 0.5
        rms = 20*np.log10(rms/2e-5)
        appObj.ABRclick_RMS = rms
        
        appObj.ABR_rms_label.setText("%0.1f dB" % rms)
        print("ABR.calibrateClick: RMS= ", rms)
        
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during collection. Check command line output for details")           
        
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
Example #4
def run(appObj, testMode=False):
    print("ReadMicBioAmp.run")
    appObj.tabWidget.setCurrentIndex(1)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    # outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate
    
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    bioampGain = bioamp.gain
    
    firstIter = True
    while not appObj.doneFlag:
        try:
            if testMode:
                # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                continue
            else:
                # inputTime = 100e-3
                inputTime = 1e-3*appObj.readMicBioamp_duration_dblSpinBox.value()
                numInputSamples = round(inputRate*inputTime)
                
                # setup the input task
                daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                daq.startAnalogInput()
            
                # trigger the acquisition by sending a digital pulse
                daq.sendDigTrig(audioHW.daqTrigChanOut)
                
                data = daq.readAnalogInput()
                print("data.shape= ", data.shape)
                mic_data = data[0, :]
                bioamp_data = data[1, :]
                #mic_data = data[:, 0]
                #bioamp_data = data[:, 1]
                
                mic_data = mic_data/micVoltsPerPascal
                bioamp_data = bioamp_data/bioampGain
    
                daq.stopAnalogInput()
                daq.clearAnalogInput()
            
            npts = len(mic_data)
            t = np.linspace(0, npts/inputRate, npts)
            
            pl = appObj.inputs_micPlot
            if firstIter:
                pl.clear()
                
                micPI = pl.plot(t, mic_data, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
            else:
                data = np.vstack((t, mic_data))
                micPI.setData(data.transpose())
            
            pl = appObj.inputs_bioampPlot
            if firstIter:
                pl.clear()
                bioampPI = pl.plot(t, bioamp_data, pen='b')
    
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'V', **labelStyle)
            else:
                data = np.vstack((t, bioamp_data))
                bioampPI.setData(data.transpose())
            
            numfftpts = npts*2
            win_fcn = 2*np.hanning(npts)
            mic_fft = np.fft.fft(win_fcn*mic_data, numfftpts)
            endIdx = int(np.ceil(numfftpts/2))   # must be an integer for slicing and np.linspace below
            mic_fft = mic_fft[0:endIdx]
            mic_fft_mag = 2*np.abs(mic_fft)/numfftpts
            
            fftrms_corr = 1/(np.sqrt(2))
            mic_fft_mag = fftrms_corr*mic_fft_mag 
            mic_fft_mag_log = 20*np.log10(mic_fft_mag/20e-6 )  # 20e-6 pa
            
            mic_freq = np.linspace(0, inputRate/2, endIdx)
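            # (added note) scaling summary: the 2*hanning window compensates the Hann
            # window's average gain of 0.5, 2*|FFT|/N gives a single-sided amplitude
            # spectrum, 1/sqrt(2) converts peak amplitude to RMS, and 20*log10(.../20e-6)
            # references it to 20 µPa (dB SPL). N here is the zero-padded length (2*npts)
            # rather than the record length, which lowers absolute levels by about 6 dB
            # relative to the usual normalization.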
            
            pl = appObj.inputs_micFFTPlot
            if firstIter:
                pl.clear()
                micFFTPI = pl.plot(mic_freq, mic_fft_mag_log, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
            else:
                data = np.vstack((mic_freq, mic_fft_mag_log))
                micFFTPI.setData(data.transpose())
                
            Wn = [300, 3000]
            Wn = np.array(Wn)/(inputRate/2)   # scipy expects cutoff frequencies normalized to the Nyquist frequency (fs/2)
            #Wn = [0.001, 0.01]
#            (b, a) = scipy.signal.butter(5, Wn=Wn, btype='bandpass')
            (b, a) = scipy.signal.iirfilter(2, Wn,  btype='bandpass', ftype='bessel')

            #b = scipy.signal.firwin(21, Wn)
            #a = [1.0]
            bioamp_filt = scipy.signal.lfilter(b, a, bioamp_data) 

            print("bioamp_data.shape= ", bioamp_data.shape, " t.shape=", t.shape, " Wn=", Wn)
            print("b= ", b)
            print("a= ", a)

            
            if firstIter:
                pl = appObj.inputs_bioampFilteredPlot
                pl.clear()
                bioampFFTPI = pl.plot(t, bioamp_filt, pen='b')
    
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'V', **labelStyle)
                
            else:
                #data = np.vstack((t, bioamp_filt))
                bioampFFTPI.setData(t, bioamp_filt)
                
            firstIter = False
            
        except Exception as ex:
            traceback.print_exc(file=sys.stdout)
            QtGui.QMessageBox.critical (appObj, "Error", "Error. Check command line output for details")
            appObj.doneFlag = True
            
    
        QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    
    # update the audio hardware speaker calibration                     
    appObj.finishCollection()
Example #5
def runDPOAE(appObj, testMode=False):
    print("runDPOAE")

    try:
        appObj.tabWidget.setCurrentIndex(2)
        appObj.doneFlag = False
        appObj.isCollecting = True
        # trigRate = octfpga.GetTriggerRate()
        audioHW = appObj.audioHW
        bioamp = appObj.bioamp
        outputRate = audioHW.DAQOutputRate
        inputRate = audioHW.DAQInputRate

        # freq_array2 = audioParams.freq[1, :]
        freqArray = appObj.getFrequencyArray()

        if testMode:
            testDataDir = os.path.join(appObj.basePath, 'exampledata', 'DPOAE')
    #        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
    #        f = open(filePath, 'rb')
    #        audioParams = pickle.load(f)
    #        f.close()
        else:
            # freqArray = appObj.getFrequencyArray()
            i1 = appObj.DPOAE_freqLow_comboBox.currentIndex()
            i2 = appObj.DPOAE_freqHigh_comboBox.currentIndex()
            print("runDPOAE: i1= ", i1, "i2= ", i2)

            ampLow = appObj.DPOAE_ampLow_spinBox.value()
            ampHigh = appObj.DPOAE_ampHigh_spinBox.value()
            ampDelta = appObj.DPOAE_ampDelta_spinBox.value()

            # ampArray = np.arange(ampLow, ampHigh, ampDelta)
            #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
            #ampArray = np.linspace(ampLow, ampHigh, numSteps)
            if ampLow == ampHigh:
                ampArray = np.array([ampLow])
            else:
                ampArray = np.arange(ampLow, ampHigh, ampDelta)
                if ampArray[-1] != ampHigh:
                    ampArray = np.hstack((ampArray, ampHigh))

            freqArray = freqArray[i1:i2 + 1]

        # numSpk = audioParams.getNumSpeakers()
        if not testMode:
            from DAQHardware import DAQHardware
            daq = DAQHardware()

        chanNamesIn = [audioHW.mic_daqChan, bioamp.daqChan]
        micVoltsPerPascal = audioHW.micVoltsPerPascal
        trialDur = appObj.DPOAE_stimDuration_dblSpinBox.value() * 1e-3
        # nReps = appObj.DPOAEtrialReps_spinBox.value()

        # set the input rate to a multiple of the highest output frequency, a little more than
        # Nyquist so the stimulus frequency sits closer to the center of the band
        inputRate = 4 * freqArray[-1]
        inputRate = outputRate / int(
            np.floor(outputRate / inputRate)
        )  # pick closest input rate that evenly divides output rate
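        # (added, hedged example with made-up rates) e.g. for outputRate = 200 kHz and a
        # highest stimulus frequency of 16 kHz, the target is 64 kHz, floor(200/64) = 3,
        # so inputRate = 200 kHz / 3 ≈ 66.7 kHz and the output rate is an exact integer
        # multiple of the input rate, keeping the two DAQ tasks sample-aligned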

        frameNum = 0
        isSaveDirInit = False

        attenLines1 = audioHW.attenL_daqChan
        attenLines2 = audioHW.attenR_daqChan

        freq_idx = 0
        DPOAEdata = None
        numSpk = appObj.speaker_comboBox.currentIndex() + 1
        chanNameOut = [audioHW.speakerL_daqChan]
        if numSpk > 1:
            chanNameOut = [audioHW.speakerL_daqChan, audioHW.speakerR_daqChan]
        print("runDPOAE numSpk=", numSpk)

        for freq in freqArray:
            sp1, sp2 = makeDPOAEOutput(freq, trialDur, audioHW)
            # spkOut = np.tile(spkOut_trial, nReps)

            npts = len(sp1)
            tOut = np.linspace(0, npts / outputRate, npts)
            print("runDPOAE npts=%d len(spkOut)= %d len(tOut)= %d" %
                  (npts, len(sp1), len(tOut)))
            amp_idx = 0
            # ptsPerRep = inputRate

            for amp in ampArray:
                print("runDPOAE freq=" + repr(freq), " amp= ", +amp,
                      " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                vOut1, attenLvl1 = audioHW.getCalibratedOutputVoltageAndAttenLevel(
                    freq, amp, 0)
                spkNum = numSpk - 1
                vOut2, attenLvl2 = audioHW.getCalibratedOutputVoltageAndAttenLevel(
                    freq / 1.22, amp, spkNum)
                if vOut1 > 0 and vOut2 > 0:
                    # attenSig = AudioHardware.makeLM1972AttenSig(0)
                    if not testMode:
                        if numSpk > 1:
                            audioHW.setAttenuatorLevel(attenLvl1, attenLvl2,
                                                       daq)
                        else:
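                            # (added note) with one speaker both primaries share a single
                            # attenuator: apply the larger attenuation in hardware and
                            # boost the other primary's voltage by the dB difference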
                            if attenLvl1 > attenLvl2:
                                dbDiff = attenLvl1 - attenLvl2
                                vOut2 = vOut2 * (10**(dbDiff / 20))
                            elif attenLvl1 < attenLvl2:
                                dbDiff = attenLvl2 - attenLvl1
                                attenLvl1 = attenLvl2
                                vOut1 = vOut1 * (10**(dbDiff / 20))

                            audioHW.setAttenuatorLevel(attenLvl1,
                                                       audioHW.maxAtten, daq)

                        # daq.sendDigOutDPOAEd(attenLines, attenSig)
                        # appObj.oct_hw.SetAttenLevel(0, attenLines)

                    pl = appObj.DPOAE_output
                    pl.clear()
                    endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                    #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                    pl.plot(tOut, sp1 + sp2, pen='b')

                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Output', 'V', **labelStyle)

                    numInputSamples = int(inputRate * len(sp1) / outputRate)

                    if testMode:
                        # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                        pass
                    else:

                        # setup the output task
                        if numSpk > 1:
                            spkOut = np.vstack((vOut1 * sp1, vOut2 * sp2))
                        else:
                            spkOut = vOut1 * sp1 + vOut2 * sp2

                        daq.setupAnalogOutput(chanNameOut,
                                              audioHW.daqTrigChanIn,
                                              int(outputRate), spkOut)
                        daq.startAnalogOutput()

                        # setup the input task
                        daq.setupAnalogInput(chanNamesIn,
                                             audioHW.daqTrigChanIn,
                                             int(inputRate), numInputSamples)
                        daq.startAnalogInput()

                        # trigger the acquisition by sending a digital pulse
                        daq.sendDigTrig(audioHW.daqTrigChanOut)

                        dataIn = daq.readAnalogInput()
                        mic_data = dataIn[0, :]

                        mic_data = mic_data / micVoltsPerPascal

                        daq.stopAnalogInput()
                        daq.stopAnalogOutput()
                        daq.clearAnalogInput()
                        daq.clearAnalogOutput()

                    npts = len(mic_data)
                    t = np.linspace(0, npts / inputRate, npts)
                    pl = appObj.spCal_micInput
                    pl.clear()
                    pl.plot(t, mic_data, pen='b')

                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                    DPOAEptData, DPOAEdata = processDPOAEData(
                        mic_data, freq, freq_idx, amp_idx, freqArray, ampArray,
                        inputRate, DPOAEdata)

                    print("runDPOAE: plotting data")
                    plotDPOAEdata(appObj, DPOAEptData, DPOAEdata)

                QtGui.QApplication.processEvents(
                )  # check for GUI events, such as button presses

                # if done flag, break out of loop
                if appObj.doneFlag:
                    break

                frameNum += 1
                amp_idx += 1

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

            freq_idx += 1

        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.DPOAEnumber
        name = 'DPOAE'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')

        saveOpts.saveMicData = appObj.DPOAE_saveMicData_checkBox.isChecked()
        saveOpts.saveMicFFT = appObj.DPOAE_saveMicFFT_checkBox.isChecked()
        saveDir = appObj.saveDir_lineEdit.text()

        plotName = 'DPOAE %d %s %s' % (number, timeStr, saveOpts.note)
        plotFilePath = saveDPOAEDataFig(DPOAEdata, trialDur, saveDir, plotName,
                                        timeStr)

        reply = QtGui.QMessageBox.question(
            appObj, 'Save', "Keep data?",
            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
            QtGui.QMessageBox.Yes)
        if reply == QtGui.QMessageBox.Yes:
            excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number,
                                                     timeStr, note)
            saveDPOAEDataXLS(DPOAEdata, trialDur, excelWS, saveOpts)
            #saveDPOAEData(DPOAEdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)

            saveDPOAEDataPickle(DPOAEdata, trialDur, plotName, saveOpts,
                                timeStr)
            appObj.DPOAEnumber += 1

        else:
            os.remove(plotFilePath)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents(
    )  # check for GUI events, such as button presses
    appObj.finishCollection()
Example #6
def runDispersion(appObj):
    DebugLog.log("runDispersion")
    appObj.tabWidget.setCurrentIndex(5)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    mirrorDriver = appObj.mirrorDriver
    
    # set the mirror position to (0,0)
    chanNames = [mirrorDriver.X_daqChan, mirrorDriver.Y_daqChan]
    data = np.zeros(2)
    if not appObj.oct_hw.IsDAQTestingMode():
        from DAQHardware import DAQHardware
        daq = DAQHardware()
        daq.writeValues(chanNames, data)
    
    pd_background = None
    
    fpgaOpts = appObj.oct_hw.fpgaOpts
    numklinpts = fpgaOpts.numKlinPts
    if fpgaOpts.InterpDownsample > 0:
        numklinpts =  numklinpts // 2
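    # (added, hedged) when the FPGA interpolator downsamples, presumably only half of the
    # k-linear points are produced per trigger, hence the halved count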
    
    # keep looping until we are signaled to stop by the GUI (flag set in appObj)
    try:
        frameNum = 0
        saveDirInit = False
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Dispersion')
        dispData = DispersionData(fpgaOpts)
        klin = None # initialize klin to None so it will be computed on the first iteration
        savedFPGADisp = False
        
        while not appObj.doneFlag: 
            # setup and grab the OCT data - this will also fire the mirror output
            numTrigs = appObj.disp_numTrigs_spinBox.value()
            processMode = OCTCommon.ProcessMode(appObj.processMode_comboBox.currentIndex())
                
            # get processing options from GUI
            PD_LP_fc = appObj.disp_pd_lpfilter_cutoff_dblSpinBox.value()
            PD_HP_fc = appObj.disp_pd_hpfilter_cutoff_dblSpinBox.value()
            PDfiltCutoffs = [PD_LP_fc, PD_HP_fc]
            magWin_LPfilterCutoff = appObj.disp_magwin_lpfilter_cutoff_dblSpinBox.value()
            
            dispData.mziFilter = appObj.mziFilter.value()
            dispData.magWin_LPfilterCutoff = magWin_LPfilterCutoff
            dispData.PDfilterCutoffs = PDfiltCutoffs
            
            collectBG = appObj.disp_collectBG_pushButton.isChecked()
            
            pd_background = dispData.phDiode_background                
    
            if processMode == OCTCommon.ProcessMode.FPGA:
                if appObj.oct_hw.IsOCTTestingMode():
                    pd_data = OCTCommon.loadRawData(testDataDir, frameNum % 19, dataType=1)
                    numklinpts = 1400
                else:
                    err, pd_data = appObj.oct_hw.AcquireOCTDataInterpPD(numTrigs)
                    DebugLog.log("runDispersion(): AcquireOCTDataInterpPD() err = %d" % err)
                    # process the data
                dispData = processData(pd_data, dispData, numklinpts, PDfiltCutoffs, magWin_LPfilterCutoff, pd_background, collectBG)

            elif processMode == OCTCommon.ProcessMode.SOFTWARE:
                if appObj.oct_hw.IsOCTTestingMode():
                    ch0_data,ch1_data=JSOraw.getSavedRawData(numTrigs,appObj.dispData.requestedSamplesPerTrig,appObj.savedDataBuffer)
                else:
                    # def AcquireOCTDataRaw(self, numTriggers, samplesPerTrig=-1, Ch0Shift=-1, startTrigOffset=0):
                    samplesPerTrig = fpgaOpts.SamplesPerTrig*2 + fpgaOpts.Ch0Shift*2
                    err, ch0_data,ch1_data = appObj.oct_hw.AcquireOCTDataRaw(numTrigs, samplesPerTrig)

                pdData,mziData,actualSamplesPerTrig = JSOraw.channelShift(ch0_data,ch1_data,dispData)    # shift the two channels to account for delays in the sample data compared to the MZI data 
                mzi_hilbert, mzi_mag, mzi_ph, k0 = JSOraw.processMZI(mziData, dispData)                # calculate k0 from the phase of the MZI data
                k0Cleaned = JSOraw.cleank0(k0, dispData) # Adjust the k0 curves so that the unwrapping all starts at the same phase    
                pd_data, klin = JSOraw.processPD(pdData, k0Cleaned, dispData, klin)  # Interpolate the PD data based upon the MZI data
                dispData.Klin = klin
                dispData = processUniqueDispersion(pd_data, dispData, pd_background, collectBG)
            else:
                QtGui.QMessageBox.critical (appObj, "Error", "Unsupported processing mode for current hardware")
                
            # plot the data
            plotDispData(appObj, dispData, PDfiltCutoffs)
            
            if appObj.getSaveState():
                dispFilePath = saveDispersionData(dispData, appObj.settingsPath)
            
                saveOpts = appObj.getSaveOpts()
                if saveOpts.saveRaw:
                    if not saveDirInit:
                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Dispersion')
                        saveDirInit = True
                    
                    OCTCommon.saveRawData(pd_data, saveDir, frameNum, dataType=1)
                if processMode == OCTCommon.ProcessMode.FPGA:
                    appObj.dispDataFPGA = dispData
                    dispFilePathFPGA = dispFilePath
                    savedFPGADisp = True
                else:
                    appObj.dispData = dispData
                
            frameNum += 1
            QtGui.QApplication.processEvents() # check for GUI events, particularly the "done" flag
            
        if savedFPGADisp:
            DebugLog.log("runDispersion(): loading dispersion file into FPGA")
            appObj.loadDispersionIntoFPGA(dispFilePathFPGA, appObj.oct_hw.fpgaOpts)
            
    except Exception as ex:
        # raise ex
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during scan. Check command line output for details")
    finally:
        appObj.isCollecting = False
        QtGui.QApplication.processEvents() # check for GUI events
        appObj.finishCollection()        
Example #7
def runCM(appObj, testMode=False):
    print("runCM")

    appObj.tabWidget.setCurrentIndex(4)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.CM_freqLow_comboBox.currentIndex()
        i2 = appObj.CM_freqHigh_comboBox.currentIndex()
        print("runCM: i1= ", i1, "i2= ", i2)

        ampLow = appObj.CMampLow_spinBox.value()
        ampHigh = appObj.CMampHigh_spinBox.value()
        ampDelta = appObj.CMampDelta_spinBox.value()

        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        if ampLow == ampHigh:
            ampArray = np.array([ampLow])
        else:
            ampArray = np.arange(ampLow, ampHigh, ampDelta)
            if ampArray[-1] != ampHigh:
                ampArray = np.hstack((ampArray, ampHigh))

        freqArray = freqArray[i1:i2 + 1]

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    trialDur = appObj.CMstimDuration_dblSpinBox.value() * 1e-3
    stimOffset = appObj.CMstimOffset_dblSpinBox.value() * 1e-3
    nReps = appObj.CMtrialReps_spinBox.value()

    # set the input rate to three times the highest output frequency, allowing some margin above the Nyquist minimum

    #inputRate = 3*freqArray[-1]
    # inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate

    try:
        frameNum = 0
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan
        attenLines = audioHW.attenL_daqChan

        freq_idx = 0
        CMdata = None

        for freq in freqArray:
            spkOut_trial = makeCMOutput(freq, trialDur, stimOffset, audioHW)
            spkOut = np.tile(spkOut_trial, nReps)

            npts = len(spkOut_trial)
            tOut = np.linspace(0, npts / outputRate, npts)
            print("runCM npts=%d len(spkOut_trial)= %d len(tOut)= %d" %
                  (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = inputRate

            for amp in ampArray:
                print("runCM freq=" + repr(freq), " amp= ", +amp,
                      " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(
                    freq, amp, 0)

                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    # AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    audioHW.setAttenuatorLevel(attenLvl, audioHW.maxAtten, daq)
                    # daq.sendDigOutCmd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)

                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)

                numInputSamples = nReps * int(
                    inputRate * len(spkOut_trial) / outputRate)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:

                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), vOut * spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    timeout = numInputSamples / inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]

                    mic_data = mic_data / micVoltsPerPascal
                    bioamp_data = bioamp_data / bioamp.gain

                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                npts = len(mic_data)
                t = np.linspace(0, npts / inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                # def processCMData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, CMdataIn):
                CMptData, CMdata = processCMData(mic_data, bioamp_data, nReps,
                                                 freq, freq_idx, amp_idx,
                                                 freqArray, ampArray,
                                                 inputRate, CMdata)

                print("runCM: plotting data")
                plotCMdata(appObj, CMptData, CMdata)

                #                if appObj.getSaveState():
                #                    if not isSaveDirInit:
                #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                #                        isSaveDirInit = True
                #
                #                    if saveOpts.saveRaw:
                #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)

                QtGui.QApplication.processEvents()  # check for GUI events, such as button presses

                # if done flag, break out of loop
                if appObj.doneFlag:
                    break

                frameNum += 1
                amp_idx += 1

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

            freq_idx += 1

        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.CMnumber
        name = 'CM'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number,
                                                 timeStr, note)

        appObj.CMnumber += 1
        saveOpts.saveTracings = appObj.CM_saveTracings_checkBox.isChecked()
        saveDir = appObj.saveDir_lineEdit.text()
        saveCMDataXLS(CMdata, trialDur, nReps, excelWS, saveOpts)
        #saveCMData(CMdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)

        plotName = 'CM %d %s %s' % (number, timeStr, saveOpts.note)
        saveCMDataFig(CMdata, trialDur, nReps, saveDir, plotName, timeStr)
        saveCMDataPickle(CMdata, trialDur, nReps, plotName, saveOpts, timeStr)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents()  # check for GUI events, such as button presses
    appObj.finishCollection()
def runJSOraw(appObj):
    DebugLog.log("runJSOraw")
    try:
        appObj.tabWidget.setCurrentIndex(7)
        appObj.doneFlag = False
        appObj.isCollecting = True
        appObj.JSOsaveDispersion_pushButton.setEnabled(True)
        appObj.JSOloadDispersion_pushButton.setEnabled(False)
        dispData = appObj.dispData             # this class holds all the dispersion compensation data    
        if dispData is None:
            dispData = Dispersion.DispersionData()
            
        laserSweepFreq=appObj.octSetupInfo.getTriggerRate()
        mirrorDriver = appObj.mirrorDriver
        
        if not appObj.oct_hw.IsOCTTestingMode():     # prepare to get new data            
            # set the mirror position to (0,0)
            chanNames = [mirrorDriver.X_daqChan, mirrorDriver.Y_daqChan]
            data = np.zeros(2)
            from DAQHardware import DAQHardware
            daq = DAQHardware()
            daq.writeValues(chanNames, data)
        else:
            appObj.savedDataBuffer.loadData(appObj)
    
        peakXPos=np.array([0],dtype=int)       
        peakYPos=np.array([0],dtype=float)       
        peakXPos1=np.array([0],dtype=int)       
        peakYPos1=np.array([0],dtype=float)       
                   
        while not appObj.doneFlag:
            # read data analysis settings from the GUI
            numTrigs=appObj.numTrig.value()
            dispData.requestedSamplesPerTrig=appObj.requestedSamplesPerTrig.value()
            dispData.startSample=appObj.startSample.value()
            dispData.endSample=appObj.endSample.value()
            dispData.numKlinPts=appObj.numKlinPts.value()
            dispData.Klin=np.zeros(dispData.numKlinPts)         
            dispData.numShiftPts=appObj.numShiftPts.value()
            dispData.filterWidth=appObj.filterWidth.value()
            dispData.mziFilter=appObj.mziFilter.value()
            dispData.magWin_LPfilterCutoff=appObj.dispMagWindowFilter.value()
            dispData.PDfilterCutoffs=[0,0]
            dispData.dispCode=appObj.dispersionCompAlgorithm_comboBox.currentIndex() 
            dispData.dispMode=appObj.dispersionCompAlgorithm_comboBox.currentText()
                     
            # Get data using one of several methods
            if appObj.oct_hw.IsOCTTestingMode():
                ch0_data,ch1_data=getSavedRawData(numTrigs,dispData.requestedSamplesPerTrig,appObj.savedDataBuffer)
            else:
                ch0_data,ch1_data=getNewRawData(numTrigs,dispData.requestedSamplesPerTrig,appObj)
            
            if appObj.saveData_checkBox.isChecked():      # save data to disk for later use if desired
                fileName='Mirror_Raw'
                dataToSave = (ch0_data, ch1_data)              
                appObj.savedDataBuffer.saveData(appObj,dataToSave,fileName)                                
                appObj.saveData_checkBox.setChecked(False)                     
                
            # delay the MZI to account for it having a shorter optical path than the sample/reference arm path, then calculate k0 as the MZI phase
            pdData,mziData,actualSamplesPerTrig=channelShift(ch0_data,ch1_data,dispData)    
            textString='Actual samples per trigger: {actualSamplesPerTrig}'.format(actualSamplesPerTrig=actualSamplesPerTrig)            
            appObj.actualSamplesPerTrig_label.setText(textString)         
            
            import time
            t1 = time.time()
            mzi_hilbert, mzi_mag, mzi_ph, k0 = processMZI(mziData, dispData) 
            mzi_proc_time = time.time() - t1
            print("MZI processing time = %0.4f ms" % (mzi_proc_time*1000))
    
            # Adjust the k0 curves so that the unwrapping all starts at the same phase
            appObj.k0_plot_3.clear()
            appObj.k0_plot_4.clear()
            appObj.k0_plot_5.clear()
            t1 = time.time()
            k0Cleaned=cleank0(k0,dispData)              
            k0clean_time = time.time() - t1
            print("k0 cleaning time = %0.4f ms" % (k0clean_time*1000))
            
            for i in range(numTrigs):
                appObj.k0_plot_3.plot(k0[i,:2*dispData.startSample], pen=(i,numTrigs)) 
            startMZIdata1=k0[:,dispData.startSample]
            appObj.k0_plot_4.plot(startMZIdata1, pen='r') 
            startMZIdata2=k0Cleaned[:,dispData.startSample]
            appObj.k0_plot_4.plot(startMZIdata2, pen='b') 
            for i in range(numTrigs):
                appObj.k0_plot_5.plot(k0Cleaned[i,:2*dispData.startSample], pen=(i,numTrigs)) 
            k0=k0Cleaned
            
            # Interpolate the PD data based upon the MZI data and calculate the a-lines before dispersion compensation      
            t1 = time.time()
            pd_interpRaw, klin = processPD(pdData, k0, dispData)
            interpPD_time = time.time() - t1
            print("Interp PD time = %0.4f ms" % (interpPD_time*1000))
            dispData.Klin=klin
            pd_fftNoInterp, alineMagNoInterp, alinePhaseNoInterp = calculateAline(pdData[:,dispData.startSample:dispData.endSample])

            t1 = time.time()
            pd_fftRaw, alineMagRaw, alinePhaseRaw = calculateAline(pd_interpRaw)
            alineCalc_time = time.time() - t1
            print("Aline calc time = %0.4f ms" % (alineCalc_time*1000))
            
            # find the mirror in the a-line to determine the filter settings, and then perform the dispersion compensation
            rangePeak1=[100, 900]
            alineAve1=np.average(alineMagRaw,axis=0) 
            peakXPos1[0]=np.argmax(alineAve1[rangePeak1[0]:rangePeak1[1]])+rangePeak1[0]
            peakYPos1[0]=alineAve1[peakXPos1[0]]     
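            # normalize the peak index +/- half the filter width to a fraction of the
            # A-line length (the 2048 divisor is presumably the A-line FFT length),
            # giving band-pass cutoffs that bracket the mirror peak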
            width=dispData.filterWidth*(rangePeak1[1]-rangePeak1[0])/2         
            dispData.PDfilterCutoffs[0]=(peakXPos1[0]+width)/2048
            dispData.PDfilterCutoffs[1]=(peakXPos1[0]-width)/2048
        
            dispersionCorrection(pd_interpRaw,dispData)
            appObj.dispData=dispData      #store the local variable in the overall class so that it can be saved when the save button is pressed
            
            # now correct the data using dispersion compensation and then process the a-lines
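            # magWin applies the magnitude window computed by dispersionCorrection, and the
            # cos/sin pair is the complex exponential exp(-1j*phaseCorr) that removes the
            # dispersion phase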
            pd_interpDispComp = dispData.magWin * pd_interpRaw * (np.cos(-1*dispData.phaseCorr) + 1j * np.sin(-1*dispData.phaseCorr))
            pd_fftDispComp, alineMagDispComp, alinePhaseDispComp = calculateAline(pd_interpDispComp)
                  
            #scale k0 and the MZI to the same range to plot them so they overlap
            k0Ripple= scipy.signal.detrend(k0[0,500:700],axis=-1)
            k0RippleNorm=k0Ripple/k0Ripple.max()
            mziDataRipple= scipy.signal.detrend(mziData[0,500:700],axis=-1)
            mziDataNorm=mziDataRipple/mziDataRipple.max()
           
            # Find the peak of the A-line within a range and calculate the phase noise
            rangePeak=[100, 900]
            alineAve=np.average(alineMagDispComp,axis=0) 
            peakXPos[0]=np.argmax(alineAve[rangePeak[0]:rangePeak[1]])+rangePeak[0]
            peakYPos[0]=alineAve[peakXPos[0]]              
            t=np.arange(numTrigs)/laserSweepFreq
            phaseNoiseTD=np.unwrap(alinePhaseDispComp[:,peakXPos[0]])
            phaseNoiseTD=phaseNoiseTD-np.mean(phaseNoiseTD)
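            # convert phase noise (rad) to displacement (m): dz = dphi * lambda0 / (4*pi*n),
            # with lambda0 = 1310 nm and refractive index n = 1.32 (values taken from the line below)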
            phaseNoiseTD=phaseNoiseTD*1310e-9/(4*np.pi*1.32)
            phaseNoiseFFT = np.abs(np.fft.rfft(phaseNoiseTD))/(numTrigs/2)
#            phaseNoiseFD = 20*np.log10(np.abs(phaseNoiseFFT))        
            freq = np.fft.rfftfreq(numTrigs)*laserSweepFreq
    
            # Clear all of the plots
            appObj.mzi_plot_2.clear() 
            appObj.pd_plot_2.clear()
            appObj.mzi_mag_plot_2.clear()
            appObj.mzi_phase_plot_2.clear()
            appObj.k0_plot_2.clear()
            appObj.interp_pdRaw_plot.clear()
            appObj.interp_pdDispComp_plot.clear()
            appObj.alineNoInterp_plot.clear()
            appObj.alineRaw_plot.clear()
            appObj.alineDispComp_plot.clear()
            appObj.phaseNoiseTD_plot.clear()
            appObj.phaseNoiseFD_plot.clear()
            appObj.dispWnfcMag_plot.clear()
            appObj.dispWnfcPh_plot.clear()
           
            # Plot all the data
            if appObj.plotFirstOnly_checkBox.isChecked():
                i=0
                appObj.pd_plot_2.plot(pdData[i,:], pen='r')            
                appObj.mzi_plot_2.plot(mziData[i,:], pen='r')            
                appObj.mzi_mag_plot_2.plot(mzi_mag[i,:], pen='r')            
                appObj.k0_plot_2.plot(k0[i,:], pen='r')
                sampleNum=np.linspace(dispData.startSample,dispData.endSample,dispData.numKlinPts)
                appObj.k0_plot_2.plot(sampleNum,klin, pen='b')                      
                appObj.interp_pdRaw_plot.plot(pd_interpRaw[i,:], pen='r')           
                appObj.interp_pdDispComp_plot.plot(np.abs(pd_interpDispComp[i,:]), pen='r')           
                appObj.alineNoInterp_plot.plot(alineMagNoInterp[i,:], pen='r')
                appObj.alineRaw_plot.plot(alineMagRaw[i,:], pen='r')
                appObj.alineDispComp_plot.plot(alineMagDispComp[i,:], pen='r')
            else:
                # limit plotting to first 10 or so triggers, otherwise this will freeze up
                nTrigs = min((numTrigs, 10))
                
                for i in range(nTrigs):
                    pen=(i,nTrigs)
                    appObj.pd_plot_2.plot(pdData[i,:], pen=pen)            
                    appObj.mzi_plot_2.plot(mziData[i,:], pen=pen)            
                    appObj.mzi_mag_plot_2.plot(mzi_mag[i,:], pen=pen)            
                    appObj.mzi_phase_plot_2.plot(mzi_ph[i,:], pen=pen)            
                    appObj.k0_plot_2.plot(k0[i,:], pen=pen)            
                    appObj.interp_pdRaw_plot.plot(pd_interpRaw[i,:], pen=pen)            
                    appObj.interp_pdDispComp_plot.plot(np.abs(pd_interpDispComp[i,:]), pen=pen)            
                    appObj.alineNoInterp_plot.plot(alineMagNoInterp[i,:], pen=pen)            
                    appObj.alineRaw_plot.plot(alineMagRaw[i,:], pen=pen)            
                    appObj.alineDispComp_plot.plot(alineMagDispComp[i,:], pen=pen)            
                
            appObj.alineRaw_plot.plot(peakXPos1,peakYPos1, pen=None, symbolBrush='k', symbolPen='b')
            appObj.alineDispComp_plot.plot(peakXPos,peakYPos, pen=None, symbolBrush='k', symbolPen='b')
            appObj.phaseNoiseTD_plot.plot(t,phaseNoiseTD, pen='r')
            appObj.phaseNoiseFD_plot.plot(freq,phaseNoiseFFT, pen='r')
            appObj.mzi_phase_plot_2.plot(mziDataNorm, pen='b')            
            appObj.mzi_phase_plot_2.plot(k0RippleNorm, pen='r')            
            
           
            # if you want to align the PD and the MZI data
    #            plotPDPhase.plot(pdData[0,:], pen='r')
    #            plotPDPhase.plot(mziData[0,:], pen='b')
    
            appObj.dispWnfcMag_plot.plot(dispData.magWin, pen='b')
            appObj.dispWnfcPh_plot.plot(dispData.phaseCorr, pen='b')
            
            # plot filter cutoff ranges on the raw Aline plot
            yy=[np.min(alineMagRaw[0,:]),np.max(alineMagRaw[0,:])]
            xx0=[alineMagRaw.shape[1]*dispData.PDfilterCutoffs[0],alineMagRaw.shape[1]*dispData.PDfilterCutoffs[0]]        
            xx1=[alineMagRaw.shape[1]*dispData.PDfilterCutoffs[1],alineMagRaw.shape[1]*dispData.PDfilterCutoffs[1]]    
            appObj.alineRaw_plot.plot(xx0,yy, pen='b')
            appObj.alineRaw_plot.plot(xx1,yy, pen='b')
            
    #            # Now create a bscan image from the 1 aline, but sweep the shift value between the mzi and pd to see what works best
    #            nShift=201        
    #            bscan=np.zeros([nShift, alineMag.shape[0]])        
    #            for i in range(nShift):
    #                shift=i-(nShift-1)/2
    #                if shift<0:
    #                    mzi_data_temp=mzi_data[-1*shift:]
    #                    pd_data_temp=pd_data[0:mzi_data_temp.shape[0]]
    #    #                print(mzi_data_temp.shape,pd_data_temp.shape)
    #                elif shift>0:
    #                    pd_data_temp=pd_data[shift:]
    #                    mzi_data_temp=mzi_data[0:pd_data_temp.shape[0]]
    #    #                print(mzi_data_temp.shape,pd_data_temp.shape)
    #                elif shift==0:
    #                    pd_data_temp=pd_data
    #                    mzi_data_temp=mzi_data
    #                    
    #                mzi_hilbert, mzi_mag, mzi_ph, k0 = processMZI(mzi_data_temp)
    #                pd_interpRaw, pd_interpHanning, pd_fft, alineMag, alinePhase, klin = processPD(pd_data_temp, k0, klin_idx, numklinpts)
    #                bscan[i,:]=alineMag
    #    
    #            pl = self.bscan_plot
    #            pl.setImage(bscan)            
            
    #            print('alineMagDispComp ',alineMagDispComp.shape)
            if not np.all(np.isnan(alineMagDispComp)):  # only make the bscan plot if there is data to show (this prevents an error from occurring)
                appObj.bscan_plot.setImage(alineMagDispComp)
            QtGui.QApplication.processEvents() # check for GUI events  
    except:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during scan. Check command line output for details")
        
    appObj.JSOsaveDispersion_pushButton.setEnabled(False)    
    appObj.JSOloadDispersion_pushButton.setEnabled(True)
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events
    appObj.finishCollection()
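# (truncated excerpt) the lines below are the tail of makeLM1972AttenSig, which
# serializes an attenuation level into load/data/clock bit patterns for the
# LM1972 digital attenuator; the function header and outer loop are not shown here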
        idx = n*3 + 2
        r = attenLvl % 2
        dataSig[idx-1:idx+2] = r
        attenLvl = attenLvl // 2
        
    # generate the output signal as 32-bit words because that's what NIDAQ uses for writing digital lines
    sig = np.zeros(numpts, dtype=np.uint32)
    loadMask = 1 << 2
    dataMask = 1 << 1
    clkMask = 1 << 0
    for n in range(0, numpts):
        sig[n] = (loadMask*loadSig[n]) | (dataMask*dataSig[n]) | (clkMask*clkSig[n])
        
    return sig
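
# Illustrative sketch (not part of the original module): unpack one 32-bit word
# produced by makeLM1972AttenSig back into its bits, using the bit layout defined
# above (bit 0 = clock, bit 1 = data, bit 2 = load); handy for sanity-checking the packing.
def _unpackAttenWord(word):
    clk = word & 0x1           # bit 0: clock
    data = (word >> 1) & 0x1   # bit 1: data
    load = (word >> 2) & 0x1   # bit 2: load
    return load, data, clk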

    
if __name__ == "__main__":
    from DAQHardware import DAQHardware
    
    sig = makeLM1972AttenSig(30)
    for n in range(0, len(sig)):
        print("%10x" % sig[n])    
        
    daqHW = DAQHardware()
    audioHW = AudioHardware()
    
    # outLines = audioHW.attenL_daqChan
    # lf.attenL_daqChan = "Dev1/line1:3"
    outLines = "PXI1Slot2/port0/line1:3"
    daqHW.sendDigOutCmd(outLines, sig)
    
def calibrateClick(appObj, testMode=False):
    print("ABR.calibrateClick")

    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')


#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        pass

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    ABRparams = ABRParams(appObj)
    ABRparams.click = True
    ABRparams.nReps = 20
    print("ABR.calibrateClick ABRparams=", ABRparams.__dict__)
    # set input rate to three times the highest output frequency, to allow some margin above Nyquist (currently disabled; see the commented-out code below)

    #    inputRate = 3*freqArray[-1]
    #    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
    #    inputRate = np.max((inputRate, 6e3))   # inpute rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
    #    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
    #    print("runABR: inputRate(final)= ", inputRate)

    try:
        chanNameOut = audioHW.speakerL_daqChan
        attenLines = audioHW.attenL_daqChan

        spkOut_trial = makeABROutput(4e3, ABRparams, audioHW)
        spkOut = np.tile(spkOut_trial, ABRparams.nReps)
        npts = len(spkOut_trial)
        tOut = np.linspace(0, npts / outputRate, npts)

        # attenSig = AudioHardware.makeLM1972AttenSig(0)
        if not testMode:
            AudioHardware.Attenuator.setLevel(0, attenLines)

        pl = appObj.ABR_output
        pl.clear()
        endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
        #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
        pl.plot(tOut, spkOut_trial, pen='b')

        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Output', 'V', **labelStyle)

        numInputSamples = int(inputRate * len(spkOut) / outputRate)

        if testMode:
            # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
            pass
        else:
            # setup the output task
            daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                  int(outputRate), spkOut)
            daq.startAnalogOutput()

            # setup the input task
            daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                 int(inputRate), numInputSamples)
            daq.startAnalogInput()

            # trigger the acquisition by sending a digital pulse
            daq.sendDigTrig(audioHW.daqTrigChanOut)

            timeout = numInputSamples / inputRate + 2
            dataIn = daq.readAnalogInput(timeout)
            mic_data = dataIn[0, :]

            mic_data = mic_data / micVoltsPerPascal

            daq.waitDoneInput()
            daq.stopAnalogInput()
            daq.clearAnalogInput()

            daq.waitDoneOutput(stopAndClear=True)

        print("ABR.calibrateClick: plotting data")
        npts = len(mic_data)

        # reshape and average the mic data
        ptsPerRep = npts // ABRparams.nReps
        mic_data = np.reshape(mic_data, (ABRparams.nReps, ptsPerRep))
        mic_data = np.mean(mic_data, 0)

        # plot mic data
        npts = len(mic_data)
        t = np.linspace(0, npts / inputRate, npts)
        pl = appObj.ABR_micInput
        pl.clear()
        pl.plot(t, mic_data, pen='b')

        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Response', 'Pa', **labelStyle)

        idx1 = round(inputRate * ABRparams.stimOffset)
        idx2 = idx1 + round(inputRate * ABRparams.stimDur)

        mic_data = mic_data[idx1:idx2]
        # apply high pass filter to get rid of LF components
        #        (b, a) = scipy.signal.butter(5, 100/inputRate, 'high')
        #        mic_data = scipy.signal.lfilter(b, a, mic_data)
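        # the RMS below is converted to dB SPL re the standard 20 uPa (2e-5 Pa) reference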

        rms = np.mean(mic_data**2)**0.5
        rms = 20 * np.log10(rms / 2e-5)
        appObj.ABRclick_RMS = rms

        appObj.ABR_rms_label.setText("%0.1f dB" % rms)
        print("ABR.calibrateClick: RMS= ", rms)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    appObj.isCollecting = False
    QtGui.QApplication.processEvents()  # check for GUI events, such as button presses
    appObj.finishCollection()
def calibrateScanMirror(appObj):
    DebugLog.log("calibrateScanMirror")    
    appObj.tabWidget.setCurrentIndex(7)
    appObj.doneFlag = False
    appObj.isCollecting = True
    appObj.JSOsaveDispersion_pushButton.setEnabled(True)
    appObj.JSOloadDispersion_pushButton.setEnabled(False)

    if not appObj.oct_hw.IsOCTTestingMode():     # prepare to get new data            
        from DAQHardware import DAQHardware
        daq = DAQHardware()
    audioHW=appObj.audioHW
    mirrorDriver = appObj.mirrorDriver    
    chanNames = [mirrorDriver.X_daqChan, mirrorDriver.Y_daqChan]
    trigChan = audioHW.daqTrigChanIn   #use the audio trigger to start the scan
    outputRate = mirrorDriver.DAQoutputRate

    while not appObj.doneFlag:        # keep running until the button is turned off
        scanParams = appObj.getScanParams()
        # create scan pattern to drive the mirrors
        mode=appObj.scanShape_comboBox.currentIndex()
        print('mode',mode)
        if mode==0:   # create a spiral scan using fast (resonant) scanning
            Vmaxx=mirrorDriver.voltRange[1] # maximum voltage for MEMS mirror for x-axis
            Vmaxy=mirrorDriver.voltRange[1] # maximum voltage for MEMS mirror for y-axis
            xAdjust = 1    
            yAdjust = scanParams.skewResonant
            phaseShift = scanParams.phaseAdjust
            fr = mirrorDriver.resonantFreq  # angular scan rate (frequency of one rotation - resonant frequency)
            fv = scanParams.volScanFreq     # volume scan frequency; each cycle scans in and then out, i.e. two volumes
            DebugLog.log("freq of one rotation (fr)= %d; scan frequency (fv)= %d" % (fr, fv))
            diameter = scanParams.length
            voltsPerMM = mirrorDriver.voltsPerMillimeterResonant
            A1=(Vmaxx/2)/xAdjust
            A2=(Vmaxy/2)/yAdjust
            A3=voltsPerMM*diameter/2 
            A=np.min([A1,A2,A3])           
            fs=mirrorDriver.DAQoutputRate   # galvo output sampling rate
            t=np.arange(0,np.around(fs/fv))*1/fs  # t is the array of times for the DAQ output to the mirrors
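            # raised-cosine radius envelope: r ramps 0 -> 1 -> 0 over one volume period,
            # so the spiral grows out to the full radius and collapses back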
            r=1/2*(1-np.cos(2*np.pi*fv*t))            
            x=xAdjust*A*r*np.cos(2*np.pi*fr*t) # x and y are the coordinates of the laser at each point in time
            y=yAdjust*A*r*np.sin(2*np.pi*fr*t+phaseShift*np.pi/180)
            mirrorOut= np.vstack((x,y))
            
        elif mode==1:   # create a square scan using slow parameters
            Vmaxx=mirrorDriver.voltRange[1] # maximum voltage for MEMS mirror for x-axis
            Vmaxy=mirrorDriver.voltRange[1] # maximum voltage for MEMS mirror for y-axis
            xAdjust = 1    
            yAdjust = scanParams.skewNonResonant
            diameter = scanParams.length
            voltsPerMMX = mirrorDriver.voltsPerMillimeter*xAdjust
            voltsPerMMY = mirrorDriver.voltsPerMillimeter*yAdjust
            if ((diameter/2)*voltsPerMMX)>Vmaxx:
                diameter=2*Vmaxx/voltsPerMMX
            if ((diameter/2)*voltsPerMMY)>Vmaxy:
                diameter=2*Vmaxy/voltsPerMMY
            freq = appObj.cal_freq_dblSpinBox.value()
            if freq>mirrorDriver.LPFcutoff:  # can't go faster than the maximum scan rate
                appObj.cal_freq_dblSpinBox.setValue(mirrorDriver.LPFcutoff)
            fs=mirrorDriver.DAQoutputRate   # galvo output sampling rate
            t1=np.arange(0,np.around(fs/freq))*1/fs  
            n=int(np.around(t1.shape[0]/4))-1   # number of points in each quarter of the cycle (reduce by 1 to make it easy to shorten t1; int so it can be used as a slice index)
            t=t1[0:4*n]  # t is the array of times for the DAQ output to the mirrors
            cornerX=(diameter/2)*voltsPerMMX     # voltage at each corner of the square            
            cornerY=(diameter/2)*voltsPerMMY     # voltage at each corner of the square            
            
            # x and y are the coordinates of the laser at each point in time
            x=np.zeros(t.shape)            
            y=np.zeros(t.shape)            
            x[0:n]=np.linspace(-cornerX,cornerX,n)
            y[0:n]=-cornerY
            x[n:2*n]=cornerX
            y[n:2*n]=np.linspace(-cornerY,cornerY,n)
            x[2*n:3*n]=np.linspace(cornerX,-cornerX,n)
            y[2*n:3*n]=cornerY
            x[3*n:4*n]=-cornerX
            y[3*n:4*n]=np.linspace(cornerY,-cornerY,n)
            mirrorOut1= np.vstack((x,y))
            if mirrorDriver.MEMS:
                mirrorOut=scipy.signal.filtfilt(mirrorDriver.b_filt,mirrorDriver.a_filt,mirrorOut1)           
            else:
                mirrorOut=mirrorOut1    

        # plot mirror commands to GUI 
        pl = appObj.JSOmisc_plot1
        npts = mirrorOut.shape[1]
        t = np.linspace(0, npts/outputRate, npts)
        pl.clear()
        pl.plot(t, mirrorOut[0, :], pen='b')  
        pl.plot(t, mirrorOut[1, :], pen='r')  
        labelStyle = appObj.xLblStyle
        pl.setLabel('bottom', 'Time', 's', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl.setLabel('left', 'Output', 'V', **labelStyle)
    
        pl2=appObj.JSOmisc_plot2
        pl2.clear()
        pl2.plot(mirrorOut[0, :],mirrorOut[1, :], pen='b')
        labelStyle = appObj.xLblStyle
        pl2.setLabel('bottom', 'X galvo', 'V', **labelStyle)
        labelStyle = appObj.yLblStyle
        pl2.setLabel('left', 'Y galvo', 'V', **labelStyle)
     
        if not appObj.oct_hw.IsDAQTestingMode():
            # setup the analog output DAQ device
            daq.setupAnalogOutput(chanNames, trigChan, outputRate, mirrorOut.transpose())        
            daq.startAnalogOutput()
            
            #start trigger and wait for output to finish 
            daq.sendDigTrig(audioHW.daqTrigChanOut)
            daq.waitDoneOutput(timeout=3, stopAndClear=True)
            
            QtGui.QApplication.processEvents() # check for GUI events
        else:
            appObj.doneFlag = True      # just run one time through if in test mode
            appObj.CalibrateScanMirror_pushButton.setChecked(False)
                  
    # when testing is over, set the mirror position to (0,0)
    if not appObj.oct_hw.IsDAQTestingMode():
        chanNames = [mirrorDriver.X_daqChan, mirrorDriver.Y_daqChan]
        data = np.zeros(2)
        daq.writeValues(chanNames, data)
    
    appObj.JSOsaveDispersion_pushButton.setEnabled(False)    
    appObj.JSOloadDispersion_pushButton.setEnabled(True)        
    appObj.isCollecting = False
    appObj.finishCollection()
def runABR(appObj, testMode=False):
    print("runABR")

    appObj.tabWidget.setCurrentIndex(3)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    ABRparams = ABRParams(appObj)

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.ABR_freqLow_comboBox.currentIndex()
        i2 = appObj.ABR_freqHigh_comboBox.currentIndex()
        print("runABR: i1= ", i1, "i2= ", i2)
        ampLow = appObj.ABRampLow_spinBox.value()
        ampHigh = appObj.ABRampHigh_spinBox.value()
        ampDelta = appObj.ABRampDelta_spinBox.value()

        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        if ampLow == ampHigh:
            ampArray = np.array([ampLow])
        else:
            ampArray = np.arange(ampLow, ampHigh, ampDelta)
            if ampArray[-1] != ampHigh:
                ampArray = np.hstack((ampArray, ampHigh))

        freqArray = freqArray[i1:i2 + 1]

    if ABRparams.click:
        freqArray = freqArray[0:1]  # only use a single frequency
        clickRMS = appObj.ABRclick_RMS

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal

    # set input rate to three times the highest output frequency, to allow some margin above Nyquist (currently disabled; see the commented-out code below)

    #    inputRate = 3*freqArray[-1]
    #    print("runABR: outputRate= ", outputRate, " inputRate= ", inputRate)
    #    inputRate = np.max((inputRate, 6e3))   # inpute rate should be at least 6 kHz because ABR responses occur 0.3 - 3 kHz
    #    inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
    #    print("runABR: inputRate(final)= ", inputRate)

    try:
        frameNum = 0
        numFrames = len(freqArray) * len(ampArray)
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan
        attenLines = audioHW.attenL_daqChan

        freq_idx = 0
        ABRdata = None
        appObj.status_label.setText("Running")
        appObj.progressBar.setValue(0)

        for freq in freqArray:
            spkOut_trial = makeABROutput(freq, ABRparams, audioHW)
            npts = len(spkOut_trial)
            spkOut = np.tile(spkOut_trial, ABRparams.nReps)
            # invert every other trial, necessary for ABR/CAP output
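            # alternating polarity lets the stimulus artifact and cochlear microphonic
            # cancel when trials are averaged, while the ABR/CAP response adds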
            for n in range(1, ABRparams.nReps, 2):
                idx1 = n * npts
                idx2 = (n + 1) * npts
                spkOut[idx1:idx2] = -spkOut[idx1:idx2]
#            plt.figure(5)
#            plt.clf()
#            plt.plot(spkOut)
            tOut = np.linspace(0, npts / outputRate, npts)
            print("runABR npts=%d len(spkOut_trial)= %d len(tOut)= %d" %
                  (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = int(inputRate * ABRparams.trialDur)

            for amp in ampArray:
                print("runABR freq=" + repr(freq), " amp= ", +amp,
                      " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                if ABRparams.click:
                    clickRMS = appObj.ABRclick_RMS
                    attenLvl = 0
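                    # scale the output voltage by the dB difference from the calibrated
                    # click RMS; if the required voltage falls below the speaker's usable
                    # range, clamp it and make up the difference with the attenuator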
                    vOut = 10**((amp - clickRMS) / 20)
                    minV = audioHW.speakerOutputRng[0]
                    if vOut < minV:
                        attenLvl = int(round(20 * np.log10(minV / vOut)))
                        vOut = minV
                else:
                    vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(
                        freq, amp, 0)

                print("runABR vOut= ", vOut, " atenLvl=", attenLvl)

                if vOut > audioHW.speakerOutputRng[1]:
                    print("runABR vOut= ", vOut, "  out of range")
                    continue
                elif attenLvl > audioHW.maxAtten:
                    print("runABR attenLvl= ", attenLvl,
                          "  gerater than maximum attenuation")
                    continue

                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    # daq.sendDigOutABRd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)

                pl = appObj.ABR_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)

                numInputSamples = ABRparams.nReps * int(
                    inputRate * len(spkOut_trial) / outputRate)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:

                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), vOut * spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    timeout = numInputSamples / inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]

                    mic_data = mic_data / micVoltsPerPascal
                    bioamp_data = bioamp_data / bioamp.gain

                    daq.waitDoneInput()
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                    daq.waitDoneOutput(stopAndClear=True)


#                npts = len(mic_data)
#                t = np.linspace(0, npts/inputRate, npts)
#                pl = appObj.ABR_micInput
#                pl.clear()
#                pl.plot(t, mic_data, pen='b')
#
#                labelStyle = appObj.xLblStyle
#                pl.setLabel('bottom', 'Time', 's', **labelStyle)
#                labelStyle = appObj.yLblStyle
#                pl.setLabel('left', 'Response', 'Pa', **labelStyle)

# processABRData(mic_data, bioamp_data, freq, freq_idx, amp_idx, freqArray, ampArray, inputRate, ABRdataIn, ABRparams)
                ABRptData, ABRdata = processABRData(mic_data, bioamp_data,
                                                    freq, freq_idx, amp_idx,
                                                    freqArray, ampArray,
                                                    inputRate, ABRdata,
                                                    ABRparams)

                print("runABR: plotting data")
                plotABRdata(appObj, ABRptData, ABRdata)

                #                if appObj.getSaveState():
                #                    if not isSaveDirInit:
                #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                #                        isSaveDirInit = True
                #
                #                    if saveOpts.saveRaw:
                #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                idx1 = round(inputRate * ABRparams.stimOffset)
                idx2 = idx1 + round(inputRate * ABRparams.stimDur)

                mic_data = mic_data[idx1:idx2]
                rms = np.mean(mic_data**2)**0.5
                rms = 20 * np.log10(rms / 2e-5)

                appObj.ABR_rms_label.setText("%0.1f dB" % rms)

                QtGui.QApplication.processEvents()  # check for GUI events, such as button presses

                # if done flag, break out of loop
                if appObj.doneFlag:
                    break

                frameNum += 1
                amp_idx += 1
                appObj.progressBar.setValue(frameNum / numFrames)

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

            freq_idx += 1

        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.ABRnumber
        name = 'ABR'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number,
                                                 timeStr, note)

        appObj.ABRnumber += 1
        #saveOpts.saveTracings = appObj.ABR_saveTracings_checkBox.isChecked()
        saveOpts.saveTracings = True
        saveDir = appObj.saveDir_lineEdit.text()
        saveABRDataXLS(ABRdata, ABRparams, excelWS, saveOpts)
        #saveABRData(ABRdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)

        plotName = 'ABR %d %s %s' % (number, timeStr, saveOpts.note)
        saveABRDataFig(ABRdata, ABRparams, saveDir, plotName, timeStr)
        saveABRDataPickle(ABRdata, ABRparams, plotName, saveOpts, timeStr)

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during collection. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents()  # check for GUI events, such as button presses
    appObj.finishCollection()
def runSpeakerCal(appObj, testMode=False):
    DebugLog.log("runSpeakerCal")
    appObj.tabWidget.setCurrentIndex(1)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
        f = open(filePath, 'rb')
        audioParams = pickle.load(f)
        f.close()
    else:
        audioParams = appObj.getAudioParams()
    numSpk = audioParams.getNumSpeakers()
    
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal


    spCal = None
    freq_array2 = audioParams.freq[1, :]
    try:
        frameNum = 0
        isSaveDirInit = False
        saveOpts = appObj.getSaveOpts()
        for spkNum in range(0, numSpk):
            chanNameOut = audioHW.speakerL_daqChan 
            attenLines = audioHW.attenL_daqChan
            spkIdx = 0
                
            if (numSpk == 1 and audioParams.speakerSel == Speaker.RIGHT) or spkNum == 1:
                chanNameOut = audioHW.speakerR_daqChan
                attenLines = audioHW.attenR_daqChan
                spkIdx = 1
    
            freq_array = audioParams.freq[spkIdx, :]
            if (audioParams.stimType == AudioStimType.TWO_TONE_DP) and (numSpk == 1):
                freq_array = np.concatenate((freq_array, freq_array2))
                freq_array = np.sort(freq_array)
                freq_array2 = freq_array
                
            if spCal is None:
                spCal = SpeakerCalData(np.vstack((freq_array, freq_array2)))
                
            DebugLog.log("runSpeakerCal freq_array=" + repr(freq_array))
            freq_idx = 0

            attenSig = AudioHardware.makeLM1972AttenSig(0)
            
            if not testMode:
                # daq.sendDigOutCmd(attenLines, attenSig)
                appObj.oct_hw.SetAttenLevel(0, attenLines)
            
            for freq in freq_array:
                DebugLog.log("runSpeakerCal freq=" + repr(freq))
                spkOut = makeSpeakerCalibrationOutput(freq, audioHW, audioParams)    
                npts = len(spkOut)
                t = np.linspace(0, npts/outputRate, npts)
                
                pl = appObj.plot_spkOut
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                        
                numInputSamples = int(inputRate*len(spkOut)/outputRate) 
                
                if testMode:
                    mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                else:

                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    mic_data = daq.readAnalogInput()
                    mic_data = mic_data/micVoltsPerPascal

                
                if not testMode:
                    daq.stopAnalogInput()
                    daq.stopAnalogOutput()
                    daq.clearAnalogInput()
                    daq.clearAnalogOutput()
                
                npts = len(mic_data)
                t = np.linspace(0, npts/inputRate, npts)
                pl = appObj.plot_micRaw
                pl.clear()
                pl.plot(t, mic_data, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
                
                micData, spCal = processSpkCalData(mic_data, freq*1000, freq_idx, audioParams, inputRate, spCal, spkIdx)
                
                pl = appObj.plot_micFFT
                pl.clear()
                df = micData.fft_freq[1] - micData.fft_freq[0]
                nf = len(micData.fft_freq)
                i1 = int(1000*freq_array[0]*0.9/df)
                i2 = int(1000*freq_array[-1]*1.1/df)
                DebugLog.log("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
                pl.plot(micData.fft_freq[i1:i2], micData.fft_mag[i1:i2], pen='b')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)
                
                pl = appObj.plot_micMagResp
                pl.clear()
#                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                pl.plot(freq_array, spCal.magResp[spkIdx, :], pen="b", symbol='o')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)
                
                freq_idx += 1
                
                if appObj.getSaveState():
                    if not isSaveDirInit:
                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                        isSaveDirInit = True
    
                    if saveOpts.saveRaw:
                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                    
                QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                
                # if done flag, break out of loop
                if appObj.doneFlag:
                    break
                
                frameNum += 1

                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
                
        if not appObj.doneFlag:
            saveDir = appObj.settingsPath
            saveSpeakerCal(spCal, saveDir)
            appObj.audioHW.loadSpeakerCalFromProcData(spCal)
            appObj.spCal = spCal            
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during scan. Check command line output for details")           
        
    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()


    
def runSpeakerCal(appObj, testMode=False):
    print("runSpeakerCal")
    appObj.tabWidget.setCurrentIndex(0)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata',
                                   'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        freqArray = appObj.getFrequencyArray()

    # numSpk = audioParams.getNumSpeakers()
    numSpk = 1
    cIdx = appObj.speaker_comboBox.currentIndex()
    if cIdx > 0:
        numSpk = 2

    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn = [audioHW.mic_daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    # mode = 'chirp'
    mode = ''
    spCal = None
    # freq_array2 = audioParams.freq[1, :]

    try:
        frameNum = 0
        isSaveDirInit = False
        trialDur = appObj.spCal_stimDuration_dblSpinBox.value() * 1e-3

        freq_array = freqArray
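        # second frequency list at f/1.22, presumably the standard f2/f1 primary-tone
        # ratio used for two-tone (DPOAE) stimuli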
        freq_array2 = freqArray / 1.22

        if numSpk == 1:
            freq_array = np.concatenate((freq_array, freq_array2))
            freq_array = np.sort(freq_array)
            freq_array2 = freq_array

        spCal = SpeakerCalData(np.vstack((freq_array, freq_array2)))

        for spkNum in range(0, numSpk):
            chanNameOut = audioHW.speakerL_daqChan
            #attenLines = audioHW.attenL_daqChan
            #attenLinesOther = audioHW.attenR_daqChan
            spkIdx = 0

            attenLvl1 = 0
            attenLvl2 = audioHW.maxAtten

            if spkNum == 1:   # second pass selects the right speaker
                #chanNameOut = audioHW.speakerR_daqChan
                #attenLines = audioHW.attenR_daqChan
                #attenLinesOther = audioHW.attenL_daqChan
                spkIdx = 1
                attenLvl1 = audioHW.maxAtten
                attenLvl2 = 0

            freq_idx = 0

            if not testMode:
                audioHW.setAttenuatorLevel(attenLvl1, attenLvl2, daq)

                # daq.sendDigOutCmd(attenLines, attenSig)
                # appObj.oct_hw.SetAttenLevel(0, attenLines)

            if mode == 'chirp':
                tChirp = 1
                f0 = 100
                f1 = 100e3
                k = (f1 - f0) / tChirp
                nChirpPts = round(outputRate * tChirp)
                t = np.linspace(0, tChirp, nChirpPts)
                spkOut = np.cos(2 * np.pi * (f0 * t + (k / 2) * t**2))  # linear chirp: instantaneous frequency f0 + k*t sweeps from f0 to f1

                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')

                numInputSamples = int(inputRate * len(spkOut) / outputRate)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    mic_data = daq.readAnalogInput()
                    mic_data = mic_data[0, :]
                    mic_data_chirp = mic_data / micVoltsPerPascal

                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                npts = len(mic_data)
                t = np.linspace(0, npts / inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')

                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                # play reference tone
                refFreq = 4e3
                tRef = 50e-3

                nRefPts = round(outputRate * tRef)
                t = np.linspace(0, tRef, nRefPts)
                spkOut = np.cos(2 * np.pi * refFreq * t)

                # apply envelope
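                # 1 ms linear onset/offset ramps to reduce onset/offset clicks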
                i1 = round(outputRate * 1e-3)
                i2 = nRefPts - i1
                env = np.linspace(0, 1, i1)
                spkOut[0:i1] = spkOut[0:i1] * env
                spkOut[i2:] = spkOut[i2:] * (1 - env)

                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                    pass
                else:
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn,
                                          int(outputRate), spkOut)
                    daq.startAnalogOutput()

                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn,
                                         int(inputRate), numInputSamples)
                    daq.startAnalogInput()

                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)

                    mic_data = daq.readAnalogInput()
                    mic_data_ref = mic_data / micVoltsPerPascal

                if not testMode:
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()

                micData, spCal = processSpkCalDataChirp(
                    mic_data_chirp, mic_data_ref, inputRate, spCal, spkIdx, f0,
                    f1, refFreq)

                pl = appObj.spCal_micFFT
                pl.clear()
                df = micData.fft_freq[1] - micData.fft_freq[0]
                nf = len(micData.fft_freq)
                i1 = int(freq_array[0] * 0.9 / df)
                i2 = int(freq_array[-1] * 1.1 / df)
                print("SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" %
                      (df, i1, i2, nf))
                pl.plot(micData.fft_freq[i1:i2],
                        micData.fft_mag[i1:i2],
                        pen='b')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)

                pl = appObj.spCal_spkResp
                pl.clear()
                #                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                pl.plot(freq_array,
                        spCal.magResp[spkIdx, :],
                        pen="b",
                        symbol='o')
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Magnitude', 'db SPL', **labelStyle)
            else:
                for freq in freq_array:
                    print("runSpeakerCal freq=" + repr(freq))
                    spkOut = makeSpeakerCalibrationOutput(
                        freq, audioHW, trialDur)
                    npts = len(spkOut)
                    t = np.linspace(0, npts / outputRate, npts)

                    pl = appObj.spCal_output
                    pl.clear()
                    endIdx = int(5e-3 * outputRate)  # only plot first 5 ms
                    pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')

                    numInputSamples = int(inputRate * len(spkOut) / outputRate)

                    if testMode:
                        # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)
                        pass
                    else:

                        # setup the output task
                        daq.setupAnalogOutput([chanNameOut],
                                              audioHW.daqTrigChanIn,
                                              int(outputRate), spkOut)
                        daq.startAnalogOutput()

                        # setup the input task
                        daq.setupAnalogInput(chanNamesIn,
                                             audioHW.daqTrigChanIn,
                                             int(inputRate), numInputSamples)
                        daq.startAnalogInput()

                        # trigger the acquisition by sending a digital pulse
                        daq.sendDigTrig(audioHW.daqTrigChanOut)

                        mic_data = daq.readAnalogInput()
                        mic_data = mic_data[0, :]
                        mic_data = mic_data / micVoltsPerPascal

                    if not testMode:
                        daq.stopAnalogInput()
                        daq.stopAnalogOutput()
                        daq.clearAnalogInput()
                        daq.clearAnalogOutput()

                    npts = len(mic_data)
                    t = np.linspace(0, npts / inputRate, npts)
                    pl = appObj.spCal_micInput
                    pl.clear()
                    pl.plot(t, mic_data, pen='b')

                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Response', 'Pa', **labelStyle)

                    micData, spCal = processSpkCalData(mic_data, freq,
                                                       freq_idx, inputRate,
                                                       spCal, spkIdx, audioHW)

                    pl = appObj.spCal_micFFT
                    pl.clear()
                    df = micData.fft_freq[1] - micData.fft_freq[0]
                    nf = len(micData.fft_freq)
                    i1 = int(freq_array[0] * 0.9 / df)
                    i2 = int(freq_array[-1] * 1.1 / df)
                    print(
                        "SpeakerCalibration: df= %0.3f i1= %d i2= %d nf= %d" %
                        (df, i1, i2, nf))
                    pl.plot(micData.fft_freq[i1:i2],
                            micData.fft_mag[i1:i2],
                            pen='b')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)

                    pl = appObj.spCal_spkResp
                    pl.clear()
                    #                pl.plot(1000*spCal.freq[spkIdx, :], spCal.magResp[spkIdx, :], pen="b", symbol='o')
                    pl.plot(freq_array,
                            spCal.magResp[spkIdx, :],
                            pen="b",
                            symbol='o')
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)

                    freq_idx += 1

                    #                if appObj.getSaveState():
                    #                    if not isSaveDirInit:
                    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
                    #                        isSaveDirInit = True
                    #
                    #                    if saveOpts.saveRaw:
                    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)

                    QtGui.QApplication.processEvents(
                    )  # check for GUI events, such as button presses

                    # if done flag, break out of loop
                    if appObj.doneFlag:
                        break

                frameNum += 1

            # if done flag, break out of loop
            if appObj.doneFlag:
                break

        if not appObj.doneFlag:
            saveDir = appObj.configPath
            saveSpeakerCal(spCal, saveDir)
            appObj.audioHW.loadSpeakerCalFromProcData(spCal)
            appObj.spCal = spCal

    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical(
            appObj, "Error",
            "Error during calibration. Check command line output for details")

    # update the audio hardware speaker calibration
    appObj.isCollecting = False
    QtGui.QApplication.processEvents(
    )  # check for GUI events, such as button presses
    appObj.finishCollection()
Example #15
def runDPOAE(appObj, testMode=False):
    print("runDPOAE")
    
    try:
        appObj.tabWidget.setCurrentIndex(2)
        appObj.doneFlag = False
        appObj.isCollecting = True
        # trigRate = octfpga.GetTriggerRate()
        audioHW = appObj.audioHW
        bioamp = appObj.bioamp
        outputRate = audioHW.DAQOutputRate
        inputRate = audioHW.DAQInputRate
    
        # freq_array2 = audioParams.freq[1, :]
        freqArray = appObj.getFrequencyArray()
        
        if testMode:
            testDataDir = os.path.join(appObj.basePath, 'exampledata', 'DPOAE')
    #        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
    #        f = open(filePath, 'rb')
    #        audioParams = pickle.load(f)
    #        f.close()
        else:
            # freqArray = appObj.getFrequencyArray()
            i1 = appObj.DPOAE_freqLow_comboBox.currentIndex()
            i2 = appObj.DPOAE_freqHigh_comboBox.currentIndex()
            print("runDPOAE: i1= ", i1, "i2= ", i2)
    
            ampLow = appObj.DPOAE_ampLow_spinBox.value()
            ampHigh = appObj.DPOAE_ampHigh_spinBox.value()
            ampDelta = appObj.DPOAE_ampDelta_spinBox.value()
            
            # ampArray = np.arange(ampLow, ampHigh, ampDelta)
            #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
            #ampArray = np.linspace(ampLow, ampHigh, numSteps)
            ampArray = np.arange(ampLow, ampHigh, ampDelta)
            if ampArray[-1] != ampHigh:
                ampArray = np.hstack((ampArray, ampHigh))
            
            freqArray = freqArray[i1:i2+1]
    
        # numSpk = audioParams.getNumSpeakers()
        if not testMode:
            from DAQHardware import DAQHardware
            daq = DAQHardware()
    
        chanNamesIn= [ audioHW.mic_daqChan, bioamp.daqChan]
        micVoltsPerPascal = audioHW.micVoltsPerPascal
        trialDur = appObj.DPOAE_stimDuration_dblSpinBox.value() * 1e-3
        # nReps = appObj.DPOAEtrialReps_spinBox.value()
    
        # set the input rate to a multiple (4x) of the highest output frequency, comfortably above Nyquist so the stimulus frequency sits more towards the center of the analysis band
        inputRate = 4*freqArray[-1]
        inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
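        # e.g. with hypothetical rates: outputRate = 200 kHz and 4*freqArray[-1] = 32 kHz
        # gives floor(200/32) = 6, so inputRate = 200 kHz / 6 ~= 33.3 kHz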
        
        frameNum = 0
        isSaveDirInit = False

        attenLines1 = audioHW.attenL_daqChan
        attenLines2 = audioHW.attenR_daqChan
        
        freq_idx = 0
        DPOAEdata = None
        numSpk = appObj.speaker_comboBox.currentIndex()+1
        chanNameOut = audioHW.speakerL_daqChan 
        if numSpk > 1:
            chanNameOut = [audioHW.speakerL_daqChan, audioHW.speakerR_daqChan ]
        print("runDPOAE numSpk=", numSpk)
        
        for freq in freqArray:
            sp1, sp2 = makeDPOAEOutput(freq, trialDur, audioHW)
            # spkOut = np.tile(spkOut_trial, nReps)
            
            npts = len(sp1)
            tOut = np.linspace(0, npts/outputRate, npts)
            print("runDPOAE npts=%d len(spkOut)= %d len(tOut)= %d" % (npts, len(sp1), len(tOut)))
            amp_idx = 0
            # ptsPerRep = inputRate
            
            for amp in ampArray:
                print("runDPOAE freq=" + repr(freq), " amp= ", + amp, " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                vOut1, attenLvl1 = audioHW.getCalibratedOutputVoltageAndAttenLevel(freq, amp, 0)
                spkNum = numSpk - 1
                vOut2, attenLvl2 = audioHW.getCalibratedOutputVoltageAndAttenLevel(freq/1.22, amp, spkNum)
                if vOut1 > 0 and vOut2 > 0:
                    # attenSig = AudioHardware.makeLM1972AttenSig(0)
                    if not testMode:
                        if numSpk > 1:
                            audioHW.setAttenuatorLevel(attenLvl1, attenLvl2, daq)
                        else:
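                            # with a single speaker only one hardware attenuation level
                            # can be applied, so the two requested levels are consolidated
                            # to a single setting and the level difference is folded into
                            # one tone's output voltage via a factor of 10**(dbDiff/20)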
                            if attenLvl1 > attenLvl2:
                                dbDiff = attenLvl1 - attenLvl2
                                attenLvl1 = attenLvl2
                                vOut2 = vOut2*(10**(dbDiff/20))
                            elif attenLvl1 < attenLvl2:
                                dbDiff = attenLvl2 - attenLvl1
                                attenLvl2 = attenLvl1
                                vOut1 = vOut1*(10**(dbDiff/20))
                                
                            audioHW.setAttenuatorLevel(attenLvl1, audioHW.maxAtten, daq)
                            
                        # daq.sendDigOutDPOAEd(attenLines, attenSig)
                        # appObj.oct_hw.SetAttenLevel(0, attenLines)
                    
                    pl = appObj.DPOAE_output
                    pl.clear()
                    endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                    #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                    pl.plot(tOut, sp1 + sp2, pen='b')
                    
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Output', 'V', **labelStyle)
                    
                    numInputSamples = int(inputRate*len(sp1)/outputRate)
                    
                    if testMode:
                        # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                        pass
                    else:
        
                        # setup the output task
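                        # with two speakers each primary is driven on its own channel
                        # (a 2-row array); with one speaker the calibrated primaries are
                        # summed onto a single channel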
                        if numSpk > 1:
                            spkOut = np.vstack((vOut1*sp1, vOut2*sp2))
                        else:
                            spkOut = vOut1*sp1 + vOut2*sp2
                            
                        daq.setupAnalogOutput(chanNameOut if isinstance(chanNameOut, list) else [chanNameOut],
                                              audioHW.daqTrigChanIn, int(outputRate), spkOut)
                        daq.startAnalogOutput()
                        
                        # setup the input task
                        daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                        daq.startAnalogInput()
                    
                        # trigger the acquisition by sending a digital pulse
                        daq.sendDigTrig(audioHW.daqTrigChanOut)
                        
                        dataIn = daq.readAnalogInput()
                        mic_data = dataIn[0, :]
                        
                        mic_data = mic_data/micVoltsPerPascal
                    
                        daq.stopAnalogInput()
                        daq.stopAnalogOutput()
                        daq.clearAnalogInput()
                        daq.clearAnalogOutput()
                    
                    npts = len(mic_data)
                    t = np.linspace(0, npts/inputRate, npts)
                    pl = appObj.spCal_micInput
                    pl.clear()
                    pl.plot(t, mic_data, pen='b')
                    
                    labelStyle = appObj.xLblStyle
                    pl.setLabel('bottom', 'Time', 's', **labelStyle)
                    labelStyle = appObj.yLblStyle
                    pl.setLabel('left', 'Response', 'Pa', **labelStyle)
        
                    DPOAEptData, DPOAEdata = processDPOAEData(mic_data, freq, freq_idx, amp_idx, freqArray, ampArray, inputRate, DPOAEdata)
    
                    print("runDPOAE: plotting data")
                    plotDPOAEdata(appObj, DPOAEptData, DPOAEdata)
                    
                QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                
                # if done flag, break out of loop
                if appObj.doneFlag:
                    break
                
                frameNum += 1
                amp_idx += 1
                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
            
            freq_idx += 1


        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.DPOAEnumber
        name = 'DPOAE'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        
        saveOpts.saveMicData = appObj.DPOAE_saveMicData_checkBox.isChecked()
        saveOpts.saveMicFFT = appObj.DPOAE_saveMicFFT_checkBox.isChecked()
        saveDir = appObj.saveDir_lineEdit.text()
        
        plotName = 'DPOAE %d %s %s' % (number, timeStr, saveOpts.note)
        plotFilePath = saveDPOAEDataFig(DPOAEdata, trialDur, saveDir, plotName, timeStr)
        
        reply = QtGui.QMessageBox.question(appObj, 'Save', "Keep data?" , QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes)
        if reply == QtGui.QMessageBox.Yes:
            excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number, timeStr, note)
            saveDPOAEDataXLS(DPOAEdata, trialDur, excelWS, saveOpts)
            #saveDPOAEData(DPOAEdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)
            
            saveDPOAEDataPickle(DPOAEdata, trialDur, plotName, saveOpts, timeStr)
            appObj.DPOAEnumber += 1                
            
        else:
            os.remove(plotFilePath)
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during collection. Check command line output for details")           
        
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()
Example #16
def runNoiseExp(appObj, testMode=False):
    print("runNoiseExp")
    appObj.tabWidget.setCurrentIndex(5)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate
    
    cutoffLow = 1e3*appObj.noiseExp_filterLow_dblSpinBox.value()
    cutoffHigh = 1e3*appObj.noiseExp_filterHigh_dblSpinBox.value()
    durationMins = appObj.noiseExp_duration_dblSpinBox.value()
    durationSecs = durationMins*60
    
    try:
        if not testMode:
            from DAQHardware import DAQHardware
            daq = DAQHardware()
    
        chanNamesIn= [ audioHW.mic_daqChan]
        micVoltsPerPascal = audioHW.micVoltsPerPascal
        # mode = 'chirp'
        
        # make 1 second of noise         
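        # build band-limited noise in the frequency domain: a flat magnitude response
        # between cutoffLow and cutoffHigh with uniformly random phase, then inverse
        # FFT back to a time-domain signal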
        nOut = outputRate*1  
        magResp = np.zeros(nOut)
        fIdx1 = int(nOut*cutoffLow/outputRate)
        fIdx2 = int(nOut*cutoffHigh/outputRate)
        magResp[fIdx1:fIdx2] = 1
        phaseResp = 2*np.pi*np.random.rand(nOut) - np.pi
        sig = magResp*np.exp(-1j*phaseResp)
        spkOut = np.real(np.fft.ifft(sig))
        mx = np.max(spkOut)
        mn = np.min(spkOut)
        # normalize the signal to be between -1 and 1
        spkOut = 2*(spkOut - mn)/(mx - mn) - 1
        
        maxV = audioHW.speakerOutputRng[1]
        spkOut = maxV*spkOut
        
        pl = appObj.spCalTest_output
        pl.clear()
        #endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
        npts = len(spkOut)
        endIdx = npts
        t = np.linspace(0, npts/outputRate, npts)
        pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                
        numInputSamples = int(inputRate*0.1)
            
        if testMode:
            # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
            pass
        else:
            chanNameOut = audioHW.speakerL_daqChan 
            attenLines = audioHW.attenL_daqChan
            attenLinesOther = audioHW.attenR_daqChan
                        
            if not testMode:
                AudioHardware.Attenuator.setLevel(0, attenLines)
                AudioHardware.Attenuator.setLevel(60, attenLinesOther)
    
            # setup the output task
            daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), spkOut, isContinuous=True)
            daq.startAnalogOutput()
    
            # trigger the acquisition by sending a digital pulse
            daq.sendDigTrig(audioHW.daqTrigChanOut)
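            # the noise buffer now plays continuously; the loop below reads the mic in
            # ~100 ms chunks until the requested exposure duration has elapsed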
        
        tElapsed = 0
        tLast = time.time()
        npts = numInputSamples
        t = np.linspace(0, npts/inputRate, npts)
    
        while tElapsed < durationSecs:
            if not testMode:
                # setup the input task
                daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                daq.startAnalogInput()
            
                # trigger the acquisition by sending a digital pulse
                daq.sendDigTrig(audioHW.daqTrigChanOut)
                
                daq.waitDoneInput()
    
                mic_data = daq.readAnalogInput()
                mic_data = mic_data[0, :]
                
                daq.stopAnalogInput()
                daq.clearAnalogInput()
            else:
                mic_data = np.random.rand(numInputSamples)
                
            mic_data = mic_data/micVoltsPerPascal
            
            pl = appObj.spCalTest_micInput
            pl.clear()
            pl.plot(t, mic_data, pen='b')
            
            labelStyle = appObj.xLblStyle
            pl.setLabel('bottom', 'Time', 's', **labelStyle)
            labelStyle = appObj.yLblStyle
            pl.setLabel('left', 'Response', 'Pa', **labelStyle)

            # calculate RMS
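            # RMS pressure in Pa converted to dB SPL re 20 uPa: 20*log10(p_rms / 2e-5)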
            nMicPts = len(mic_data)
            micRMS = np.mean(mic_data**2)
            micRMS = 20*np.log10(micRMS**0.5/2e-5)
            appObj.spCalTest_rms_label.setText("%0.3f dB" % micRMS)            
            
            nfft = int(2**np.ceil(np.log(nMicPts*5)/np.log(2)))
            print("nfft = ", nfft)
            win_fcn = np.hanning(nMicPts)
            micFFT = 2*np.abs(np.fft.fft(win_fcn*mic_data, nfft))/nMicPts
            micFFT = 2*micFFT[0:len(micFFT) // 2]
            micFFT = 20*np.log10(micFFT/2e-5)
            freq = np.linspace(0, inputRate/2, len(micFFT))
            pl = appObj.spCalTest_micFFT
            pl.clear()
            #df = freq[1] - freq[0]
            #print("NoiseExp: df= %0.3f i1= %d i2= %d nf= %d" % (df, i1, i2, nf))
            pl.plot(freq, micFFT, pen='b')
            labelStyle = appObj.xLblStyle
            pl.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
            labelStyle = appObj.yLblStyle
            pl.setLabel('left', 'Magnitude', 'dB SPL', **labelStyle)
            
            QtGui.QApplication.processEvents() # check for GUI events, such as button presses
            
            # if done flag, break out of loop
            if appObj.doneFlag:
                break      
            
            t1 = time.time()
            tElapsed += (t1 - tLast)
            tLast = t1
    
        
        if not testMode:
            daq.stopAnalogOutput()
            daq.clearAnalogOutput()
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during calibration. Check command line output for details")           
        
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()


    
Example #18
def runCM(appObj, testMode=False):
    print("runCM")
    
    appObj.tabWidget.setCurrentIndex(4)
    appObj.doneFlag = False
    appObj.isCollecting = True
    # trigRate = octfpga.GetTriggerRate()
    audioHW = appObj.audioHW
    bioamp = appObj.bioamp
    outputRate = audioHW.DAQOutputRate
    inputRate = audioHW.DAQInputRate

    # freq_array2 = audioParams.freq[1, :]
    freqArray = appObj.getFrequencyArray()
    
    if testMode:
        testDataDir = os.path.join(appObj.basePath, 'exampledata', 'Speaker Calibration')
#        filePath = os.path.join(testDataDir, 'AudioParams.pickle')
#        f = open(filePath, 'rb')
#        audioParams = pickle.load(f)
#        f.close()
    else:
        # freqArray = appObj.getFrequencyArray()
        i1 = appObj.CM_freqLow_comboBox.currentIndex()
        i2 = appObj.CM_freqHigh_comboBox.currentIndex()
        print("runCM: i1= ", i1, "i2= ", i2)

        ampLow = appObj.CMampLow_spinBox.value()
        ampHigh = appObj.CMampHigh_spinBox.value()
        ampDelta = appObj.CMampDelta_spinBox.value()
        
        # ampArray = np.arange(ampLow, ampHigh, ampDelta)
        #numSteps = np.floor((ampHigh - ampLow)/ampDelta) + 1
        #ampArray = np.linspace(ampLow, ampHigh, numSteps)
        ampArray = np.arange(ampLow, ampHigh, ampDelta)
        if ampArray[-1] != ampHigh:
            ampArray = np.hstack((ampArray, ampHigh))
        
        freqArray = freqArray[i1:i2+1]

    # numSpk = audioParams.getNumSpeakers()
    if not testMode:
        from DAQHardware import DAQHardware
        daq = DAQHardware()

    chanNamesIn= [ audioHW.mic_daqChan, bioamp.daqChan]
    micVoltsPerPascal = audioHW.micVoltsPerPascal
    trialDur = appObj.CMstimDuration_dblSpinBox.value() * 1e-3
    stimOffset = appObj.CMstimOffset_dblSpinBox.value() * 1e-3
    nReps = appObj.CMtrialReps_spinBox.value()

    # the input rate could be set to three times the highest output frequency for some
    # margin above Nyquist, but this is currently disabled and the default DAQ input rate is used
    
    #inputRate = 3*freqArray[-1]
    # inputRate = outputRate / int(np.floor(outputRate / inputRate))  # pick closest input rate that evenly divides output rate
    
    
    try:
        frameNum = 0
        isSaveDirInit = False
        chanNameOut = audioHW.speakerL_daqChan 
        attenLines = audioHW.attenL_daqChan
        
        freq_idx = 0
        CMdata = None
        
        for freq in freqArray:
            spkOut_trial = makeCMOutput(freq, trialDur, stimOffset, audioHW)
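            # repeat the trial nReps times back-to-back; processCMData receives nReps so
            # the repetitions can be combined (e.g. averaged) downstream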
            spkOut = np.tile(spkOut_trial, nReps)
            
            npts = len(spkOut_trial)
            tOut = np.linspace(0, npts/outputRate, npts)
            print("runCM npts=%d len(spkOut_trial)= %d len(tOut)= %d" % (npts, len(spkOut_trial), len(tOut)))
            amp_idx = 0
            ptsPerRep = inputRate
            
            for amp in ampArray:
                print("runCM freq=" + repr(freq), " amp= ", + amp, " freq_idx= ", freq_idx, " amp_idx= ", amp_idx)
                vOut, attenLvl = audioHW.getCalibratedOutputVoltageAndAttenLevel(freq, amp, 0)
                
                # attenSig = AudioHardware.makeLM1972AttenSig(0)
                if not testMode:
                    # AudioHardware.Attenuator.setLevel(attenLvl, attenLines)
                    audioHW.setAttenuatorLevel(attenLvl, audioHW.maxAtten, daq)
                    # daq.sendDigOutCmd(attenLines, attenSig)
                    # appObj.oct_hw.SetAttenLevel(0, attenLines)
                
                pl = appObj.spCal_output
                pl.clear()
                endIdx = int(5e-3 * outputRate)        # only plot first 5 ms
                #pl.plot(t[0:endIdx], spkOut[0:endIdx], pen='b')
                pl.plot(tOut, spkOut_trial, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Output', 'V', **labelStyle)
                        
                
                numInputSamples = nReps*int(inputRate*len(spkOut_trial)/outputRate)
                
                
                if testMode:
                    # mic_data = OCTCommon.loadRawData(testDataDir, frameNum, dataType=3)                    
                    pass
                else:
    
                    # setup the output task
                    daq.setupAnalogOutput([chanNameOut], audioHW.daqTrigChanIn, int(outputRate), vOut*spkOut)
                    daq.startAnalogOutput()
                    
                    # setup the input task
                    daq.setupAnalogInput(chanNamesIn, audioHW.daqTrigChanIn, int(inputRate), numInputSamples) 
                    daq.startAnalogInput()
                
                    # trigger the acquisition by sending a digital pulse
                    daq.sendDigTrig(audioHW.daqTrigChanOut)
                    
                    timeout = numInputSamples/inputRate + 2
                    dataIn = daq.readAnalogInput(timeout)
                    mic_data = dataIn[0, :]
                    bioamp_data = dataIn[1, :]
                    
                    mic_data = mic_data/micVoltsPerPascal
                    bioamp_data = bioamp_data/bioamp.gain
                
                    daq.waitDoneOutput(stopAndClear=True)
                    daq.stopAnalogInput()
                    daq.clearAnalogInput()
                
                npts = len(mic_data)
                t = np.linspace(0, npts/inputRate, npts)
                pl = appObj.spCal_micInput
                pl.clear()
                pl.plot(t, mic_data, pen='b')
                
                labelStyle = appObj.xLblStyle
                pl.setLabel('bottom', 'Time', 's', **labelStyle)
                labelStyle = appObj.yLblStyle
                pl.setLabel('left', 'Response', 'Pa', **labelStyle)
    
    # def processCMData(mic_data, bioamp_data, nReps, freq, amp_idx, inputRate, CMdataIn):            
                CMptData, CMdata = processCMData(mic_data, bioamp_data, nReps, freq, freq_idx, amp_idx, freqArray, ampArray, inputRate, CMdata)

                print("runCM: plotting data")
                plotCMdata(appObj, CMptData, CMdata)
                
    #                if appObj.getSaveState():
    #                    if not isSaveDirInit:
    #                        saveDir = OCTCommon.initSaveDir(saveOpts, 'Speaker Calibration', audioParams=audioParams)
    #                        isSaveDirInit = True
    #    
    #                    if saveOpts.saveRaw:
    #                        OCTCommon.saveRawData(mic_data, saveDir, frameNum, dataType=3)
                    
                QtGui.QApplication.processEvents() # check for GUI events, such as button presses
                
                # if done flag, break out of loop
                if appObj.doneFlag:
                    break
                
                frameNum += 1
                amp_idx += 1
                
            # if done flag, break out of loop
            if appObj.doneFlag:
                break
            
            freq_idx += 1


        saveOpts = appObj.getSaveOpts()
        workbook = appObj.excelWB
        note = saveOpts.note
        number = appObj.CMnumber
        name = 'CM'
        d = datetime.datetime.now()
        timeStr = d.strftime('%H_%M_%S')
        excelWS = CMPCommon.initExcelSpreadsheet(workbook, name, number, timeStr, note)
    
        appObj.CMnumber += 1                
        saveOpts.saveTracings = appObj.CM_saveTracings_checkBox.isChecked(  )
        saveDir = appObj.saveDir_lineEdit.text()
        saveCMDataXLS(CMdata, trialDur, nReps, excelWS, saveOpts)
        #saveCMData(CMdata, trialDur, nReps, appObj.saveFileTxt_filepath, saveOpts, timeStr)
        
        plotName = 'CM %d %s %s' % (number, timeStr, saveOpts.note)
        saveCMDataFig(CMdata, trialDur, nReps, saveDir, plotName, timeStr)
        saveCMDataPickle(CMdata, trialDur, nReps, plotName, saveOpts, timeStr)
            
    except Exception as ex:
        traceback.print_exc(file=sys.stdout)
        QtGui.QMessageBox.critical (appObj, "Error", "Error during collection. Check command line output for details")
        
    appObj.isCollecting = False
    QtGui.QApplication.processEvents() # check for GUI events, such as button presses
    appObj.finishCollection()