def spectralfilterprops(thefilter, debug=False):
    """Compute passband and stopband summary statistics for a spectral filter.

    Parameters
    ----------
    thefilter : dict
        Must contain 'filter' (an object exposing getfreqlimits()),
        'frequencies' (the frequency axis), and 'transferfunc' (the
        filter's transfer function values on that axis).
    debug : bool, optional
        When True, print the target and actual band-edge frequencies.

    Returns
    -------
    dict
        Keys: 'passbandripple', 'lowerstopmean', 'lowerstopmax',
        'upperstopmean', 'upperstopmax'.  Stopband values are normalized
        by the passband mean.
    """
    freqs = thefilter['frequencies']
    transfer = thefilter['transferfunc']
    lowerstop, lowerpass, upperpass, upperstop = thefilter['filter'].getfreqlimits()

    # Convert the band-edge frequencies to indices on the frequency axis,
    # clamping the upper stop edge so it cannot run off the end.
    lowerstopindex = valtoindex(freqs, lowerstop)
    lowerpassindex = valtoindex(freqs, lowerpass)
    upperpassindex = valtoindex(freqs, upperpass)
    upperstopindex = np.min([valtoindex(freqs, upperstop), len(freqs) - 1])

    if debug:
        print('target freqs:', lowerstop, lowerpass, upperpass, upperstop)
        print('actual freqs:',
              freqs[lowerstopindex],
              freqs[lowerpassindex],
              freqs[upperpassindex],
              freqs[upperstopindex])

    passband = transfer[lowerpassindex:upperpassindex]
    passbandmean = np.mean(passband)
    passbandmax = np.max(passband)
    passbandmin = np.min(passband)

    response = {}
    response['passbandripple'] = (passbandmax - passbandmin) / passbandmean

    # Lower stopband statistics; zeros when there are too few points
    # below the lower stop edge to be meaningful.
    if lowerstopindex > 2:
        lowerband = transfer[0:lowerstopindex]
        response['lowerstopmean'] = np.mean(lowerband) / passbandmean
        response['lowerstopmax'] = np.max(np.abs(lowerband)) / passbandmean
    else:
        response['lowerstopmean'] = 0.0
        response['lowerstopmax'] = 0.0

    # Upper stopband statistics; zeros when there are too few points
    # above the upper stop edge.
    if len(transfer) - upperstopindex > 2:
        upperband = transfer[upperstopindex:-1]
        response['upperstopmean'] = np.mean(upperband) / passbandmean
        response['upperstopmax'] = np.max(np.abs(upperband)) / passbandmean
    else:
        response['upperstopmean'] = 0.0
        response['upperstopmax'] = 0.0
    return response
def track(self, x, fs):
    """Track the dominant spectral peak of a timecourse over time.

    Computes a spectrogram of ``x`` (zero-padded by half a segment on each
    end), then fits the spectral peak of each time column with a
    SimilarityFunctionFitter, marking columns whose peak lies outside the
    configured frequency band with -1.0.

    Parameters
    ----------
    x : array-like
        The input timecourse.
    fs : float
        The sampling frequency of ``x``, in Hz.

    Returns
    -------
    tuple
        (times, peakfreqs) — the spectrogram time axis (minus the final
        point) and the fitted peak frequency for each time point.
    """
    halfseg = int(self.nperseg // 2)
    padded = np.concatenate(
        [np.zeros(halfseg), x, np.zeros(halfseg)],
        axis=0,
    )
    self.freqs, self.times, thespectrogram = sp.signal.spectrogram(
        padded,
        fs=fs,
        detrend="constant",
        scaling="spectrum",
        nfft=None,
        window=np.hamming(self.nfft),
        noverlap=(self.nperseg - 1),
    )

    # band limits expressed as indices on the spectrogram frequency axis
    lowerliminpts = tide_util.valtoindex(self.freqs, self.lowerlim)
    upperliminpts = tide_util.valtoindex(self.freqs, self.upperlim)

    if self.debug:
        print(self.times.shape, self.freqs.shape, thespectrogram.shape)
        print(self.times)

    # set up the peak fitter used to localize the spectral peak in each column
    thefitter = SimilarityFunctionFitter(
        corrtimeaxis=self.freqs,
        lagmin=self.lowerlim,
        lagmax=self.upperlim,
        absmaxsigma=10.0,
        absminsigma=0.1,
        debug=self.debug,
        peakfittype="fastquad",
        zerooutbadfit=False,
        useguess=False,
    )

    numcolumns = thespectrogram.shape[1] - 1
    peakfreqs = np.zeros((numcolumns), dtype=float)
    for colnum in range(numcolumns):
        fitresult = thefitter.fit(thespectrogram[:, colnum])
        maxindex = fitresult[0]
        peakfreqs[colnum] = fitresult[1]
        # invalidate columns whose fitted peak falls outside the allowed band
        if not (lowerliminpts <= maxindex <= upperliminpts):
            peakfreqs[colnum] = -1.0
    return self.times[:-1], peakfreqs
def spectralfilterprops(thefilter, debug=False):
    """Summarize the passband and stopband behavior of a spectral filter.

    Parameters
    ----------
    thefilter : dict
        Dictionary with keys "filter" (an object with a getfreqs() method
        returning the four band edges), "frequencies" (the frequency axis),
        and "transferfunc" (the filter's transfer function values).
    debug : bool, optional
        If True, print target vs. actual band-edge frequencies.

    Returns
    -------
    dict
        Passband ripple plus stopband mean/max statistics, each stopband
        statistic normalized by the passband mean.
    """
    lowerstop, lowerpass, upperpass, upperstop = thefilter["filter"].getfreqs()
    freqaxis = thefilter["frequencies"]
    tfunc = thefilter["transferfunc"]
    lastindex = len(freqaxis) - 1

    # Map each band edge onto the frequency axis; the top edge is clipped
    # so it never runs off the end of the axis.
    lowerstopindex = valtoindex(freqaxis, lowerstop)
    lowerpassindex = valtoindex(freqaxis, lowerpass)
    upperpassindex = valtoindex(freqaxis, upperpass)
    upperstopindex = np.min([valtoindex(freqaxis, upperstop), lastindex])

    if debug:
        print("target freqs:", lowerstop, lowerpass, upperpass, upperstop)
        print(
            "actual freqs:",
            freqaxis[lowerstopindex],
            freqaxis[lowerpassindex],
            freqaxis[upperpassindex],
            freqaxis[upperstopindex],
        )

    inband = tfunc[lowerpassindex:upperpassindex]
    passbandmean = np.mean(inband)

    response = {}
    response["passbandripple"] = (np.max(inband) - np.min(inband)) / passbandmean

    # Normalized statistics for the lower stopband (if it is wide enough).
    if lowerstopindex > 2:
        below = tfunc[0:lowerstopindex]
        response["lowerstopmean"] = np.mean(below) / passbandmean
        response["lowerstopmax"] = np.max(np.abs(below)) / passbandmean
    else:
        response["lowerstopmean"] = 0.0
        response["lowerstopmax"] = 0.0

    # Normalized statistics for the upper stopband (if it is wide enough).
    if len(tfunc) - upperstopindex > 2:
        above = tfunc[upperstopindex:-1]
        response["upperstopmean"] = np.mean(above) / passbandmean
        response["upperstopmax"] = np.max(np.abs(above)) / passbandmean
    else:
        response["upperstopmean"] = 0.0
        response["upperstopmax"] = 0.0
    return response
def setlimits(self, freqmin, freqmax):
    """Set the frequency search limits for this object.

    Stores the limits, and — when the frequency axis has already been
    established — caches the corresponding index bounds, clamped to the
    valid range of the axis.

    Parameters
    ----------
    freqmin : float
        The lower frequency limit.
    freqmax : float
        The upper frequency limit.
    """
    self.freqmin = freqmin
    self.freqmax = freqmax
    if self.freqaxisvalid:
        # translate the limits into axis indices, clamped to [0, len - 1]
        lowindex = tide_util.valtoindex(self.freqaxis, self.freqmin)
        highindex = tide_util.valtoindex(self.freqaxis, self.freqmax)
        self.freqmininpts = np.max([0, lowindex])
        self.freqmaxinpts = np.min([highindex, len(self.freqaxis) - 1])
    if self.debug:
        print("setlimits:")
        print("\tfreqmin,freqmax:", self.freqmin, self.freqmax)
        print("\tfreqmininpts,freqmaxinpts:", self.freqmininpts, self.freqmaxinpts)
def getpeaks(xvals, yvals, xrange=None, bipolar=False, display=False):
    """Find peaks in yvals, returning location, height, and offset from x=0.

    Parameters
    ----------
    xvals : array-like
        The x axis corresponding to yvals.
    yvals : array-like
        The values to search for peaks.
    xrange : sequence of two floats, optional
        (min, max) limits on acceptable peak locations; defaults to the
        full extent of xvals.
    bipolar : bool, optional
        When True, also detect negative-going peaks and keep peaks of
        either sign; otherwise only positive peaks are kept.
    display : bool, optional
        When True, plot the input data with the detected peaks marked.

    Returns
    -------
    list of [float, float, float]
        One entry per accepted peak: [x location, y value, fractional
        index offset from the x=0 position].
    """
    foundpeaks, dummy = find_peaks(yvals, height=0)
    if bipolar:
        # detect troughs by peak-finding the negated data
        negativepeaks, dummy = find_peaks(-yvals, height=0)
        foundpeaks = np.concatenate((foundpeaks, negativepeaks))

    if xrange is None:
        lagmin, lagmax = xvals[0], xvals[-1]
    else:
        lagmin, lagmax = xrange[0], xrange[1]

    # fractional index of x = 0, used to express peak positions as offsets
    originloc = tide_util.valtoindex(xvals, 0.0, discrete=False)

    procpeaks = []
    for peakindex in foundpeaks:
        peakx = xvals[peakindex]
        if not (lagmin <= peakx <= lagmax):
            continue
        # in bipolar mode keep every in-range peak; otherwise only positive ones
        if bipolar or yvals[peakindex] > 0.0:
            procpeaks.append([
                peakx,
                yvals[peakindex],
                tide_util.valtoindex(xvals, peakx, discrete=False) - originloc,
            ])

    if display:
        plotx = [thepeak[0] for thepeak in procpeaks]
        ploty = [thepeak[1] for thepeak in procpeaks]
        plt.plot(xvals, yvals)
        plt.plot(plotx, ploty, "x")
        plt.plot(xvals, np.zeros_like(yvals), "--", color="gray")
        plt.show()
    return procpeaks
def autocorrcheck(corrscale, thexcorr, delta=0.1, acampthresh=0.1, aclagthresh=10.0, displayplots=False,
                  prewindow=True, detrendorder=1, debug=False):
    """Check an autocorrelation function for significant sidelobes.

    Scans the detected maxima of ``thexcorr`` to the right of the zero-lag
    peak; the first one whose amplitude exceeds ``acampthresh`` (within the
    lag limit ``aclagthresh``) is refined with a Gaussian fit and returned.

    Parameters
    ----------
    corrscale : array-like
        The lag axis of the autocorrelation function.
    thexcorr : array-like
        The autocorrelation values.
    delta : float, optional
        Minimum peak prominence passed to the peak detector.
    acampthresh : float, optional
        Minimum sidelobe amplitude to count as significant.
    aclagthresh : float, optional
        Maximum lag (in corrscale units) to search for sidelobes.
    displayplots : bool, optional
        When True, plot the sidelobe and its Gaussian fit.
    prewindow : bool, optional
        Unused here — presumably consumed by callers or an earlier
        processing stage; TODO confirm.
    detrendorder : int, optional
        Unused here — see note for prewindow.
    debug : bool, optional
        When True, print the raw peak list.

    Returns
    -------
    (sidelobetime, sidelobeamp) : tuple of float or (None, None)
        Fitted lag and amplitude of the first significant sidelobe, or
        (None, None) when no qualifying sidelobe is found.
    """
    lookahead = 2
    peaks = tide_fit.peakdetect(thexcorr, x_axis=corrscale, delta=delta, lookahead=lookahead)
    maxpeaks = np.asarray(peaks[0], dtype='float64')
    # minpeaks is computed but not used below
    minpeaks = np.asarray(peaks[1], dtype='float64')
    if len(peaks[0]) > 0:
        if debug:
            print(peaks)
        # the maximum nearest zero lag is taken to be the main (zero-lag) peak
        zeropkindex = np.argmin(abs(maxpeaks[:, 0]))
        for i in range(zeropkindex + 1, maxpeaks.shape[0]):
            if maxpeaks[i, 0] > aclagthresh:
                # past the lag limit - stop looking
                return None, None
            if maxpeaks[i, 1] > acampthresh:
                sidelobetime = maxpeaks[i, 0]
                sidelobeindex = tide_util.valtoindex(corrscale, sidelobetime)
                sidelobeamp = thexcorr[sidelobeindex]
                # walk right until the function drops below half the sidelobe
                # amplitude to estimate the half-width in bins
                numbins = 1
                while (sidelobeindex + numbins < np.shape(corrscale)[0] - 1) and (
                        thexcorr[sidelobeindex + numbins] > sidelobeamp / 2.0):
                    numbins += 1
                sidelobewidth = (corrscale[sidelobeindex + numbins]
                                 - corrscale[sidelobeindex]) * 2.0
                fitstart = sidelobeindex - numbins
                fitend = sidelobeindex + numbins
                # refine amplitude, location, and width with a Gaussian fit
                # over the local neighborhood of the sidelobe
                sidelobeamp, sidelobetime, sidelobewidth = tide_fit.gaussfit(sidelobeamp,
                                                                             sidelobetime,
                                                                             sidelobewidth,
                                                                             corrscale[fitstart:fitend + 1],
                                                                             thexcorr[fitstart:fitend + 1])
                if displayplots:
                    pl.plot(corrscale[fitstart:fitend + 1], thexcorr[fitstart:fitend + 1], 'k',
                            corrscale[fitstart:fitend + 1],
                            tide_fit.gauss_eval(corrscale[fitstart:fitend + 1],
                                                [sidelobeamp, sidelobetime, sidelobewidth]), 'r')
                    pl.show()
                return sidelobetime, sidelobeamp
    return None, None
def test_valtoindex(debug=False):
    """Exercise valtoindex() over a range of probe values.

    Builds a uniform 100-point axis with unit spacing, then probes it with
    values below, inside, and above the axis range (stepping by 0.7 so the
    probes fall between grid points), printing each probe value next to
    the axis value it maps to.

    Parameters
    ----------
    debug : bool, optional
        Accepted for interface consistency with the other tests; unused.
    """
    tr = 1.0
    testtr = 0.7
    xaxislen = 100
    xaxis = np.arange(0.0, tr * xaxislen, tr)
    maxx = np.max(xaxis)
    # probe from just below the axis to ~10% past its end
    testvec = np.arange(-1.0, 1.1 * maxx, testtr)
    for testval in testvec:
        indclosest = valtoindex(xaxis, testval)
        print(testval, xaxis[indclosest])
def setreftc(self, reftc):
    """Set the reference timecourse and initialize the frequency axis.

    Copies ``reftc``, preprocesses it with self.preptc(), and computes the
    self-coherence to establish the frequency axis and similarity-function
    geometry.  Frequency limits are initialized to the full axis range if
    not already set, and the corresponding index bounds are cached.

    Parameters
    ----------
    reftc : array-like
        The reference timecourse.
    """
    # '+ 0.0' forces a copy so the caller's array is not aliased
    self.reftc = reftc + 0.0
    self.prepreftc = self.preptc(self.reftc)

    # get frequency axis, etc
    # NOTE(review): the coherence of the reference with itself is used only
    # to establish the frequency axis; stray commented-out remnant
    # (window=self.windowfunc) left in place below.
    self.freqaxis, self.thecoherence = sp.signal.coherence(self.prepreftc,
                                                           self.prepreftc,
                                                           fs=self.Fs)  # window=self.windowfunc)'''
    self.similarityfunclen = len(self.thecoherence)
    self.similarityfuncorigin = 0
    self.freqaxisvalid = True
    # no comparison data has been processed yet
    self.datavalid = False
    if self.freqmin is None or self.freqmax is None:
        # default to the full frequency range of the axis
        self.setlimits(self.freqaxis[0], self.freqaxis[-1])
    # cache the index bounds; floor/ceiling discretization keeps the
    # requested band fully inside [freqmininpts, freqmaxinpts]
    self.freqmininpts = tide_util.valtoindex(self.freqaxis,
                                             self.freqmin,
                                             discretization="floor",
                                             debug=self.debug)
    self.freqmaxinpts = tide_util.valtoindex(self.freqaxis,
                                             self.freqmax,
                                             discretization="ceiling",
                                             debug=self.debug)
def fit(self, incorrfunc):
    """Locate and fit the principal peak of a similarity function.

    Finds the maximum of ``incorrfunc`` (or starts from self.maxguess when
    self.useguess is set), estimates initial amplitude/lag/width for the
    peak, optionally refines them with the method named by
    self.peakfittype ("COM", "gauss", "fastgauss", "fastquad", "quad", or
    "None" for no refinement), and sanity-checks every quantity against
    the configured limits, accumulating failure flags.

    Parameters
    ----------
    incorrfunc : array-like
        The similarity function values on self.corrtimeaxis.

    Returns
    -------
    tuple
        (maxindex, maxlag, flipfac * maxval, maxsigma, maskval,
        failreason, peakstart, peakend).  maskval is 0 when the fit
        failed (and zerooutbadfit zeroes the fitted values); failreason
        is a bitmask of FML_* flags.
    """
    # check to make sure xcorr_x and xcorr_y match
    if self.corrtimeaxis is None:
        print("Correlation time axis is not defined - exiting")
        sys.exit()
    if len(self.corrtimeaxis) != len(incorrfunc):
        print(
            "Correlation time axis and values do not match in length (",
            len(self.corrtimeaxis),
            "!=",
            len(incorrfunc),
            "- exiting",
        )
        sys.exit()
    # set initial parameters
    # absmaxsigma is in seconds
    # maxsigma is in Hz
    # maxlag is in seconds
    warnings.filterwarnings("ignore", "Number*")
    failreason = self.FML_NOERROR
    maskval = np.uint16(1)  # start out assuming the fit will succeed
    binwidth = self.corrtimeaxis[1] - self.corrtimeaxis[0]

    # set the search range
    lowerlim = 0
    upperlim = len(self.corrtimeaxis) - 1
    if self.debug:
        print(
            "initial search indices are",
            lowerlim,
            "to",
            upperlim,
            "(",
            self.corrtimeaxis[lowerlim],
            self.corrtimeaxis[upperlim],
            ")",
        )

    # make an initial guess at the fit parameters for the gaussian
    # start with finding the maximum value and its location
    flipfac = 1.0
    # '+ 0.0' forces a copy so the caller's array is not modified in place
    corrfunc = incorrfunc + 0.0
    if self.useguess:
        maxindex = tide_util.valtoindex(self.corrtimeaxis, self.maxguess)
        if corrfunc[maxindex] < 0.0:
            flipfac = -1.0
    else:
        maxindex, flipfac = self._maxindex_noedge(corrfunc)
    # flip negative peaks positive so all subsequent logic sees a maximum;
    # the sign is restored via flipfac on return
    corrfunc *= flipfac
    maxlag_init = (1.0 * self.corrtimeaxis[maxindex]).astype("float64")
    maxval_init = corrfunc[maxindex].astype("float64")
    if self.debug:
        print(
            "maxindex, maxlag_init, maxval_init:",
            maxindex,
            maxlag_init,
            maxval_init,
        )

    # set the baseline and baselinedev levels
    if (self.functype == "correlation") or (self.functype == "hybrid"):
        baseline = 0.0
        baselinedev = 0.0
    else:
        # for mutual information, there is a nonzero baseline, so we want the difference from that.
        baseline = np.median(corrfunc)
        baselinedev = mad(corrfunc)
    if self.debug:
        print("baseline, baselinedev:", baseline, baselinedev)

    # then calculate the width of the peak
    if self.peakfittype == "fastquad" or self.peakfittype == "COM":
        # these fitters only need a tight window around the maximum
        peakstart = np.max([1, maxindex - 2])
        peakend = np.min([len(self.corrtimeaxis) - 2, maxindex + 2])
    else:
        thegrad = np.gradient(corrfunc).astype(
            "float64")  # the gradient of the correlation function
        if (self.functype == "correlation") or (self.functype == "hybrid"):
            if self.peakfittype == "quad":
                peakpoints = np.where(
                    corrfunc > maxval_init - 0.05, 1, 0
                )  # mask for places where correlation exceeds searchfrac*maxval_init
            else:
                peakpoints = np.where(
                    corrfunc > self.searchfrac * maxval_init, 1, 0
                )  # mask for places where correlation exceeds searchfrac*maxval_init
        else:
            # for mutual information, there is a flattish, nonzero baseline, so we want the difference from that.
            peakpoints = np.where(
                corrfunc > (baseline + self.searchfrac * (maxval_init - baseline)),
                1,
                0,
            )
        # never allow the peak region to touch the array ends
        peakpoints[0] = 0
        peakpoints[-1] = 0
        peakstart = np.max([1, maxindex - 1])
        peakend = np.min([len(self.corrtimeaxis) - 2, maxindex + 1])
        if self.debug:
            print("initial peakstart, peakend:", peakstart, peakend)
        # grow the window outward while the mask (and, for correlation,
        # the gradient direction) says we are still on the peak
        if self.functype == "mutualinfo":
            while peakpoints[peakend + 1] == 1:
                peakend += 1
            while peakpoints[peakstart - 1] == 1:
                peakstart -= 1
        else:
            while thegrad[peakend + 1] <= 0.0 and peakpoints[peakend + 1] == 1:
                peakend += 1
            while thegrad[peakstart - 1] >= 0.0 and peakpoints[peakstart - 1] == 1:
                peakstart -= 1
        if self.debug:
            print("final peakstart, peakend:", peakstart, peakend)

        # deal with flat peak top
        while (peakend < (len(self.corrtimeaxis) - 3)
               and corrfunc[peakend] == corrfunc[peakend - 1]):
            peakend += 1
        while peakstart > 2 and corrfunc[peakstart] == corrfunc[peakstart + 1]:
            peakstart -= 1
        if self.debug:
            print("peakstart, peakend after flattop correction:", peakstart, peakend)
            print("\n")
            for i in range(peakstart, peakend + 1):
                print(self.corrtimeaxis[i], corrfunc[i])
            print("\n")
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.set_title("Peak sent to fitting routine")
            plt.plot(
                self.corrtimeaxis[peakstart:peakend + 1],
                corrfunc[peakstart:peakend + 1],
                "r",
            )
            plt.show()
    # This is calculated from first principles, but it's always big by a factor of ~1.4.
    # Which makes me think I dropped a factor of sqrt(2). So fix that with a final division
    maxsigma_init = np.float64(
        ((peakend - peakstart + 1) * binwidth /
         (2.0 * np.sqrt(-np.log(self.searchfrac)))) / np.sqrt(2.0))
    if self.debug:
        print("maxsigma_init:", maxsigma_init)

    # now check the values for errors
    if self.hardlimit:
        rangeextension = 0.0
    else:
        # allow the initial lag to overshoot the search range by 75%
        rangeextension = (self.lagmax - self.lagmin) * 0.75
    if not ((self.lagmin - rangeextension - binwidth) <= maxlag_init <=
            (self.lagmax + rangeextension + binwidth)):
        if maxlag_init <= (self.lagmin - rangeextension - binwidth):
            failreason |= self.FML_INITLAGLOW
            maxlag_init = self.lagmin - rangeextension - binwidth
        else:
            failreason |= self.FML_INITLAGHIGH
            maxlag_init = self.lagmax + rangeextension + binwidth
        if self.debug:
            print("bad initial")
    if maxsigma_init > self.absmaxsigma:
        failreason |= self.FML_INITWIDTHHIGH
        maxsigma_init = self.absmaxsigma
        if self.debug:
            print("bad initial width - too high")
    if peakend - peakstart < 2:
        failreason |= self.FML_INITWIDTHLOW
        # fall back to the width corresponding to a 3-point peak
        maxsigma_init = np.float64(
            ((2 + 1) * binwidth / (2.0 * np.sqrt(-np.log(self.searchfrac)))) / np.sqrt(2.0))
        if self.debug:
            print("bad initial width - too low")
    if (self.functype == "correlation") or (self.functype == "hybrid"):
        if not (self.lthreshval <= maxval_init <= self.uthreshval) and self.enforcethresh:
            failreason |= self.FML_INITAMPLOW
            if self.debug:
                print(
                    "bad initial amp:",
                    maxval_init,
                    "is less than",
                    self.lthreshval,
                )
        if maxval_init < 0.0:
            failreason |= self.FML_INITAMPLOW
            maxval_init = 0.0
            if self.debug:
                print("bad initial amp:", maxval_init, "is less than 0.0")
        if maxval_init > 1.0:
            failreason |= self.FML_INITAMPHIGH
            maxval_init = 1.0
            if self.debug:
                print("bad initial amp:", maxval_init, "is greater than 1.0")
    else:
        # somewhat different rules for mutual information peaks
        if ((maxval_init - baseline) < self.lthreshval * baselinedev) or (maxval_init < baseline):
            failreason |= self.FML_INITAMPLOW
            maxval_init = 0.0
            if self.debug:
                print("bad initial amp:", maxval_init, "is less than 0.0")
    if (failreason != self.FML_NOERROR) and self.zerooutbadfit:
        maxval = np.float64(0.0)
        maxlag = np.float64(0.0)
        maxsigma = np.float64(0.0)
    else:
        maxval = np.float64(maxval_init)
        maxlag = np.float64(maxlag_init)
        maxsigma = np.float64(maxsigma_init)

    # refine if necessary
    if self.peakfittype != "None":
        if self.peakfittype == "COM":
            # center-of-mass estimate of the peak location
            X = self.corrtimeaxis[peakstart:peakend + 1] - baseline
            data = corrfunc[peakstart:peakend + 1]
            maxval = maxval_init
            maxlag = np.sum(X * data) / np.sum(data)
            maxsigma = 10.0
        elif self.peakfittype == "gauss":
            X = self.corrtimeaxis[peakstart:peakend + 1] - baseline
            data = corrfunc[peakstart:peakend + 1]
            # do a least squares fit over the top of the peak
            # p0 = np.array([maxval_init, np.fmod(maxlag_init, lagmod), maxsigma_init], dtype='float64')
            p0 = np.array([maxval_init, maxlag_init, maxsigma_init], dtype="float64")
            if self.debug:
                print("fit input array:", p0)
            try:
                plsq, dummy = sp.optimize.leastsq(tide_fit.gaussresiduals,
                                                  p0,
                                                  args=(data, X),
                                                  maxfev=5000)
                maxval = plsq[0] + baseline
                maxlag = np.fmod((1.0 * plsq[1]), self.lagmod)
                maxsigma = plsq[2]
            # NOTE(review): bare except swallows all errors from leastsq
            # (including KeyboardInterrupt) - consider narrowing
            except:
                maxval = np.float64(0.0)
                maxlag = np.float64(0.0)
                maxsigma = np.float64(0.0)
            if self.debug:
                print("fit output array:", [maxval, maxlag, maxsigma])
        elif self.peakfittype == "fastgauss":
            X = self.corrtimeaxis[peakstart:peakend + 1] - baseline
            data = corrfunc[peakstart:peakend + 1]
            # do a non-iterative fit over the top of the peak
            # 6/12/2015 This is just broken.  Gives quantized maxima
            maxlag = np.float64(1.0 * np.sum(X * data) / np.sum(data))
            maxsigma = np.float64(
                np.sqrt(
                    np.abs(np.sum((X - maxlag)**2 * data) / np.sum(data))))
            maxval = np.float64(data.max()) + baseline
        elif self.peakfittype == "fastquad":
            maxlag, maxval, maxsigma, ismax, badfit = tide_fit.refinepeak_quad(
                self.corrtimeaxis, corrfunc, maxindex)
        elif self.peakfittype == "quad":
            # parabolic fit over the peak window
            X = self.corrtimeaxis[peakstart:peakend + 1]
            data = corrfunc[peakstart:peakend + 1]
            try:
                thecoffs = np.polyfit(X, data, 2)
                a = thecoffs[0]
                b = thecoffs[1]
                c = thecoffs[2]
                maxlag = -b / (2.0 * a)
                maxval = a * maxlag * maxlag + b * maxlag + c
                maxsigma = 1.0 / np.fabs(a)
                if self.debug:
                    print("poly coffs:", a, b, c)
                    print("maxlag, maxval, maxsigma:", maxlag, maxval, maxsigma)
            except np.lib.polynomial.RankWarning:
                maxlag = 0.0
                maxval = 0.0
                maxsigma = 0.0
            if self.debug:
                print("\n")
                for i in range(len(X)):
                    print(X[i], data[i])
                print("\n")
                fig = plt.figure()
                ax = fig.add_subplot(111)
                ax.set_title("Peak and fit")
                plt.plot(X, data, "r")
                plt.plot(X, c + b * X + a * X * X, "b")
                plt.show()
        else:
            print("illegal peak refinement type")

        # check for errors in fit
        fitfail = False
        if self.bipolar:
            lowestcorrcoeff = -1.0
        else:
            lowestcorrcoeff = 0.0
        if (self.functype == "correlation") or (self.functype == "hybrid"):
            if maxval < lowestcorrcoeff:
                failreason |= self.FML_FITAMPLOW
                maxval = lowestcorrcoeff
                if self.debug:
                    print("bad fit amp: maxval is lower than lower limit")
                fitfail = True
            if np.abs(maxval) > 1.0:
                if not self.allowhighfitamps:
                    failreason |= self.FML_FITAMPHIGH
                    if self.debug:
                        print(
                            "bad fit amp: magnitude of",
                            maxval,
                            "is greater than 1.0",
                        )
                    fitfail = True
                # clip to +/-1 regardless of whether this counts as a failure
                maxval = 1.0 * np.sign(maxval)
        else:
            # different rules for mutual information peaks
            if ((maxval - baseline) < self.lthreshval * baselinedev) or (maxval < baseline):
                failreason |= self.FML_FITAMPLOW
                if self.debug:
                    if (maxval - baseline) < self.lthreshval * baselinedev:
                        print(
                            "FITAMPLOW: maxval - baseline:",
                            maxval - baseline,
                            " < lthreshval * baselinedev:",
                            self.lthreshval * baselinedev,
                        )
                    if maxval < baseline:
                        print("FITAMPLOW: maxval < baseline:", maxval, baseline)
                # NOTE(review): this resets maxval_init rather than maxval,
                # and does not set fitfail - verify this is intentional
                maxval_init = 0.0
                if self.debug:
                    print("bad fit amp: maxval is lower than lower limit")
        if (self.lagmin > maxlag) or (maxlag > self.lagmax):
            if self.debug:
                print("bad lag after refinement")
            if self.lagmin > maxlag:
                failreason |= self.FML_FITLAGLOW
                maxlag = self.lagmin
            else:
                failreason |= self.FML_FITLAGHIGH
                maxlag = self.lagmax
            fitfail = True
        if maxsigma > self.absmaxsigma:
            failreason |= self.FML_FITWIDTHHIGH
            if self.debug:
                print("bad width after refinement:", maxsigma, ">", self.absmaxsigma)
            maxsigma = self.absmaxsigma
            fitfail = True
        if maxsigma < self.absminsigma:
            failreason |= self.FML_FITWIDTHLOW
            if self.debug:
                print("bad width after refinement:", maxsigma, "<", self.absminsigma)
            maxsigma = self.absminsigma
            fitfail = True
        if fitfail:
            if self.debug:
                print("fit fail")
            if self.zerooutbadfit:
                maxval = np.float64(0.0)
                maxlag = np.float64(0.0)
                maxsigma = np.float64(0.0)
            maskval = np.uint16(0)
        # print(maxlag_init, maxlag, maxval_init, maxval, maxsigma_init, maxsigma, maskval, failreason, fitfail)
    else:
        # no refinement requested - report the initial estimates
        maxval = np.float64(maxval_init)
        maxlag = np.float64(np.fmod(maxlag_init, self.lagmod))
        maxsigma = np.float64(maxsigma_init)
        if failreason != self.FML_NOERROR:
            maskval = np.uint16(0)

    if self.debug or self.displayplots:
        print(
            "init to final: maxval",
            maxval_init,
            maxval,
            ", maxlag:",
            maxlag_init,
            maxlag,
            ", width:",
            maxsigma_init,
            maxsigma,
        )
    if self.displayplots and (self.peakfittype != "None") and (maskval != 0.0):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Data and fit")
        # NOTE(review): X and data are only bound for the COM/gauss/
        # fastgauss/quad branches - this would raise NameError after a
        # "fastquad" fit; verify
        hiresx = np.arange(X[0], X[-1], (X[1] - X[0]) / 10.0)
        plt.plot(
            X,
            data,
            "ro",
            hiresx,
            tide_fit.gauss_eval(hiresx, np.array([maxval, maxlag, maxsigma])),
            "b-",
        )
        plt.show()
    return (
        maxindex,
        maxlag,
        flipfac * maxval,
        maxsigma,
        maskval,
        failreason,
        peakstart,
        peakend,
    )
def findmaxlag_gauss(thexcorr_x,
                     thexcorr_y,
                     lagmin, lagmax, widthlimit,
                     edgebufferfrac=0.0,
                     threshval=0.0,
                     uthreshval=30.0,
                     debug=False,
                     tweaklims=True,
                     zerooutbadfit=True,
                     refine=False,
                     maxguess=0.0,
                     useguess=False,
                     searchfrac=0.5,
                     fastgauss=False,
                     lagmod=1000.0,
                     enforcethresh=True,
                     displayplots=False):
    """Find and (optionally) Gaussian-fit the main peak of a correlation function.

    Parameters
    ----------
    thexcorr_x : array-like
        The lag axis of the correlation function.
    thexcorr_y : array-like
        The correlation function values.
    lagmin, lagmax : float
        The allowed range of fitted lags.
    widthlimit : float
        Maximum allowed peak width (same units as the comments below).
    edgebufferfrac : float, optional
        Fraction of the axis to exclude at each end of the search range.
    threshval, uthreshval : float, optional
        Lower/upper amplitude thresholds (uthreshval is accepted but not
        checked in this routine).
    debug : bool, optional
        Print the initial and final fit parameters.
    tweaklims : bool, optional
        Walk the search limits inward past monotonically decreasing edges.
    zerooutbadfit : bool, optional
        Zero the returned values when any failure flag is set.
    refine : bool, optional
        Refine the initial estimate with a fit over the peak top.
    maxguess : float, optional
        Starting lag used when useguess is True.
    useguess : bool, optional
        Start the search from maxguess instead of the global maximum.
    searchfrac : float, optional
        Fraction of the peak height that defines the peak width.
    fastgauss : bool, optional
        Use the non-iterative moment-based fit instead of least squares.
    lagmod : float, optional
        Modulus applied to the fitted lag.
    enforcethresh : bool, optional
        Fail the fit when the initial amplitude is below threshval.
    displayplots : bool, optional
        Plot the data and the refined fit.

    Returns
    -------
    tuple
        (maxindex, maxlag, maxval, maxsigma, maskval, failreason,
        fitstart, fitend).  maskval is 0 when any FML_* failure flag is
        set in failreason.
    """
    # set initial parameters
    # widthlimit is in seconds
    # maxsigma is in Hz
    # maxlag is in seconds
    warnings.filterwarnings("ignore", "Number*")
    failreason = np.uint16(0)
    maxlag = np.float64(0.0)
    maxval = np.float64(0.0)
    maxsigma = np.float64(0.0)
    maskval = np.uint16(1)
    numlagbins = len(thexcorr_y)
    binwidth = thexcorr_x[1] - thexcorr_x[0]
    searchbins = int(widthlimit // binwidth)
    lowerlim = int(numlagbins * edgebufferfrac)
    upperlim = numlagbins - lowerlim - 1
    if tweaklims:
        # move the limits inward past any strictly decreasing edge samples
        lowerlim = 0
        upperlim = numlagbins - 1
        while (thexcorr_y[lowerlim + 1] < thexcorr_y[lowerlim]) and (lowerlim + 1) < upperlim:
            lowerlim += 1
        while (thexcorr_y[upperlim - 1] < thexcorr_y[upperlim]) and (upperlim - 1) > lowerlim:
            upperlim -= 1
    # failure-reason bit flags (FML_INITFAIL is defined but never set here)
    FML_BADAMPLOW = np.uint16(0x01)
    FML_BADAMPHIGH = np.uint16(0x02)
    FML_BADSEARCHWINDOW = np.uint16(0x04)
    FML_BADWIDTH = np.uint16(0x08)
    FML_BADLAG = np.uint16(0x10)
    FML_HITEDGE = np.uint16(0x20)
    FML_FITFAIL = np.uint16(0x40)
    FML_INITFAIL = np.uint16(0x80)

    # make an initial guess at the fit parameters for the gaussian
    # start with finding the maximum value
    if useguess:
        maxindex = tide_util.valtoindex(thexcorr_x, maxguess)
        # NOTE(review): nlowerlim/nupperlim are computed but not used below
        nlowerlim = int(maxindex - widthlimit / 2.0)
        nupperlim = int(maxindex + widthlimit / 2.0)
        if nlowerlim < lowerlim:
            nlowerlim = lowerlim
            nupperlim = lowerlim + int(widthlimit)
        if nupperlim > upperlim:
            nupperlim = upperlim
            nlowerlim = upperlim - int(widthlimit)
        maxval_init = thexcorr_y[maxindex].astype('float64')
    else:
        maxindex = (np.argmax(thexcorr_y[lowerlim:upperlim]) + lowerlim).astype('int32')
        maxval_init = thexcorr_y[maxindex].astype('float64')

    # now get a location for that value
    maxlag_init = (1.0 * thexcorr_x[maxindex]).astype('float64')

    # and calculate the width of the peak
    # walk outward from the maximum in each direction while the function
    # stays above searchfrac * maxval_init, capped at searchbins
    upperlimit = len(thexcorr_y) - 1
    lowerlimit = 0
    i = 0
    j = 0
    while (maxindex + i <= upperlimit) and (
            thexcorr_y[maxindex + i] > searchfrac * maxval_init) and (i < searchbins):
        i += 1
    if (maxindex + i > upperlimit) or (i > searchbins):
        i -= 1
    while (maxindex - j >= lowerlimit) and (
            thexcorr_y[maxindex - j] > searchfrac * maxval_init) and (j < searchbins):
        j += 1
    if (maxindex - j < lowerlimit) or (j > searchbins):
        j -= 1

    # This is calculated from first principles, but it's always big by a factor of ~1.4.
    # Which makes me think I dropped a factor of sqrt(2). So fix that with a final division
    maxsigma_init = np.float64(((i + j + 1) * binwidth /
                                (2.0 * np.sqrt(-np.log(searchfrac)))) / np.sqrt(2.0))

    # now check the values for errors and refine if necessary
    fitend = min(maxindex + i + 1, upperlimit)
    fitstart = max(1, maxindex - j)
    if not ((lagmin + binwidth) <= maxlag_init <= (lagmax - binwidth)):
        failreason += FML_HITEDGE
        if lagmin + binwidth <= maxlag_init:
            maxlag_init = lagmin + binwidth
        else:
            maxlag_init = lagmax - binwidth
    if i + j + 1 < 3:
        # peak too narrow to characterize - substitute a 3-point width
        failreason += FML_BADSEARCHWINDOW
        maxsigma_init = np.float64(
            (3.0 * binwidth / (2.0 * np.sqrt(-np.log(searchfrac)))) / np.sqrt(2.0))
    if maxsigma_init > widthlimit:
        failreason += FML_BADWIDTH
        maxsigma_init = widthlimit
    if (maxval_init < threshval) and enforcethresh:
        failreason += FML_BADAMPLOW
    if (maxval_init < 0.0):
        failreason += FML_BADAMPLOW
        maxval_init = 0.0
    if (maxval_init > 1.0):
        failreason |= FML_BADAMPHIGH
        maxval_init = 1.0
    if failreason > 0:
        maskval = np.uint16(0)
    if failreason > 0 and zerooutbadfit:
        maxval = np.float64(0.0)
        maxlag = np.float64(0.0)
        maxsigma = np.float64(0.0)
    else:
        if refine:
            data = thexcorr_y[fitstart:fitend]
            X = thexcorr_x[fitstart:fitend]
            if fastgauss:
                # do a non-iterative fit over the top of the peak
                # 6/12/2015  This is just broken.  Gives quantized maxima
                maxlag = np.float64(1.0 * sum(X * data) / sum(data))
                # maxsigma = np.sqrt(abs(np.square(sum((X - maxlag)) * data) / sum(data)))
                maxsigma = np.float64(
                    np.sqrt(np.fabs(np.sum((X - maxlag) ** 2 * data) / np.sum(data))))
                maxval = np.float64(data.max())
            else:
                # do a least squares fit over the top of the peak
                p0 = np.array([maxval_init, maxlag_init, maxsigma_init], dtype='float64')
                if fitend - fitstart >= 3:
                    plsq, dummy = sp.optimize.leastsq(gaussresiduals, p0,
                                                      args=(data, X), maxfev=5000)
                    maxval = plsq[0]
                    maxlag = np.fmod((1.0 * plsq[1]), lagmod)
                    maxsigma = plsq[2]
                # if maxval > 1.0, fit failed catastrophically, zero out or reset to initial value
                # corrected logic for 1.1.6
                if (np.fabs(maxval)) > 1.0 or (lagmin > maxlag) or (maxlag > lagmax):
                    if zerooutbadfit:
                        maxval = np.float64(0.0)
                        maxlag = np.float64(0.0)
                        maxsigma = np.float64(0.0)
                        # NOTE(review): int16 here vs uint16 everywhere else - verify
                        maskval = np.int16(0)
                    else:
                        maxval = np.float64(maxval_init)
                        maxlag = np.float64(maxlag_init)
                        maxsigma = np.float64(maxsigma_init)
        else:
            maxval = np.float64(maxval_init)
            maxlag = np.float64(np.fmod(maxlag_init, lagmod))
            maxsigma = np.float64(maxsigma_init)
        if maxval == 0.0:
            failreason += FML_FITFAIL
        if not (lagmin <= maxlag <= lagmax):
            failreason += FML_BADLAG
        if failreason > 0:
            maskval = np.uint16(0)
        if failreason > 0 and zerooutbadfit:
            maxval = np.float64(0.0)
            maxlag = np.float64(0.0)
            maxsigma = np.float64(0.0)
    if debug or displayplots:
        print("init to final: maxval", maxval_init, maxval,
              ", maxlag:", maxlag_init, maxlag,
              ", width:", maxsigma_init, maxsigma)
    if displayplots and refine and (maskval != 0.0):
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_title('Data and fit')
        hiresx = np.arange(X[0], X[-1], (X[1] - X[0]) / 10.0)
        pl.plot(X, data, 'ro', hiresx,
                gauss_eval(hiresx, np.array([maxval, maxlag, maxsigma])), 'b-')
        pl.show()
    return maxindex, maxlag, maxval, maxsigma, maskval, failreason, fitstart, fitend
def check_autocorrelation(
    corrscale,
    thexcorr,
    delta=0.1,
    acampthresh=0.1,
    aclagthresh=10.0,
    displayplots=False,
    detrendorder=1,
):
    """Check an autocorrelation function for significant sidelobes.

    Scans the detected maxima of ``thexcorr`` to the right of the
    zero-lag peak; the first one whose amplitude exceeds ``acampthresh``
    (and whose lag is within ``aclagthresh``) is refined with a Gaussian
    fit and returned.

    Parameters
    ----------
    corrscale : array-like
        The lag axis of the autocorrelation function.
    thexcorr : array-like
        The autocorrelation values.
    delta : float, optional
        Minimum prominence passed to the peak detector.
    acampthresh : float, optional
        Minimum sidelobe amplitude to count as significant.
    aclagthresh : float, optional
        Maximum lag to search for sidelobes.
    displayplots : bool, optional
        When True, plot the sidelobe and its Gaussian fit.
    detrendorder : int, optional
        Unused in this routine - presumably consumed by callers; TODO confirm.

    Returns
    -------
    sidelobetime : float or None
        Fitted lag of the first significant sidelobe.
    sidelobeamp : float or None
        Fitted amplitude of that sidelobe.
    """
    lookahead = 2
    peaks = tide_fit.peakdetect(thexcorr, x_axis=corrscale, delta=delta, lookahead=lookahead)
    maxpeaks = np.asarray(peaks[0], dtype="float64")
    if len(peaks[0]) > 0:
        LGR.debug(peaks)
        # the maximum nearest zero lag is taken to be the main (zero-lag) peak
        zeropkindex = np.argmin(abs(maxpeaks[:, 0]))
        for i in range(zeropkindex + 1, maxpeaks.shape[0]):
            if maxpeaks[i, 0] > aclagthresh:
                # past the lag limit - stop looking
                return None, None
            if maxpeaks[i, 1] > acampthresh:
                sidelobetime = maxpeaks[i, 0]
                sidelobeindex = tide_util.valtoindex(corrscale, sidelobetime)
                sidelobeamp = thexcorr[sidelobeindex]
                # walk right until the function drops below half the sidelobe
                # amplitude to estimate the half-width in bins
                numbins = 1
                while (sidelobeindex + numbins < np.shape(corrscale)[0] - 1) and (
                    thexcorr[sidelobeindex + numbins] > sidelobeamp / 2.0
                ):
                    numbins += 1
                sidelobewidth = (
                    corrscale[sidelobeindex + numbins] - corrscale[sidelobeindex]
                ) * 2.0
                fitstart = sidelobeindex - numbins
                fitend = sidelobeindex + numbins
                # refine amplitude, location, and width with a Gaussian fit
                # over the local neighborhood of the sidelobe
                sidelobeamp, sidelobetime, sidelobewidth = tide_fit.gaussfit(
                    sidelobeamp,
                    sidelobetime,
                    sidelobewidth,
                    corrscale[fitstart : fitend + 1],
                    thexcorr[fitstart : fitend + 1],
                )
                if displayplots:
                    plt.plot(
                        corrscale[fitstart : fitend + 1],
                        thexcorr[fitstart : fitend + 1],
                        "k",
                        corrscale[fitstart : fitend + 1],
                        tide_fit.gauss_eval(
                            corrscale[fitstart : fitend + 1],
                            [sidelobeamp, sidelobetime, sidelobewidth],
                        ),
                        "r",
                    )
                    plt.show()
                return sidelobetime, sidelobeamp
    return None, None
def congrid(xaxis, loc, val, width, kernel="kaiser", cyclic=True, debug=False):
    """
    Perform a convolution gridding operation with a Kaiser-Bessel or Gaussian kernel of width 'width'.
    Grid parameters are cached for performance.

    Parameters
    ----------
    xaxis: array-like
        The target axis for resampling
    loc: float
        The location, in x-axis units, of the sample to be gridded
    val: float
        The value to be gridded
    width: float
        The width of the gridding kernel in target bins
    kernel: {'old', 'gauss', 'kaiser'}, optional
        The type of convolution gridding kernel.  Default is 'kaiser'.
    cyclic: bool, optional
        When True, gridding wraps around the endpoints of xaxis.  Default is True.
    debug: bool, optional
        When True, output additional information about the gridding process

    Returns
    -------
    vals: array-like
        The input value, convolved with the gridding kernel, projected on to x axis points
    weights: array-like
        The values of convolution kernel, projected on to x axis points (used for normalization)
    indices: array-like
        The indices along the x axis where the vals and weights fall.

    Notes
    -----
    See IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL. 10, NO. 3, SEPTEMBER 1991
    """
    # module-level cache of kernel values, keyed by fractional offset;
    # invalidated whenever the kernel type or width changes
    global congridyvals
    if (congridyvals["kernel"] != kernel) or (congridyvals["width"] != width):
        if congridyvals["kernel"] != kernel:
            if debug:
                print(congridyvals["kernel"], "!=", kernel)
        if congridyvals["width"] != width:
            if debug:
                print(congridyvals["width"], "!=", width)
        if debug:
            print("(re)initializing congridyvals")
        congridyvals = {}
        congridyvals["kernel"] = kernel
        congridyvals["width"] = width * 1.0
    # empirically optimal shape parameters per half-integral width
    # (indexed by kernelindex below) - presumably from the cited paper;
    # TODO confirm
    optsigma = np.array([0.4241, 0.4927, 0.4839, 0.5063, 0.5516, 0.5695, 0.5682, 0.5974])
    optbeta = np.array([1.9980, 2.3934, 3.3800, 4.2054, 4.9107, 5.7567, 6.6291, 7.4302])
    xstep = xaxis[1] - xaxis[0]
    if (loc < xaxis[0] - xstep / 2.0 or loc > xaxis[-1] + xstep / 2.0) and not cyclic:
        print("loc", loc, "not in range", xaxis[0], xaxis[-1])

    # choose the smoothing kernel based on the width
    if kernel != "old":
        if not (1.5 <= width <= 5.0) or (np.fmod(width, 0.5) > 0.0):
            print("congrid: width is", width)
            print("congrid: width must be a half-integral value between 1.5 and 5.0 inclusive")
            sys.exit()
        else:
            kernelindex = int((width - 1.5) // 0.5)

    # find the closest grid point to the target location, calculate relative offsets from this point
    center = tide_util.valtoindex(xaxis, loc)
    offset = np.fmod(np.round((loc - xaxis[center]) / xstep, 3), 1.0)  # will vary from -0.5 to 0.5
    if cyclic:
        # wrap offsets that fall past either end of the axis
        if center == len(xaxis) - 1 and offset > 0.5:
            center = 0
            offset -= 1.0
        if center == 0 and offset < -0.5:
            center = len(xaxis) - 1
            offset += 1.0
    if not (-0.5 <= offset <= 0.5):
        print("(loc, xstep, center, offset):", loc, xstep, center, offset)
        print("xaxis:", xaxis)
        sys.exit()
    # kernel values are cached per rounded fractional offset
    offsetkey = str(offset)

    if kernel == "old":
        if debug:
            print("gridding with old kernel")
        widthinpts = int(np.round(width * 4.6 / xstep))
        # force an odd number of points so the kernel is centered
        widthinpts -= widthinpts % 2 - 1
        try:
            yvals = congridyvals[offsetkey]
        except KeyError:
            if debug:
                print("new key:", offsetkey)
            xvals = (
                np.linspace(
                    -xstep * (widthinpts // 2),
                    xstep * (widthinpts // 2),
                    num=widthinpts,
                    endpoint=True,
                )
                + offset
            )
            congridyvals[offsetkey] = tide_fit.gauss_eval(xvals, np.array([1.0, 0.0, width]))
            yvals = congridyvals[offsetkey]
        startpt = int(center - widthinpts // 2)
        indices = range(startpt, startpt + widthinpts)
        # np.remainder implements the cyclic wraparound of the indices
        indices = np.remainder(indices, len(xaxis))
        if debug:
            print("center, offset, indices, yvals", center, offset, indices, yvals)
        return val * yvals, yvals, indices
    else:
        offsetinpts = center + offset
        startpt = int(np.ceil(offsetinpts - width / 2.0))
        endpt = int(np.floor(offsetinpts + width / 2.0))
        indices = np.remainder(range(startpt, endpt + 1), len(xaxis))
        try:
            yvals = congridyvals[offsetkey]
        except KeyError:
            if debug:
                print("new key:", offsetkey)
            xvals = indices - center + offset
            if kernel == "gauss":
                sigma = optsigma[kernelindex]
                congridyvals[offsetkey] = tide_fit.gauss_eval(xvals, np.array([1.0, 0.0, sigma]))
            elif kernel == "kaiser":
                beta = optbeta[kernelindex]
                congridyvals[offsetkey] = tide_fit.kaiserbessel_eval(
                    xvals, np.array([beta, width / 2.0])
                )
            else:
                print("illegal kernel value in congrid - exiting")
                sys.exit()
            yvals = congridyvals[offsetkey]
            if debug:
                print("xvals, yvals", xvals, yvals)
        if debug:
            print("center, offset, indices, yvals", center, offset, indices, yvals)
        return val * yvals, yvals, indices
def showtc(args):
    """Plot timecourses (or their spectra) read from one or more text files.

    Reads every column of every file in ``args.textfilenames``, optionally
    converts each column to a power/phase spectrum, then renders all traces
    either overlaid on one axis or on separate (optionally y-linked) subplots.
    Displays interactively or saves to ``args.outputfile``.

    NOTE(review): mutates ``args`` in place (samplerate, thestarttime,
    theendtime, voffset, xlabel, ylabel) — callers should not rely on the
    namespace being unchanged.  Exits the process via sys.exit() on bad
    option values rather than raising.
    """
    # set the sample rate
    if args.samplerate == "auto":
        samplerate = 1.0
        args.samplerate = samplerate
    else:
        samplerate = args.samplerate

    # set the appropriate display mode
    if args.displaymode == "time":
        dospectrum = False
        specmode = "power"
    elif args.displaymode == "power":
        dospectrum = True
        specmode = "power"
    elif args.displaymode == "phase":
        dospectrum = True
        specmode = "phase"
    else:
        print("illegal display mode")
        sys.exit()

    # determine how to composite multiple plots
    if args.plotformat == "overlaid":
        separate = False
        linky = True
    elif args.plotformat == "separate":
        separate = True
        linky = False
    elif args.plotformat == "separatelinked":
        separate = True
        linky = True
    else:
        print("illegal formatting mode")
        sys.exit()

    # set various cosmetic aspects of the plots
    if args.colors is not None:
        colornames = args.colors.split(",")
    else:
        colornames = []

    if args.legends is not None:
        legends = args.legends.split(",")
        legendset = True
    else:
        legends = []
        legendset = False
    dolegend = args.dolegend

    if args.linewidths is not None:
        thelinewidth = []
        for thestring in args.linewidths.split(","):
            thelinewidth.append(float(thestring))
    else:
        thelinewidth = [1.0]
    numlinewidths = len(thelinewidth)

    if 0 <= args.legendloc <= 10:
        legendloc = args.legendloc
    else:
        print("illegal legend location:", args.legendloc)
        sys.exit()

    # fixed (currently non-configurable) spectrum-processing options
    savespec = False
    detrendorder = 1
    demean = False
    useHamming = True

    # check range
    if args.theendtime is None:
        args.theendtime = 100000000.0
    if args.thestarttime is not None:
        if args.thestarttime >= args.theendtime:
            print("endtime must be greater then starttime;")
            sys.exit()

    # handle required args first
    xvecs = []
    yvecs = []
    linelabels = []
    samplerates = []
    numvecs = 0

    minlen = 100000000
    shortcolnames = True
    # read in all the data
    for i in range(0, len(args.textfilenames)):
        thisfilename, thiscolspec = tide_io.parsefilespec(args.textfilenames[i])

        # check file type
        (
            thissamplerate,
            thisstartoffset,
            colnames,
            invecs,
            dummy,
            dummy,
        ) = tide_io.readvectorsfromtextfile(args.textfilenames[i], debug=args.debug)

        if args.debug:
            print("On return from readvectorsfromtextfile:")
            print(f"\targs.samplerate: {args.samplerate}")
            print(f"\tthissamplerate: {thissamplerate}")
            print(f"\targs.thestarttime: {args.thestarttime}")
            print(f"\tthisstartoffset: {thisstartoffset}")

        if args.debug:
            print("input data dimensions:", invecs.shape)

        # fall back to the command-line/default rate when the file has none
        if thissamplerate is None:
            thissamplerate = samplerate

        # reconcile the file's start offset with the command-line start time;
        # an explicit command-line value always wins
        if thisstartoffset is None:
            # print("thisstartoffset is None")
            if args.thestarttime is None:
                print("args.thestarttime is None")
                args.thestarttime = 0.0
            else:
                print(f"args.thestarttime is {args.thestarttime}")
            thisstartoffset = args.thestarttime
        else:
            # print(f"thisstartoffset is {thisstartoffset}")
            if args.thestarttime is None:
                print("args.thestarttime is None")
                args.thestarttime = thisstartoffset
            else:
                print(f"args.thestarttime is {args.thestarttime}")
                thisstartoffset = args.thestarttime

        if args.debug:
            print("After preprocessing time variables:")
            print(f"\targs.samplerate: {args.samplerate}")
            print(f"\tthissamplerate: {thissamplerate}")
            print(f"\targs.thestarttime: {args.thestarttime}")
            print(f"\tthisstartoffset: {thisstartoffset}")

        if args.debug:
            print(f"file {args.textfilenames[i]} colnames: {colnames}")

        if args.dotranspose:
            invecs = np.transpose(invecs)
        if args.debug:
            print(" ", invecs.shape[0], " columns")

        # each row of invecs becomes one plotted trace
        for j in range(0, invecs.shape[0]):
            if args.debug:
                print("appending vector number ", j)
            if dospectrum:
                # spectrum path needs an even number of points
                if invecs.shape[1] % 2 == 1:
                    invec = invecs[j, :-1]
                else:
                    invec = invecs[j, :]
                if detrendorder > 0:
                    invec = tide_fit.detrend(invec, order=detrendorder, demean=True)
                elif demean:
                    invec = invec - np.mean(invec)

                if useHamming:
                    freqaxis, spectrum = tide_filt.spectrum(
                        tide_filt.hamming(len(invec)) * invec,
                        Fs=thissamplerate,
                        mode=specmode,
                    )
                else:
                    freqaxis, spectrum = tide_filt.spectrum(
                        invec, Fs=thissamplerate, mode=specmode
                    )
                if savespec:
                    tide_io.writenpvecs(
                        np.transpose(np.stack([freqaxis, spectrum], axis=1)),
                        "thespectrum.txt",
                    )
                xvecs.append(freqaxis)
                yvecs.append(spectrum)
            else:
                # time-domain path: copy the data and build a matching time axis
                yvecs.append(invecs[j] * 1.0)
                xvecs.append(
                    thisstartoffset + np.arange(0.0, len(yvecs[-1]), 1.0) / thissamplerate
                )
            if len(yvecs[-1]) < minlen:
                minlen = len(yvecs[-1])
            # build a legend label for this trace
            if not legendset:
                if invecs.shape[0] > 1:
                    if colnames is None:
                        if shortcolnames:
                            linelabels.append("column" + str(j).zfill(2))
                        else:
                            linelabels.append(thisfilename + "_column" + str(j).zfill(2))
                    else:
                        if shortcolnames:
                            linelabels.append(colnames[j])
                        else:
                            linelabels.append(thisfilename + "_" + colnames[j])
                else:
                    linelabels.append(thisfilename)
            else:
                linelabels.append(legends[i % len(legends)])
                """if invecs.shape[0] > 1:
                    linelabels.append(legends[i % len(legends)] + '_column' + str(j).zfill(2))
                else:
                    linelabels.append(legends[i % len(legends)])"""
            samplerates.append(thissamplerate + 0.0)
            if args.debug:
                print(
                    "timecourse:",
                    j,
                    ", len:",
                    len(xvecs[-1]),
                    ", timerange:",
                    xvecs[-1][0],
                    xvecs[-1][-1],
                )
            numvecs += 1

    # snap the requested start/end times to the first trace's time axis
    thestartpoint = tide_util.valtoindex(xvecs[0], args.thestarttime)
    theendpoint = tide_util.valtoindex(xvecs[0], args.theendtime)
    args.thestarttime = xvecs[0][thestartpoint]
    args.theendtime = xvecs[0][theendpoint]
    if args.debug:
        print("full range (pts):", thestartpoint, theendpoint)
        print("full range (time):", args.thestarttime, args.theendtime)
    overallxmax = -1e38
    overallxmin = 1e38
    for thevec in xvecs:
        overallxmax = np.max([np.max(thevec), overallxmax])
        overallxmin = np.min([np.min(thevec), overallxmin])
    xrange = (np.max([overallxmin, args.thestarttime]), np.min([overallxmax, args.theendtime]))
    # NOTE(review): ymins/ymaxs are computed but not used below — yrange is
    # derived from the full (unwindowed) vectors instead
    ymins = []
    ymaxs = []
    for thevec in yvecs:
        ymins.append(np.min(np.asarray(thevec[thestartpoint:theendpoint], dtype="float")))
        ymaxs.append(np.max(np.asarray(thevec[thestartpoint:theendpoint], dtype="float")))
    overallymax = -1e38
    overallymin = 1e38
    for thevec in yvecs:
        overallymax = np.max([np.max(thevec), overallymax])
        overallymin = np.min([np.min(thevec), overallymin])
    yrange = (overallymin, overallymax)
    if args.debug:
        print("xrange:", xrange)
        print("yrange:", yrange)

    # negative voffset means "auto": use the full y range as the stacking step
    if args.voffset < 0.0:
        args.voffset = yrange[1] - yrange[0]
    if args.debug:
        print("voffset:", args.voffset)
    if not separate:
        # overlaid mode: stack traces vertically by voffset, then re-derive yrange
        for i in range(0, numvecs):
            yvecs[i] += (numvecs - i - 1) * args.voffset
        overallymax = -1e38
        overallymin = 1e38
        for thevec in yvecs:
            overallymax = np.max([np.max(thevec), overallymax])
            overallymin = np.min([np.min(thevec), overallymin])
        yrange = (overallymin, overallymax)

        if args.dowaterfall:
            # waterfall display: shear each successive trace in x and y
            xstep = (xrange[1] - xrange[0]) / numvecs
            ystep = yrange[1] - yrange[0]
            for i in range(numvecs):
                xvecs[i] = xvecs[i] + i * xstep
                yvecs[i] = 10.0 * yvecs[i] / ystep + i * ystep

    # now plot it out
    if separate:
        thexaxfontsize = 6 * args.fontscalefac
        theyaxfontsize = 6 * args.fontscalefac
        thexlabelfontsize = 6 * args.fontscalefac
        theylabelfontsize = 6 * args.fontscalefac
        thelegendfontsize = 5 * args.fontscalefac
        thetitlefontsize = 6 * args.fontscalefac
        thesuptitlefontsize = 10 * args.fontscalefac
    else:
        thexaxfontsize = 10 * args.fontscalefac
        theyaxfontsize = 10 * args.fontscalefac
        thexlabelfontsize = 10 * args.fontscalefac
        theylabelfontsize = 10 * args.fontscalefac
        thelegendfontsize = 8 * args.fontscalefac
        thetitlefontsize = 10 * args.fontscalefac
        thesuptitlefontsize = 10 * args.fontscalefac

    # explicit colors cycle if fewer than traces; otherwise spread over a colormap
    if len(colornames) > 0:
        colorlist = [colornames[i % len(colornames)] for i in range(numvecs)]
    else:
        colorlist = [cm.nipy_spectral(float(i) / numvecs) for i in range(numvecs)]

    fig = figure()
    if separate:
        if args.thetitle is not None:
            fig.suptitle(args.thetitle, fontsize=thesuptitlefontsize)
        if linky:
            axlist = fig.subplots(numvecs, sharex=True, sharey=True)[:]
        else:
            axlist = fig.subplots(numvecs, sharex=True, sharey=False)[:]
    else:
        ax = fig.add_subplot(1, 1, 1)
        if args.thetitle is not None:
            ax.set_title(args.thetitle, fontsize=thetitlefontsize)

    for i in range(0, numvecs):
        if separate:
            ax = axlist[i]
        ax.plot(
            xvecs[i],
            yvecs[i],
            color=colorlist[i],
            label=linelabels[i],
            linewidth=thelinewidth[i % numlinewidths],
        )
        if dolegend:
            ax.legend(fontsize=thelegendfontsize, loc=legendloc)
        ax.set_xlim(xrange)
        if linky:
            # print(yrange)
            ax.set_ylim(yrange)
        else:
            # unlinked axes get a per-trace range with a 5% margin
            themax = np.max(yvecs[i])
            themin = np.min(yvecs[i])
            thediff = themax - themin
            # print(themin, themax, thediff)
            ax.set_ylim(top=(themax + thediff / 20.0), bottom=(themin - thediff / 20.0))
        if args.showxax:
            ax.tick_params(axis="x", labelsize=thexlabelfontsize, which="both")
        if args.showyax:
            ax.tick_params(axis="y", labelsize=theylabelfontsize, which="both")

    if separate:
        # butt the subplots together and hide all x tick labels but the bottom ones
        fig.subplots_adjust(hspace=0)
        setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)

    # default axis labels depend on display mode
    if dospectrum:
        if args.xlabel is None:
            args.xlabel = "Frequency (Hz)"
        if specmode == "power":
            if args.ylabel is None:
                args.ylabel = "Signal power"
        else:
            if args.ylabel is None:
                args.ylabel = "Signal phase"
    else:
        if args.xlabel is None:
            args.xlabel = "Time (s)"
    if args.showxax:
        ax.set_xlabel(args.xlabel, fontsize=thexlabelfontsize, fontweight="bold")
    else:
        ax.xaxis.set_visible(False)
    if args.showyax:
        ax.set_ylabel(args.ylabel, fontsize=theylabelfontsize, fontweight="bold")
    else:
        ax.yaxis.set_visible(False)

    # fig.tight_layout()

    if args.outputfile is None:
        show()
    else:
        savefig(args.outputfile, bbox_inches="tight", dpi=args.saveres)
def findmaxlag_gauss_rev(thexcorr_x, thexcorr_y, lagmin, lagmax, widthlimit,
                         absmaxsigma=1000.0,
                         hardlimit=True,
                         bipolar=False,
                         edgebufferfrac=0.0,
                         threshval=0.0,
                         uthreshval=1.0,
                         debug=False,
                         tweaklims=True,
                         zerooutbadfit=True,
                         refine=False,
                         maxguess=0.0,
                         useguess=False,
                         searchfrac=0.5,
                         fastgauss=False,
                         lagmod=1000.0,
                         enforcethresh=True,
                         displayplots=False):
    """Locate and characterize the dominant peak of a correlation function.

    Finds the peak (optionally the largest in absolute value when
    ``bipolar``), estimates its width from the half-height extent, sanity
    checks the initial estimates, and optionally refines them with a
    least-squares Gaussian fit.

    Parameters
    ----------
    thexcorr_x: 1D float array
        The time axis of the correlation function
    thexcorr_y: 1D float array
        The values of the correlation function
    lagmin: float
        The minimum allowed lag time in seconds
    lagmax: float
        The maximum allowed lag time in seconds
    widthlimit: float
        The maximum allowed peak halfwidth in seconds
    absmaxsigma: float
        Hard upper limit on the fitted peak width
    hardlimit: bool
        When False, allow the initial lag to fall 75% of the search range
        outside [lagmin, lagmax] before flagging it
    bipolar: boolean
        If true find the correlation peak with the maximum absolute value,
        regardless of sign
    edgebufferfrac, threshval, uthreshval, debug, tweaklims, zerooutbadfit,
    refine, maxguess, useguess, searchfrac, fastgauss, lagmod,
    enforcethresh, displayplots
        See module conventions; maxguess/useguess seed the peak location,
        searchfrac sets the half-height fraction used for width estimation.

    Returns
    -------
    maxindex, maxlag, maxval, maxsigma, maskval, failreason, peakstart, peakend
        Peak index, lag, amplitude (sign restored via flipfac), width,
        success mask (0 on failure), bitmask of failure reasons, and the
        index range spanned by the peak.
    """
    # set initial parameters
    # widthlimit is in seconds
    # maxsigma is in Hz
    # maxlag is in seconds
    warnings.filterwarnings("ignore", "Number*")
    maxlag = np.float64(0.0)
    maxval = np.float64(0.0)
    maxsigma = np.float64(0.0)
    maskval = np.uint16(1)  # start out assuming the fit will succeed
    numlagbins = len(thexcorr_y)
    binwidth = thexcorr_x[1] - thexcorr_x[0]

    # define error values (bit flags - combine with |, never +)
    failreason = np.uint16(0)
    FML_BADAMPLOW = np.uint16(0x01)
    FML_BADAMPHIGH = np.uint16(0x02)
    FML_BADSEARCHWINDOW = np.uint16(0x04)
    FML_BADWIDTH = np.uint16(0x08)
    FML_BADLAG = np.uint16(0x10)
    FML_HITEDGE = np.uint16(0x20)
    FML_FITFAIL = np.uint16(0x40)
    FML_INITFAIL = np.uint16(0x80)

    # set the search range
    lowerlim = 0
    upperlim = len(thexcorr_x) - 1
    if debug:
        print('initial search indices are', lowerlim, 'to', upperlim,
              '(', thexcorr_x[lowerlim], thexcorr_x[upperlim], ')')

    # make an initial guess at the fit parameters for the gaussian
    # start with finding the maximum value and its location
    flipfac = 1.0
    if useguess:
        maxindex = tide_util.valtoindex(thexcorr_x, maxguess)
    else:
        maxindex, flipfac = maxindex_noedge(thexcorr_x, thexcorr_y, bipolar=bipolar)
        # flip a negative peak positive so the width/fit logic below applies
        thexcorr_y *= flipfac
    maxlag_init = (1.0 * thexcorr_x[maxindex]).astype('float64')
    maxval_init = thexcorr_y[maxindex].astype('float64')
    if debug:
        print('maxindex, maxlag_init, maxval_init:', maxindex, maxlag_init, maxval_init)

    # then calculate the width of the peak
    thegrad = np.gradient(thexcorr_y).astype('float64')  # the gradient of the correlation function
    peakpoints = np.where(thexcorr_y > searchfrac * maxval_init, 1,
                          0)  # mask for places where correlaion exceeds serchfrac*maxval_init
    peakpoints[0] = 0
    peakpoints[-1] = 0
    peakstart = maxindex + 0
    peakend = maxindex + 0
    # walk outward from the peak while the function keeps falling and stays above threshold
    while peakend < (len(thexcorr_x) - 2) and thegrad[peakend + 1] < 0.0 and peakpoints[peakend + 1] == 1:
        peakend += 1
    while peakstart > 1 and thegrad[peakstart - 1] > 0.0 and peakpoints[peakstart - 1] == 1:
        peakstart -= 1
    # This is calculated from first principles, but it's always big by a factor or ~1.4.
    # Which makes me think I dropped a factor if sqrt(2).  So fix that with a final division
    maxsigma_init = np.float64(
        ((peakend - peakstart + 1) * binwidth / (2.0 * np.sqrt(-np.log(searchfrac)))) / np.sqrt(2.0))
    if debug:
        print('maxsigma_init:', maxsigma_init)

    # now check the values for errors
    if hardlimit:
        rangeextension = 0.0
    else:
        rangeextension = (lagmax - lagmin) * 0.75
    if not ((lagmin - rangeextension - binwidth) <= maxlag_init <= (lagmax + rangeextension + binwidth)):
        failreason |= (FML_INITFAIL | FML_BADLAG)
        # FIX: clamp to the bound that was actually violated (the original
        # comparison was inverted and clamped to the lower bound when the
        # lag exceeded the upper bound)
        if maxlag_init <= (lagmin - rangeextension - binwidth):
            maxlag_init = lagmin - rangeextension - binwidth
        else:
            maxlag_init = lagmax + rangeextension + binwidth
        if debug:
            print('bad initial')
    if maxsigma_init > absmaxsigma:
        failreason |= (FML_INITFAIL | FML_BADWIDTH)
        maxsigma_init = absmaxsigma
        if debug:
            print('bad initial width - too high')
    if peakend - peakstart < 2:
        failreason |= (FML_INITFAIL | FML_BADSEARCHWINDOW)
        maxsigma_init = np.float64(
            ((2 + 1) * binwidth / (2.0 * np.sqrt(-np.log(searchfrac)))) / np.sqrt(2.0))
        if debug:
            print('bad initial width - too low')
    if not (threshval <= maxval_init <= uthreshval) and enforcethresh:
        failreason |= (FML_INITFAIL | FML_BADAMPLOW)
        if debug:
            print('bad initial amp:', maxval_init, 'is less than', threshval)
    if (maxval_init < 0.0):
        failreason |= (FML_INITFAIL | FML_BADAMPLOW)
        maxval_init = 0.0
        if debug:
            print('bad initial amp:', maxval_init, 'is less than 0.0')
    if (maxval_init > 1.0):
        failreason |= (FML_INITFAIL | FML_BADAMPHIGH)
        maxval_init = 1.0
        if debug:
            print('bad initial amp:', maxval_init, 'is greater than 1.0')

    if failreason > 0 and zerooutbadfit:
        maxval = np.float64(0.0)
        maxlag = np.float64(0.0)
        maxsigma = np.float64(0.0)
    else:
        maxval = np.float64(maxval_init)
        maxlag = np.float64(maxlag_init)
        maxsigma = np.float64(maxsigma_init)

    # refine if necessary
    if refine:
        data = thexcorr_y[peakstart:peakend]
        X = thexcorr_x[peakstart:peakend]
        if fastgauss:
            # do a non-iterative fit over the top of the peak
            # 6/12/2015  This is just broken.  Gives quantized maxima
            maxlag = np.float64(1.0 * sum(X * data) / sum(data))
            maxsigma = np.float64(np.sqrt(np.abs(np.sum((X - maxlag) ** 2 * data) / np.sum(data))))
            maxval = np.float64(data.max())
        else:
            # do a least squares fit over the top of the peak
            # p0 = np.array([maxval_init, np.fmod(maxlag_init, lagmod), maxsigma_init], dtype='float64')
            p0 = np.array([maxval_init, maxlag_init, maxsigma_init], dtype='float64')
            if debug:
                print('fit input array:', p0)
            try:
                plsq, dummy = sp.optimize.leastsq(gaussresiduals, p0, args=(data, X), maxfev=5000)
                maxval = plsq[0]
                maxlag = np.fmod((1.0 * plsq[1]), lagmod)
                maxsigma = plsq[2]
            except Exception:
                # FIX: was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit; treat any fit failure as zeros
                maxval = np.float64(0.0)
                maxlag = np.float64(0.0)
                maxsigma = np.float64(0.0)
            if debug:
                print('fit output array:', [maxval, maxlag, maxsigma])

        # check for errors in fit
        fitfail = False
        failreason = np.uint16(0)
        if not (0.0 <= np.fabs(maxval) <= 1.0):
            failreason |= (FML_FITFAIL | FML_BADAMPLOW)
            if debug:
                print('bad amp after refinement')
            fitfail = True
        if (lagmin > maxlag) or (maxlag > lagmax):
            failreason |= (FML_FITFAIL | FML_BADLAG)
            if debug:
                print('bad lag after refinement')
            if lagmin > maxlag:
                maxlag = lagmin
            else:
                maxlag = lagmax
            fitfail = True
        if maxsigma > absmaxsigma:
            failreason |= (FML_FITFAIL | FML_BADWIDTH)
            if debug:
                print('bad width after refinement')
            maxsigma = absmaxsigma
            fitfail = True
        if not (0.0 < maxsigma):
            failreason |= (FML_FITFAIL | FML_BADSEARCHWINDOW)
            if debug:
                print('bad width after refinement')
            maxsigma = 0.0
            fitfail = True
        if fitfail:
            if debug:
                print('fit fail')
            if zerooutbadfit:
                maxval = np.float64(0.0)
                maxlag = np.float64(0.0)
                maxsigma = np.float64(0.0)
            maskval = np.int16(0)
        # print(maxlag_init, maxlag, maxval_init, maxval, maxsigma_init, maxsigma, maskval, failreason, fitfail)
    else:
        maxval = np.float64(maxval_init)
        maxlag = np.float64(np.fmod(maxlag_init, lagmod))
        maxsigma = np.float64(maxsigma_init)
        if failreason > 0:
            maskval = np.uint16(0)

    if debug or displayplots:
        print("init to final: maxval", maxval_init, maxval,
              ", maxlag:", maxlag_init, maxlag,
              ", width:", maxsigma_init, maxsigma)
    if displayplots and refine and (maskval != 0.0):
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_title('Data and fit')
        hiresx = np.arange(X[0], X[-1], (X[1] - X[0]) / 10.0)
        pl.plot(X, data, 'ro',
                hiresx, gauss_eval(hiresx, np.array([maxval, maxlag, maxsigma])), 'b-')
        pl.show()
    # undo the bipolar sign flip on the returned amplitude
    return maxindex, maxlag, flipfac * maxval, maxsigma, maskval, failreason, peakstart, peakend
def autocorrcheck(corrscale, thexcorr, delta=0.1, acampthresh=0.1, aclagthresh=10.0, displayplots=False,
                  prewindow=True, detrendorder=1, debug=False):
    """Search an autocorrelation function for a significant sidelobe.

    Scans the detected maxima to the right of the zero-lag peak; the first
    one whose amplitude exceeds ``acampthresh`` (within ``aclagthresh``
    seconds) is refined with a Gaussian fit and returned.

    Parameters
    ----------
    corrscale
        Lag (time) axis of the autocorrelation function
    thexcorr
        Autocorrelation values on that axis
    delta
        Minimum peak prominence passed to the peak detector
    acampthresh
        Amplitude a sidelobe must exceed to be reported
    aclagthresh
        Maximum lag (in corrscale units) to search before giving up
    displayplots
        If True, plot the sidelobe and its Gaussian fit
    prewindow
        NOTE(review): accepted but not used in this function body
    detrendorder
        NOTE(review): accepted but not used in this function body
    debug
        If True, print the raw peak list

    Returns
    -------
    (sidelobetime, sidelobeamp), or (None, None) if no qualifying sidelobe
    is found.
    """
    lookahead = 2
    peaks = tide_fit.peakdetect(thexcorr, x_axis=corrscale, delta=delta, lookahead=lookahead)
    maxpeaks = np.asarray(peaks[0], dtype='float64')
    # minima are collected for symmetry but not used below
    minpeaks = np.asarray(peaks[1], dtype='float64')
    if len(peaks[0]) > 0:
        if debug:
            print(peaks)
        # the maximum closest to zero lag is the main (zero-lag) peak
        zeropkindex = np.argmin(abs(maxpeaks[:, 0]))
        # examine the maxima to the right of the zero-lag peak in order
        for i in range(zeropkindex + 1, maxpeaks.shape[0]):
            if maxpeaks[i, 0] > aclagthresh:
                # past the lag search limit - stop looking
                return None, None
            if maxpeaks[i, 1] > acampthresh:
                sidelobetime = maxpeaks[i, 0]
                sidelobeindex = tide_util.valtoindex(corrscale, sidelobetime)
                sidelobeamp = thexcorr[sidelobeindex]
                # walk right until the function drops below half the sidelobe
                # amplitude to get a half-width estimate (short-circuit keeps
                # the index in bounds)
                numbins = 1
                while (sidelobeindex + numbins < np.shape(corrscale)[0] - 1) and (
                        thexcorr[sidelobeindex + numbins] > sidelobeamp / 2.0):
                    numbins += 1
                sidelobewidth = (corrscale[sidelobeindex + numbins] - corrscale[sidelobeindex]) * 2.0
                fitstart = sidelobeindex - numbins
                fitend = sidelobeindex + numbins
                # refine amplitude/position/width with a Gaussian fit over the lobe
                sidelobeamp, sidelobetime, sidelobewidth = tide_fit.gaussfit(
                    sidelobeamp, sidelobetime, sidelobewidth,
                    corrscale[fitstart:fitend + 1], thexcorr[fitstart:fitend + 1])
                if displayplots:
                    pl.plot(
                        corrscale[fitstart:fitend + 1], thexcorr[fitstart:fitend + 1], 'k',
                        corrscale[fitstart:fitend + 1],
                        tide_fit.gauss_eval(
                            corrscale[fitstart:fitend + 1],
                            [sidelobeamp, sidelobetime, sidelobewidth]), 'r')
                    pl.show()
                return sidelobetime, sidelobeamp
    return None, None
def findmaxlag_gauss(
    thexcorr_x,
    thexcorr_y,
    lagmin,
    lagmax,
    widthlimit,
    edgebufferfrac=0.0,
    threshval=0.0,
    uthreshval=30.0,
    debug=False,
    tweaklims=True,
    zerooutbadfit=True,
    refine=False,
    maxguess=0.0,
    useguess=False,
    searchfrac=0.5,
    fastgauss=False,
    lagmod=1000.0,
    enforcethresh=True,
    absmaxsigma=1000.0,
    absminsigma=0.1,
    displayplots=False,
):
    """Find and characterize the maximum of a correlation function.

    Locates the maximum of ``thexcorr_y`` (or uses ``maxguess`` when
    ``useguess``), estimates the peak width from its half-height extent,
    validates the estimates against the allowed lag/width/amplitude ranges,
    and optionally refines them with a least-squares Gaussian fit.

    BUGFIX: failure flags are now accumulated with ``|=`` throughout.  The
    previous mix of ``+=`` and ``|=`` corrupted the bitmask whenever the
    same flag fired twice (e.g. FML_BADAMPLOW from both the threshold check
    and the negativity check summed 0x01 + 0x01 into 0x02 = FML_BADAMPHIGH).

    Parameters
    ----------
    thexcorr_x
        Time (lag) axis of the correlation function
    thexcorr_y
        Correlation values on that axis
    lagmin, lagmax
        Allowed lag range in seconds
    widthlimit
        Maximum allowed peak halfwidth in seconds
    edgebufferfrac
        Fraction of each end of the lag axis excluded from the search
    threshval, uthreshval
        Lower/upper amplitude thresholds for a believable peak
    debug, tweaklims, zerooutbadfit, refine, maxguess, useguess, searchfrac,
    fastgauss, lagmod, enforcethresh, absmaxsigma, absminsigma, displayplots
        Fit-control options; searchfrac is the half-height fraction used
        for the width estimate, lagmod wraps the reported lag.

    Returns
    -------
    maxindex, maxlag, maxval, maxsigma, maskval, failreason, fitstart, fitend
        Peak index, lag, amplitude, width, success mask (0 on failure),
        bitmask of failure reasons, and the fit index range.
    """
    # set initial parameters
    # widthlimit is in seconds
    # maxsigma is in Hz
    # maxlag is in seconds
    warnings.filterwarnings("ignore", "Number*")
    failreason = np.uint16(0)
    maxlag = np.float64(0.0)
    maxval = np.float64(0.0)
    maxsigma = np.float64(0.0)
    maskval = np.uint16(1)
    numlagbins = len(thexcorr_y)
    binwidth = thexcorr_x[1] - thexcorr_x[0]
    searchbins = int(widthlimit // binwidth)
    lowerlim = int(numlagbins * edgebufferfrac)
    upperlim = numlagbins - lowerlim - 1
    if tweaklims:
        # shrink the search range past any monotonic run at either edge
        lowerlim = 0
        upperlim = numlagbins - 1
        while (thexcorr_y[lowerlim + 1] < thexcorr_y[lowerlim]) and (lowerlim + 1) < upperlim:
            lowerlim += 1
        while (thexcorr_y[upperlim - 1] < thexcorr_y[upperlim]) and (upperlim - 1) > lowerlim:
            upperlim -= 1
    # failure bit flags - always combined with |, never +
    FML_BADAMPLOW = np.uint16(0x01)
    FML_BADAMPHIGH = np.uint16(0x02)
    FML_BADSEARCHWINDOW = np.uint16(0x04)
    FML_BADWIDTH = np.uint16(0x08)
    FML_BADLAG = np.uint16(0x10)
    FML_HITEDGE = np.uint16(0x20)
    FML_FITFAIL = np.uint16(0x40)
    FML_INITFAIL = np.uint16(0x80)

    # make an initial guess at the fit parameters for the gaussian
    # start with finding the maximum value
    if useguess:
        maxindex = tide_util.valtoindex(thexcorr_x, maxguess)
        nlowerlim = int(maxindex - widthlimit / 2.0)
        nupperlim = int(maxindex + widthlimit / 2.0)
        if nlowerlim < lowerlim:
            nlowerlim = lowerlim
            nupperlim = lowerlim + int(widthlimit)
        if nupperlim > upperlim:
            nupperlim = upperlim
            nlowerlim = upperlim - int(widthlimit)
        maxval_init = thexcorr_y[maxindex].astype("float64")
    else:
        maxindex = (np.argmax(thexcorr_y[lowerlim:upperlim]) + lowerlim).astype("int32")
        maxval_init = thexcorr_y[maxindex].astype("float64")

    # now get a location for that value
    maxlag_init = (1.0 * thexcorr_x[maxindex]).astype("float64")

    # and calculate the width of the peak: walk out in each direction while
    # the function stays above searchfrac * peak
    upperlimit = len(thexcorr_y) - 1
    lowerlimit = 0
    i = 0
    j = 0
    while ((maxindex + i <= upperlimit)
           and (thexcorr_y[maxindex + i] > searchfrac * maxval_init)
           and (i < searchbins)):
        i += 1
    if (maxindex + i > upperlimit) or (i > searchbins):
        i -= 1
    while ((maxindex - j >= lowerlimit)
           and (thexcorr_y[maxindex - j] > searchfrac * maxval_init)
           and (j < searchbins)):
        j += 1
    if (maxindex - j < lowerlimit) or (j > searchbins):
        j -= 1
    # This is calculated from first principles, but it's always big by a factor or ~1.4.
    # Which makes me think I dropped a factor if sqrt(2).  So fix that with a final division
    maxsigma_init = np.float64(
        ((i + j + 1) * binwidth / (2.0 * np.sqrt(-np.log(searchfrac)))) / np.sqrt(2.0))

    # now check the values for errors and refine if necessary
    fitend = min(maxindex + i + 1, upperlimit)
    fitstart = max(1, maxindex - j)
    if not (lagmin <= maxlag_init <= lagmax):
        failreason |= FML_HITEDGE
        if maxlag_init <= lagmin:
            maxlag_init = lagmin
        else:
            maxlag_init = lagmax
    if i + j + 1 < 3:
        failreason |= FML_BADSEARCHWINDOW
        maxsigma_init = np.float64(
            (3.0 * binwidth / (2.0 * np.sqrt(-np.log(searchfrac)))) / np.sqrt(2.0))
    if maxsigma_init > widthlimit:
        failreason |= FML_BADWIDTH
        maxsigma_init = widthlimit
    if (maxval_init < threshval) and enforcethresh:
        failreason |= FML_BADAMPLOW
    if maxval_init < 0.0:
        failreason |= FML_BADAMPLOW
        maxval_init = 0.0
    if maxval_init > 1.0:
        failreason |= FML_BADAMPHIGH
        maxval_init = 1.0
    if failreason > 0:
        maskval = np.uint16(0)
    if failreason > 0 and zerooutbadfit:
        maxval = np.float64(0.0)
        maxlag = np.float64(0.0)
        maxsigma = np.float64(0.0)
    else:
        if refine:
            data = thexcorr_y[fitstart:fitend]
            X = thexcorr_x[fitstart:fitend]
            if fastgauss:
                # do a non-iterative fit over the top of the peak
                # 6/12/2015  This is just broken.  Gives quantized maxima
                maxlag = np.float64(1.0 * sum(X * data) / sum(data))
                # maxsigma = np.sqrt(abs(np.square(sum((X - maxlag)) * data) / sum(data)))
                maxsigma = np.float64(
                    np.sqrt(np.fabs(np.sum((X - maxlag) ** 2 * data) / np.sum(data))))
                maxval = np.float64(data.max())
            else:
                # do a least squares fit over the top of the peak
                p0 = np.array([maxval_init, maxlag_init, maxsigma_init], dtype="float64")

                if fitend - fitstart >= 3:
                    plsq, dummy = sp.optimize.leastsq(
                        gaussresiduals, p0, args=(data, X), maxfev=5000)
                    maxval = plsq[0]
                    maxlag = np.fmod((1.0 * plsq[1]), lagmod)
                    maxsigma = plsq[2]
                # if maxval > 1.0, fit failed catastrophically, zero out or reset to initial value
                # corrected logic for 1.1.6
                if (np.fabs(maxval)) > 1.0 or (lagmin > maxlag) or (maxlag > lagmax):
                    if zerooutbadfit:
                        maxval = np.float64(0.0)
                        maxlag = np.float64(0.0)
                        maxsigma = np.float64(0.0)
                        maskval = np.int16(0)
                    else:
                        maxval = np.float64(maxval_init)
                        maxlag = np.float64(maxlag_init)
                        maxsigma = np.float64(maxsigma_init)
                if not absminsigma <= maxsigma <= absmaxsigma:
                    if zerooutbadfit:
                        maxval = np.float64(0.0)
                        maxlag = np.float64(0.0)
                        maxsigma = np.float64(0.0)
                        maskval = np.int16(0)
                    else:
                        if maxsigma > absmaxsigma:
                            maxsigma = absmaxsigma
                        else:
                            maxsigma = absminsigma
        else:
            maxval = np.float64(maxval_init)
            maxlag = np.float64(np.fmod(maxlag_init, lagmod))
            maxsigma = np.float64(maxsigma_init)
        if maxval == 0.0:
            failreason |= FML_FITFAIL
        if not (lagmin <= maxlag <= lagmax):
            failreason |= FML_BADLAG
        if failreason > 0:
            maskval = np.uint16(0)
        if failreason > 0 and zerooutbadfit:
            maxval = np.float64(0.0)
            maxlag = np.float64(0.0)
            maxsigma = np.float64(0.0)
    if debug or displayplots:
        print(
            "init to final: maxval",
            maxval_init,
            maxval,
            ", maxlag:",
            maxlag_init,
            maxlag,
            ", width:",
            maxsigma_init,
            maxsigma,
        )
    if displayplots and refine and (maskval != 0.0):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Data and fit")
        hiresx = np.arange(X[0], X[-1], (X[1] - X[0]) / 10.0)
        plt.plot(
            X,
            data,
            "ro",
            hiresx,
            gauss_eval(hiresx, np.array([maxval, maxlag, maxsigma])),
            "b-",
        )
        plt.show()
    return maxindex, maxlag, maxval, maxsigma, maskval, failreason, fitstart, fitend
def congrid(xaxis, loc, val, width, kernel='kaiser', cyclic=True, debug=False):
    """
    Perform a convolution gridding operation with a Kaiser-Bessel or Gaussian kernel of width 'width'

    Parameters
    ----------
    xaxis: array-like
        The target axis for resampling
    loc: float
        The location, in x-axis units, of the sample to be gridded
    val: float
        The value to be gridded
    width: float
        The width of the gridding kernel in target bins
    kernel: {'old', 'gauss', 'kaiser'}, optional
        The type of convolution gridding kernel.  Default is 'kaiser'.
    cyclic: bool, optional
        When True (default), locations and kernel support wrap around the axis
    debug: bool, optional
        When True, output additional information about the gridding process

    Returns
    -------
    vals: array-like
        The input value, convolved with the gridding kernel, projected on to x axis points
    weights: array-like
        The values of convolution kernel, projected on to x axis points (used for normalization)
    indices: array-like
        The indices along the x axis where the vals and weights fall.

    Notes
    -----
    See IEEE TRANSACTIONS ON MEDICAL IMAGING. VOL. IO.NO. 3, SEPTEMBER 1991

    Kernel values are cached in the module-global ``congridyvals`` keyed by
    fractional offset; the cache is flushed whenever kernel or width change.
    """
    global congridyvals
    if (congridyvals['kernel'] != kernel) or (congridyvals['width'] != width):
        # CONSISTENCY FIX: guard the reinitialization diagnostics with the
        # debug flag, matching the other copy of this routine in this file
        if congridyvals['kernel'] != kernel:
            if debug:
                print(congridyvals['kernel'], '!=', kernel)
        if congridyvals['width'] != width:
            if debug:
                print(congridyvals['width'], '!=', width)
        if debug:
            print('(re)initializing congridyvals')
        congridyvals = {}
        congridyvals['kernel'] = kernel
        congridyvals['width'] = width * 1.0
    # empirically optimal kernel parameters for half-integral widths 1.5..5.0
    optsigma = np.array([0.4241, 0.4927, 0.4839, 0.5063, 0.5516, 0.5695, 0.5682, 0.5974])
    optbeta = np.array([1.9980, 2.3934, 3.3800, 4.2054, 4.9107, 5.7567, 6.6291, 7.4302])
    xstep = xaxis[1] - xaxis[0]
    if (loc < xaxis[0] - xstep / 2.0 or loc > xaxis[-1] + xstep / 2.0) and not cyclic:
        print('loc', loc, 'not in range', xaxis[0], xaxis[-1])

    # choose the smoothing kernel based on the width
    if kernel != 'old':
        if not (1.5 <= width <= 5.0) or (np.fmod(width, 0.5) > 0.0):
            print('congrid: width is', width)
            print('congrid: width must be a half-integral value between 1.5 and 5.0 inclusive')
            sys.exit()
        else:
            kernelindex = int((width - 1.5) // 0.5)

    # find the closest grid point to the target location, calculate relative offsets from this point
    center = tide_util.valtoindex(xaxis, loc)
    offset = np.fmod(np.round((loc - xaxis[center]) / xstep, 3), 1.0)  # will vary from -0.5 to 0.5
    if cyclic:
        # wrap a half-bin overshoot at either end of the axis
        if center == len(xaxis) - 1 and offset > 0.5:
            center = 0
            offset -= 1.0
        if center == 0 and offset < -0.5:
            center = len(xaxis) - 1
            offset += 1.0
    if not (-0.5 <= offset <= 0.5):
        print('(loc, xstep, center, offset):', loc, xstep, center, offset)
        print('xaxis:', xaxis)
        sys.exit()
    offsetkey = str(offset)

    if kernel == 'old':
        if debug:
            print('gridding with old kernel')
        widthinpts = int(np.round(width * 4.6 / xstep))
        widthinpts -= widthinpts % 2 - 1  # force an odd number of points
        try:
            yvals = congridyvals[offsetkey]
        except KeyError:
            if debug:
                print('new key:', offsetkey)
            xvals = np.linspace(-xstep * (widthinpts // 2),
                                xstep * (widthinpts // 2),
                                num=widthinpts,
                                endpoint=True) + offset
            congridyvals[offsetkey] = tide_fit.gauss_eval(xvals, np.array([1.0, 0.0, width]))
            yvals = congridyvals[offsetkey]
        startpt = int(center - widthinpts // 2)
        indices = range(startpt, startpt + widthinpts)
        indices = np.remainder(indices, len(xaxis))
        if debug:
            print('center, offset, indices, yvals', center, offset, indices, yvals)
        return val * yvals, yvals, indices
    else:
        offsetinpts = center + offset
        startpt = int(np.ceil(offsetinpts - width / 2.0))
        endpt = int(np.floor(offsetinpts + width / 2.0))
        indices = np.remainder(range(startpt, endpt + 1), len(xaxis))
        try:
            yvals = congridyvals[offsetkey]
        except KeyError:
            if debug:
                print('new key:', offsetkey)
            xvals = indices - center + offset
            if kernel == 'gauss':
                sigma = optsigma[kernelindex]
                congridyvals[offsetkey] = tide_fit.gauss_eval(xvals, np.array([1.0, 0.0, sigma]))
            elif kernel == 'kaiser':
                beta = optbeta[kernelindex]
                congridyvals[offsetkey] = tide_fit.kaiserbessel_eval(
                    xvals, np.array([beta, width / 2.0]))
            else:
                print('illegal kernel value in congrid - exiting')
                sys.exit()
            yvals = congridyvals[offsetkey]
            if debug:
                print('xvals, yvals', xvals, yvals)
    if debug:
        print('center, offset, indices, yvals', center, offset, indices, yvals)
    return val * yvals, yvals, indices
def findmaxlag_gauss_rev(
    thexcorr_x,
    thexcorr_y,
    lagmin,
    lagmax,
    widthlimit,
    absmaxsigma=1000.0,
    hardlimit=True,
    bipolar=False,
    edgebufferfrac=0.0,
    threshval=0.0,
    uthreshval=1.0,
    debug=False,
    tweaklims=True,
    zerooutbadfit=True,
    refine=False,
    maxguess=0.0,
    useguess=False,
    searchfrac=0.5,
    fastgauss=False,
    lagmod=1000.0,
    enforcethresh=True,
    displayplots=False,
):
    """Locate and (optionally) Gaussian-fit the peak of a correlation function.

    Finds the dominant peak of ``thexcorr_y``, estimates its amplitude, lag,
    and width from first principles, sanity-checks those estimates, and if
    ``refine`` is True refines them with a least-squares Gaussian fit over the
    top of the peak.

    Parameters
    ----------
    thexcorr_x : 1D float array
        The time axis of the correlation function (assumed uniformly spaced).
    thexcorr_y : 1D float array
        The values of the correlation function.
        NOTE(review): when ``useguess`` is False and ``bipolar`` detection
        flips the sign, this array is multiplied in place by ``flipfac`` —
        callers that reuse the array should be aware of this side effect.
    lagmin : float
        The minimum allowed lag time in seconds.
    lagmax : float
        The maximum allowed lag time in seconds.
    widthlimit : float
        Accepted for interface compatibility; not used in this implementation.
    absmaxsigma : float, optional
        Hard upper bound on the fitted peak width (same units as the x axis).
    hardlimit : bool, optional
        If True, lags must fall strictly within [lagmin, lagmax]; if False,
        the window is extended by 75% of its span on each side.
    bipolar : bool, optional
        If True, find the correlation peak with the maximum absolute value,
        regardless of sign.
    edgebufferfrac : float, optional
        Accepted for interface compatibility; not used in this implementation.
    threshval, uthreshval : float, optional
        Lower/upper bounds on an acceptable initial peak amplitude
        (enforced only when ``enforcethresh`` is True).
    debug : bool, optional
        Print diagnostic information.
    tweaklims : bool, optional
        Accepted for interface compatibility; not used in this implementation.
    zerooutbadfit : bool, optional
        If True, return zeros for amplitude/lag/width when the fit fails.
    refine : bool, optional
        If True, refine the initial estimates with a fit over the peak top.
    maxguess : float, optional
        Prior guess for the peak lag (used only when ``useguess`` is True).
    useguess : bool, optional
        If True, seed the search at ``maxguess`` instead of the global max.
    searchfrac : float, optional
        Fraction of the peak height used to delimit the peak's extent.
    fastgauss : bool, optional
        If True, use a non-iterative moment fit instead of least squares.
        NOTE: known to be broken — gives quantized maxima (see comment below).
    lagmod : float, optional
        The fitted lag is reported modulo this value.
    enforcethresh : bool, optional
        If True, reject initial amplitudes outside [threshval, uthreshval].
    displayplots : bool, optional
        If True (and refining succeeded), plot the data and the fit.

    Returns
    -------
    tuple
        (maxindex, maxlag, flipfac * maxval, maxsigma, maskval, failreason,
        peakstart, peakend) where ``maskval`` is 1 for a good fit and 0 for a
        bad one, and ``failreason`` is a bitmask of FML_* failure codes.
    """
    # set initial parameters
    # maxsigma is in Hz
    # maxlag is in seconds
    warnings.filterwarnings("ignore", "Number*")
    maxlag = np.float64(0.0)
    maxval = np.float64(0.0)
    maxsigma = np.float64(0.0)
    maskval = np.uint16(1)  # start out assuming the fit will succeed
    binwidth = thexcorr_x[1] - thexcorr_x[0]

    # define error values (bit flags, combined with |)
    failreason = np.uint16(0)
    FML_BADAMPLOW = np.uint16(0x01)
    FML_BADAMPHIGH = np.uint16(0x02)
    FML_BADSEARCHWINDOW = np.uint16(0x04)
    FML_BADWIDTH = np.uint16(0x08)
    FML_BADLAG = np.uint16(0x10)
    FML_HITEDGE = np.uint16(0x20)
    FML_FITFAIL = np.uint16(0x40)
    FML_INITFAIL = np.uint16(0x80)

    # set the search range
    lowerlim = 0
    upperlim = len(thexcorr_x) - 1
    if debug:
        print(
            "initial search indices are",
            lowerlim,
            "to",
            upperlim,
            "(",
            thexcorr_x[lowerlim],
            thexcorr_x[upperlim],
            ")",
        )

    # make an initial guess at the fit parameters for the gaussian
    # start with finding the maximum value and its location
    flipfac = 1.0
    if useguess:
        maxindex = tide_util.valtoindex(thexcorr_x, maxguess)
    else:
        maxindex, flipfac = maxindex_noedge(thexcorr_x, thexcorr_y, bipolar=bipolar)
        # NOTE(review): in-place sign flip of the caller's array (see docstring)
        thexcorr_y *= flipfac
    maxlag_init = (1.0 * thexcorr_x[maxindex]).astype("float64")
    maxval_init = thexcorr_y[maxindex].astype("float64")
    if debug:
        print("maxindex, maxlag_init, maxval_init:", maxindex, maxlag_init, maxval_init)

    # then calculate the width of the peak
    # the gradient of the correlation function
    thegrad = np.gradient(thexcorr_y).astype("float64")
    # mask for places where correlation exceeds searchfrac*maxval_init
    peakpoints = np.where(thexcorr_y > searchfrac * maxval_init, 1, 0)
    peakpoints[0] = 0
    peakpoints[-1] = 0
    peakstart = maxindex + 0
    peakend = maxindex + 0
    # walk outward from the max while the function is monotonic and above threshold
    while (
        peakend < (len(thexcorr_x) - 2)
        and thegrad[peakend + 1] < 0.0
        and peakpoints[peakend + 1] == 1
    ):
        peakend += 1
    while peakstart > 1 and thegrad[peakstart - 1] > 0.0 and peakpoints[peakstart - 1] == 1:
        peakstart -= 1

    # This is calculated from first principles, but it's always big by a factor of ~1.4.
    # Which makes me think I dropped a factor of sqrt(2).  So fix that with a final division
    maxsigma_init = np.float64(
        ((peakend - peakstart + 1) * binwidth / (2.0 * np.sqrt(-np.log(searchfrac))))
        / np.sqrt(2.0)
    )
    if debug:
        print("maxsigma_init:", maxsigma_init)

    # now check the values for errors
    if hardlimit:
        rangeextension = 0.0
    else:
        rangeextension = (lagmax - lagmin) * 0.75
    if not (
        (lagmin - rangeextension - binwidth)
        <= maxlag_init
        <= (lagmax + rangeextension + binwidth)
    ):
        failreason |= FML_INITFAIL | FML_BADLAG
        # clamp the initial lag to the nearest edge of the allowed window
        if maxlag_init <= (lagmin - rangeextension - binwidth):
            maxlag_init = lagmin - rangeextension - binwidth
        else:
            maxlag_init = lagmax + rangeextension + binwidth
        if debug:
            print("bad initial")
    if maxsigma_init > absmaxsigma:
        failreason |= FML_INITFAIL | FML_BADWIDTH
        maxsigma_init = absmaxsigma
        if debug:
            print("bad initial width - too high")
    if peakend - peakstart < 2:
        failreason |= FML_INITFAIL | FML_BADSEARCHWINDOW
        # substitute the width a minimal 3-point peak would have
        maxsigma_init = np.float64(
            ((2 + 1) * binwidth / (2.0 * np.sqrt(-np.log(searchfrac)))) / np.sqrt(2.0)
        )
        if debug:
            print("bad initial width - too low")
    if not (threshval <= maxval_init <= uthreshval) and enforcethresh:
        failreason |= FML_INITFAIL | FML_BADAMPLOW
        if debug:
            print("bad initial amp:", maxval_init, "is less than", threshval)
    if maxval_init < 0.0:
        failreason |= FML_INITFAIL | FML_BADAMPLOW
        maxval_init = 0.0
        if debug:
            print("bad initial amp:", maxval_init, "is less than 0.0")
    if maxval_init > 1.0:
        failreason |= FML_INITFAIL | FML_BADAMPHIGH
        maxval_init = 1.0
        if debug:
            print("bad initial amp:", maxval_init, "is greater than 1.0")
    if failreason > 0 and zerooutbadfit:
        maxval = np.float64(0.0)
        maxlag = np.float64(0.0)
        maxsigma = np.float64(0.0)
    else:
        maxval = np.float64(maxval_init)
        maxlag = np.float64(maxlag_init)
        maxsigma = np.float64(maxsigma_init)

    # refine if necessary
    if refine:
        data = thexcorr_y[peakstart:peakend]
        X = thexcorr_x[peakstart:peakend]
        if fastgauss:
            # do a non-iterative fit over the top of the peak
            # 6/12/2015  This is just broken.  Gives quantized maxima
            maxlag = np.float64(1.0 * sum(X * data) / sum(data))
            maxsigma = np.float64(
                np.sqrt(np.abs(np.sum((X - maxlag) ** 2 * data) / np.sum(data)))
            )
            maxval = np.float64(data.max())
        else:
            # do a least squares fit over the top of the peak
            # p0 = np.array([maxval_init, np.fmod(maxlag_init, lagmod), maxsigma_init], dtype='float64')
            p0 = np.array([maxval_init, maxlag_init, maxsigma_init], dtype="float64")
            if debug:
                print("fit input array:", p0)
            try:
                plsq, dummy = sp.optimize.leastsq(
                    gaussresiduals, p0, args=(data, X), maxfev=5000
                )
                maxval = plsq[0]
                maxlag = np.fmod((1.0 * plsq[1]), lagmod)
                maxsigma = plsq[2]
            except Exception:
                # was a bare "except:" - narrowed so KeyboardInterrupt/SystemExit
                # are not swallowed; a failed fit falls through to the checks below
                maxval = np.float64(0.0)
                maxlag = np.float64(0.0)
                maxsigma = np.float64(0.0)
            if debug:
                print("fit output array:", [maxval, maxlag, maxsigma])

        # check for errors in fit
        # NOTE: any init-stage failure flags are discarded here by design -
        # only the refined fit is judged
        fitfail = False
        failreason = np.uint16(0)
        if not (0.0 <= np.fabs(maxval) <= 1.0):
            failreason |= FML_FITFAIL | FML_BADAMPLOW
            if debug:
                print("bad amp after refinement")
            fitfail = True
        if (lagmin > maxlag) or (maxlag > lagmax):
            failreason |= FML_FITFAIL | FML_BADLAG
            if debug:
                print("bad lag after refinement")
            if lagmin > maxlag:
                maxlag = lagmin
            else:
                maxlag = lagmax
            fitfail = True
        if maxsigma > absmaxsigma:
            failreason |= FML_FITFAIL | FML_BADWIDTH
            if debug:
                print("bad width after refinement")
            maxsigma = absmaxsigma
            fitfail = True
        if not (0.0 < maxsigma):
            failreason |= FML_FITFAIL | FML_BADSEARCHWINDOW
            if debug:
                print("bad width after refinement")
            maxsigma = 0.0
            fitfail = True
        if fitfail:
            if debug:
                print("fit fail")
            if zerooutbadfit:
                maxval = np.float64(0.0)
                maxlag = np.float64(0.0)
                maxsigma = np.float64(0.0)
            # was np.int16(0) - use np.uint16 for consistency with the rest of the mask values
            maskval = np.uint16(0)
        # print(maxlag_init, maxlag, maxval_init, maxval, maxsigma_init, maxsigma, maskval, failreason, fitfail)
    else:
        maxval = np.float64(maxval_init)
        maxlag = np.float64(np.fmod(maxlag_init, lagmod))
        maxsigma = np.float64(maxsigma_init)
        if failreason > 0:
            maskval = np.uint16(0)

    if debug or displayplots:
        print(
            "init to final: maxval",
            maxval_init,
            maxval,
            ", maxlag:",
            maxlag_init,
            maxlag,
            ", width:",
            maxsigma_init,
            maxsigma,
        )
    if displayplots and refine and (maskval != 0.0):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Data and fit")
        hiresx = np.arange(X[0], X[-1], (X[1] - X[0]) / 10.0)
        plt.plot(
            X,
            data,
            "ro",
            hiresx,
            gauss_eval(hiresx, np.array([maxval, maxlag, maxsigma])),
            "b-",
        )
        plt.show()
    return (
        maxindex,
        maxlag,
        flipfac * maxval,
        maxsigma,
        maskval,
        failreason,
        peakstart,
        peakend,
    )