Example #1
def _procOneVoxelFitcorrx(vox,
                          corr_y,
                          lagtcgenerator,
                          timeaxis,
                          thefitter,
                          disablethresholds=False,
                          despeckle_thresh=5.0,
                          initiallag=None,
                          fixdelay=False,
                          fixeddelayvalue=0.0,
                          rt_floatset=np.float64,
                          rt_floattype='float64'):
    maxindex, maxlag, maxval, maxsigma, maskval, peakstart, peakend, failreason = onecorrfitx(corr_y,
                                                                                              thefitter,
                                                                                              disablethresholds=disablethresholds,
                                                                                              despeckle_thresh=despeckle_thresh,
                                                                                              fixdelay=fixdelay,
                                                                                              fixeddelayvalue=fixeddelayvalue,
                                                                                              initiallag=initiallag,
                                                                                              rt_floatset=rt_floatset,
                                                                                              rt_floattype=rt_floattype)

    if maxval > 0.3:
        displayplots = False

    # question - should maxlag be added or subtracted?  As of 10/18, it is subtracted
    #  potential answer - tried adding, results are terrible.
    thelagtc = rt_floatset(lagtcgenerator.yfromx(timeaxis - maxlag))

    # now tuck everything away in the appropriate output array
    volumetotalinc = 0
    thewindowout = rt_floatset(0.0 * corr_y)
    thewindowout[peakstart:peakend + 1] = 1.0
    if (maskval == 0) and thefitter.zerooutbadfit:
        thetime = rt_floatset(0.0)
        thestrength = rt_floatset(0.0)
        thesigma = rt_floatset(0.0)
        thegaussout = rt_floatset(0.0 * corr_y)
        theR2 = rt_floatset(0.0)
    else:
        volumetotalinc = 1
        thetime = rt_floatset(np.fmod(maxlag, thefitter.lagmod))
        thestrength = rt_floatset(maxval)
        thesigma = rt_floatset(maxsigma)
        thegaussout = rt_floatset(0.0 * corr_y)
        thewindowout = rt_floatset(0.0 * corr_y)
        if (not fixdelay) and (maxsigma != 0.0):
            thegaussout = rt_floatset(tide_fit.gauss_eval(thefitter.corrtimeaxis, [maxval, maxlag, maxsigma]))
        else:
            thegaussout = rt_floatset(0.0)
            thewindowout = rt_floatset(0.0)
        theR2 = rt_floatset(thestrength * thestrength)

    return vox, volumetotalinc, thelagtc, thetime, thestrength, thesigma, thegaussout, \
           thewindowout, theR2, maskval, failreason
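
The comment above asks whether maxlag should be added or subtracted when regenerating the lagged regressor. Evaluating the generator at timeaxis - maxlag shifts the reference waveform so that its features appear maxlag seconds later. A minimal numpy-only sketch of that sign convention (yfromx below is a stand-in for the lagtcgenerator object, which is not defined in this excerpt):

import numpy as np

timeaxis = np.linspace(0.0, 20.0, 201)            # seconds
reference = np.exp(-((timeaxis - 5.0) ** 2))      # a bump peaking at t = 5 s

def yfromx(x):
    # stand-in for lagtcgenerator.yfromx: evaluate the reference at arbitrary times
    return np.interp(x, timeaxis, reference, left=0.0, right=0.0)

maxlag = 3.0                                      # fitted delay, in seconds
lagged = yfromx(timeaxis - maxlag)                # subtract, as in the code above

print(timeaxis[np.argmax(reference)])             # ~5.0: original peak location
print(timeaxis[np.argmax(lagged)])                # ~8.0: peak moved later by maxlag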
Example #2
def autocorrcheck(corrscale, thexcorr, delta=0.1, acampthresh=0.1, aclagthresh=10.0, displayplots=False, prewindow=True,
                  detrendorder=1, debug=False):
    """

    Parameters
    ----------
    corrscale
    thexcorr
    delta
    acampthresh
    aclagthresh
    displayplots
    prewindow
    detrendorder
    debug

    Returns
    -------
    sidelobetime
    sidelobeamp
    """
    lookahead = 2
    peaks = tide_fit.peakdetect(thexcorr, x_axis=corrscale, delta=delta, lookahead=lookahead)
    maxpeaks = np.asarray(peaks[0], dtype='float64')
    minpeaks = np.asarray(peaks[1], dtype='float64')
    if len(peaks[0]) > 0:
        if debug:
            print(peaks)
        zeropkindex = np.argmin(abs(maxpeaks[:, 0]))
        for i in range(zeropkindex + 1, maxpeaks.shape[0]):
            if maxpeaks[i, 0] > aclagthresh:
                return None, None
            if maxpeaks[i, 1] > acampthresh:
                sidelobetime = maxpeaks[i, 0]
                sidelobeindex = tide_util.valtoindex(corrscale, sidelobetime)
                sidelobeamp = thexcorr[sidelobeindex]
                numbins = 1
                while (sidelobeindex + numbins < np.shape(corrscale)[0] - 1) and (
                        thexcorr[sidelobeindex + numbins] > sidelobeamp / 2.0):
                    numbins += 1
                sidelobewidth = (corrscale[sidelobeindex + numbins] - corrscale[sidelobeindex]) * 2.0
                fitstart = sidelobeindex - numbins
                fitend = sidelobeindex + numbins
                sidelobeamp, sidelobetime, sidelobewidth = tide_fit.gaussfit(sidelobeamp, sidelobetime, sidelobewidth,
                                                                             corrscale[fitstart:fitend + 1],
                                                                             thexcorr[fitstart:fitend + 1])

                if displayplots:
                    pl.plot(corrscale[fitstart:fitend + 1], thexcorr[fitstart:fitend + 1], 'k',
                            corrscale[fitstart:fitend + 1],
                            tide_fit.gauss_eval(corrscale[fitstart:fitend + 1], [sidelobeamp, sidelobetime, sidelobewidth]),
                            'r')
                    pl.show()
                return sidelobetime, sidelobeamp
    return None, None
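
autocorrcheck scans the peaks of an autocorrelation function for the first sidelobe past the zero-lag peak whose amplitude exceeds acampthresh within aclagthresh seconds, then refines its time, amplitude, and width with a Gaussian fit. A hedged usage sketch on a synthetic autocorrelation with sidelobes near 4-second intervals (it assumes the tide_fit and tide_util helpers this module imports are available):

import numpy as np

# decaying envelope times a cosine: sidelobes at multiples of the ~4 s period
corrscale = np.linspace(-15.0, 15.0, 301)
thexcorr = np.exp(-np.abs(corrscale) / 6.0) * np.cos(2.0 * np.pi * corrscale / 4.0)

sidelobetime, sidelobeamp = autocorrcheck(
    corrscale, thexcorr, delta=0.1, acampthresh=0.1, aclagthresh=10.0
)
if sidelobetime is not None:
    print('sidelobe at', sidelobetime, 's with amplitude', sidelobeamp)
else:
    print('no significant sidelobe found')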
Example #3
def _procOneVoxelFitcorrx(vox,
                         corrtc,
                         corrscale,
                         genlagtc,
                         initial_fmri_x,
                         optiondict,
                         zerooutbadfit=True,
                         displayplots=False,
                         initiallag=None,
                         rt_floatset=np.float64,
                         rt_floattype='float64'):
    maxindex, maxlag, maxval, maxsigma, maskval, peakstart, peakend, failreason = onecorrfitx(corrtc,
                                                                                             corrscale,
                                                                                             optiondict,
                                                                                             zerooutbadfit=zerooutbadfit,
                                                                                             displayplots=displayplots,
                                                                                             initiallag=initiallag,
                                                                                             rt_floatset=rt_floatset,
                                                                                             rt_floattype=rt_floattype)

    if maxval > 0.3:
        displayplots = False

    # question - should maxlag be added or subtracted?  As of 10/18, it is subtracted
    #  potential answer - tried adding, results are terrible.
    thelagtc = rt_floatset(genlagtc.yfromx(initial_fmri_x - maxlag))

    # now tuck everything away in the appropriate output array
    volumetotalinc = 0
    thewindowout = rt_floatset(0.0 * corrtc)
    thewindowout[peakstart:peakend + 1] = 1.0
    if (maskval == 0) and optiondict['zerooutbadfit']:
        thetime = rt_floatset(0.0)
        thestrength = rt_floatset(0.0)
        thesigma = rt_floatset(0.0)
        thegaussout = rt_floatset(0.0 * corrtc)
        theR2 = rt_floatset(0.0)
    else:
        volumetotalinc = 1
        thetime = rt_floatset(np.fmod(maxlag, optiondict['lagmod']))
        thestrength = rt_floatset(maxval)
        thesigma = rt_floatset(maxsigma)
        thegaussout = rt_floatset(0.0 * corrtc)
        thewindowout = rt_floatset(0.0 * corrtc)
        if (not optiondict['fixdelay']) and (maxsigma != 0.0):
            thegaussout = rt_floatset(tide_fit.gauss_eval(corrscale, [maxval, maxlag, maxsigma]))
        else:
            thegaussout = rt_floatset(0.0)
            thewindowout = rt_floatset(0.0)
        theR2 = rt_floatset(thestrength * thestrength)

    return vox, volumetotalinc, thelagtc, thetime, thestrength, thesigma, thegaussout, \
           thewindowout, theR2, maskval, failreason
Example #4
def _procOneVoxelFitcorr(vox,
                         corr_y,
                         corr_x,
                         lagtcgenerator,
                         timeaxis,
                         optiondict,
                         zerooutbadfit=True,
                         displayplots=False,
                         initiallag=None,
                         rt_floatset=np.float64,
                         rt_floattype='float64'
                         ):
    maxindex, maxlag, maxval, maxsigma, maskval, failreason = onecorrfit(corr_y,
                                                                         corr_x,
                                                                         optiondict,
                                                                         zerooutbadfit=zerooutbadfit,
                                                                         displayplots=displayplots,
                                                                         initiallag=initiallag,
                                                                         rt_floatset=rt_floatset,
                                                                         rt_floattype=rt_floattype)

    if maxval > 0.3:
        displayplots = False

    # question - should maxlag be added or subtracted?  As of 10/18, it is subtracted
    #  potential answer - tried adding, results are terrible.
    thelagtc = rt_floatset(lagtcgenerator.yfromx(timeaxis - maxlag))

    # now tuck everything away in the appropriate output array
    volumetotalinc = 0
    if (maskval == 0) and optiondict['zerooutbadfit']:
        thetime = rt_floatset(0.0)
        thestrength = rt_floatset(0.0)
        thesigma = rt_floatset(0.0)
        thegaussout = rt_floatset(0.0 * corr_y)
        theR2 = rt_floatset(0.0)
    else:
        volumetotalinc = 1
        thetime = rt_floatset(np.fmod(maxlag, optiondict['lagmod']))
        thestrength = rt_floatset(maxval)
        thesigma = rt_floatset(maxsigma)
        if (not optiondict['fixdelay']) and (maxsigma != 0.0):
            thegaussout = rt_floatset(tide_fit.gauss_eval(corr_x, [maxval, maxlag, maxsigma]))
        else:
            thegaussout = rt_floatset(0.0)
        theR2 = rt_floatset(thestrength * thestrength)

    return vox, volumetotalinc, thelagtc, thetime, thestrength, thesigma, thegaussout, theR2, maskval, failreason
Example #5
def congrid(xaxis, loc, val, width, kernel='kaiser', cyclic=True, debug=False):
    """
    Perform a convolution gridding operation with a Kaiser-Bessel or Gaussian kernel of width 'width'

    Parameters
    ----------
    xaxis: array-like
        The target axis for resampling
    loc: float
        The location, in x-axis units, of the sample to be gridded
    val: float
        The value to be gridded
    width: float
        The width of the gridding kernel in target bins
    kernel: {'old', 'gauss', 'kaiser'}, optional
        The type of convolution gridding kernel.  Default is 'kaiser'.
    cyclic: bool, optional
        When True, gridding wraps around the endpoints of xaxis.  Default is True.
    debug: bool, optional
        When True, output additional information about the gridding process

    Returns
    -------
    vals: array-like
        The input value, convolved with the gridding kernel, projected on to x axis points
    weights: array-like
        The values of convolution kernel, projected on to x axis points (used for normalization)
    indices: array-like
        The indices along the x axis where the vals and weights fall.

    Notes
    -----
    See IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL. 10, NO. 3, SEPTEMBER 1991

    """
    global congridyvals

    if (congridyvals['kernel'] != kernel) or (congridyvals['width'] != width):
        if congridyvals['kernel'] != kernel:
            print(congridyvals['kernel'], '!=', kernel)
        if congridyvals['width'] != width:
            print(congridyvals['width'],'!=', width)
        print('(re)initializing congridyvals')
        congridyvals = {}
        congridyvals['kernel'] = kernel
        congridyvals['width'] = width * 1.0
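    # per-width kernel shape parameters (Gaussian sigma, Kaiser-Bessel beta), one entry
    # for each of the eight half-integral widths from 1.5 to 5.0; selected via kernelindex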
    optsigma = np.array([0.4241, 0.4927, 0.4839, 0.5063, 0.5516, 0.5695, 0.5682, 0.5974])
    optbeta  = np.array([1.9980, 2.3934, 3.3800, 4.2054, 4.9107, 5.7567, 6.6291, 7.4302])
    xstep = xaxis[1] - xaxis[0]
    if (loc < xaxis[0] - xstep / 2.0 or loc > xaxis[-1] + xstep / 2.0) and not cyclic:
        print('loc', loc, 'not in range', xaxis[0], xaxis[-1])

    # choose the smoothing kernel based on the width
    if kernel != 'old':
        if not (1.5 <= width <= 5.0) or (np.fmod(width, 0.5) > 0.0):
            print('congrid: width is', width)
            print('congrid: width must be a half-integral value between 1.5 and 5.0 inclusive')
            sys.exit()
        else:
            kernelindex = int((width - 1.5) // 0.5)

    # find the closest grid point to the target location, calculate relative offsets from this point
    center = tide_util.valtoindex(xaxis, loc)
    offset = np.fmod(np.round((loc - xaxis[center]) / xstep, 3), 1.0)  # will vary from -0.5 to 0.5
    if cyclic:
        if center == len(xaxis) - 1 and offset > 0.5:
            center = 0
            offset -= 1.0
        if center == 0 and offset < -0.5:
            center = len(xaxis) - 1
            offset += 1.0
    if not (-0.5 <= offset <= 0.5):
        print('(loc, xstep, center, offset):', loc, xstep, center, offset)
        print('xaxis:', xaxis)
        sys.exit()
    offsetkey = str(offset)

    if kernel == 'old':
        if debug:
            print('gridding with old kernel')
        widthinpts = int(np.round(width * 4.6 / xstep))
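        # force widthinpts to be odd so the kernel is centered on a grid point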
        widthinpts -= widthinpts % 2 - 1
        try:
            yvals = congridyvals[offsetkey]
        except KeyError:
            if debug:
                print('new key:', offsetkey)
            xvals = np.linspace(-xstep * (widthinpts // 2), xstep * (widthinpts // 2), num=widthinpts,
                                endpoint=True) + offset
            congridyvals[offsetkey] = tide_fit.gauss_eval(xvals, np.array([1.0, 0.0, width]))
            yvals = congridyvals[offsetkey]
        startpt = int(center - widthinpts // 2)
        indices = range(startpt, startpt + widthinpts)
        indices = np.remainder(indices, len(xaxis))
        if debug:
            print('center, offset, indices, yvals', center, offset, indices, yvals)
        return val * yvals, yvals, indices
    else:
        offsetinpts = center + offset
        startpt = int(np.ceil(offsetinpts - width / 2.0))
        endpt = int(np.floor(offsetinpts + width / 2.0))
        indices = np.remainder(range(startpt, endpt + 1), len(xaxis))
        try:
            yvals = congridyvals[offsetkey]
        except KeyError:
            if debug:
                print('new key:', offsetkey)
            xvals = indices - center + offset
            if kernel == 'gauss':
                sigma = optsigma[kernelindex]
                congridyvals[offsetkey] = tide_fit.gauss_eval(xvals, np.array([1.0, 0.0, sigma]))
            elif kernel == 'kaiser':
                beta = optbeta[kernelindex]
                congridyvals[offsetkey] = tide_fit.kaiserbessel_eval(xvals, np.array([beta, width / 2.0]))
            else:
                print('illegal kernel value in congrid - exiting')
                sys.exit()
            yvals = congridyvals[offsetkey]
            if debug:
                print('xvals, yvals', xvals, yvals)
        if debug:
            print('center, offset, indices, yvals', center, offset, indices, yvals)
        return val * yvals, yvals, indices
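
Because congrid returns the kernel-weighted value, the kernel weights, and the affected indices separately, a caller accumulates all three over many samples and normalizes the gridded values by the accumulated weights at the end. A minimal sketch of that accumulation loop, assuming it runs in the same module as congrid (which reads the global congridyvals cache) and that the tide_util/tide_fit helpers congrid calls are importable:

import numpy as np

# the cache congrid consults must exist at module scope before the first call
congridyvals = {'kernel': 'kaiser', 'width': 3.0}

xaxis = np.linspace(0.0, 10.0, 101)                  # target grid, 0.1 spacing
samples = [(2.34, 1.0), (2.41, 0.8), (7.77, 0.5)]    # (location, value) pairs

gridded = np.zeros_like(xaxis)
weights = np.zeros_like(xaxis)
for loc, val in samples:
    vals, wts, indices = congrid(xaxis, loc, val, 3.0, kernel='kaiser', cyclic=True)
    gridded[indices] += vals
    weights[indices] += wts

# normalize wherever any kernel weight landed
resampled = np.where(weights > 0.0, gridded / np.maximum(weights, 1e-12), 0.0)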
Example #6
def autocorrcheck(corrscale,
                  thexcorr,
                  delta=0.1,
                  acampthresh=0.1,
                  aclagthresh=10.0,
                  displayplots=False,
                  prewindow=True,
                  detrendorder=1,
                  debug=False):
    """

    Parameters
    ----------
    corrscale
    thexcorr
    delta
    acampthresh
    aclagthresh
    displayplots
    prewindow
    detrendorder
    debug

    Returns
    -------
    sidelobetime
    sidelobeamp
    """
    lookahead = 2
    peaks = tide_fit.peakdetect(thexcorr,
                                x_axis=corrscale,
                                delta=delta,
                                lookahead=lookahead)
    maxpeaks = np.asarray(peaks[0], dtype='float64')
    minpeaks = np.asarray(peaks[1], dtype='float64')
    if len(peaks[0]) > 0:
        if debug:
            print(peaks)
        zeropkindex = np.argmin(abs(maxpeaks[:, 0]))
        for i in range(zeropkindex + 1, maxpeaks.shape[0]):
            if maxpeaks[i, 0] > aclagthresh:
                return None, None
            if maxpeaks[i, 1] > acampthresh:
                sidelobetime = maxpeaks[i, 0]
                sidelobeindex = tide_util.valtoindex(corrscale, sidelobetime)
                sidelobeamp = thexcorr[sidelobeindex]
                numbins = 1
                while (sidelobeindex + numbins < np.shape(corrscale)[0] -
                       1) and (thexcorr[sidelobeindex + numbins] >
                               sidelobeamp / 2.0):
                    numbins += 1
                sidelobewidth = (corrscale[sidelobeindex + numbins] -
                                 corrscale[sidelobeindex]) * 2.0
                fitstart = sidelobeindex - numbins
                fitend = sidelobeindex + numbins
                sidelobeamp, sidelobetime, sidelobewidth = tide_fit.gaussfit(
                    sidelobeamp, sidelobetime, sidelobewidth,
                    corrscale[fitstart:fitend + 1],
                    thexcorr[fitstart:fitend + 1])

                if displayplots:
                    pl.plot(
                        corrscale[fitstart:fitend + 1],
                        thexcorr[fitstart:fitend + 1], 'k',
                        corrscale[fitstart:fitend + 1],
                        tide_fit.gauss_eval(
                            corrscale[fitstart:fitend + 1],
                            [sidelobeamp, sidelobetime, sidelobewidth]), 'r')
                    pl.show()
                return sidelobetime, sidelobeamp
    return None, None
Example #7
    def fit(self, incorrfunc):
        # check to make sure the correlation time axis and the correlation function match in length
        if self.corrtimeaxis is None:
            print("Correlation time axis is not defined - exiting")
            sys.exit()
        if len(self.corrtimeaxis) != len(incorrfunc):
            print(
                "Correlation time axis and values do not match in length (",
                len(self.corrtimeaxis),
                "!=",
                len(incorrfunc),
                "- exiting",
            )
            sys.exit()
        # set initial parameters
        # absmaxsigma is in seconds
        # maxsigma is in Hz
        # maxlag is in seconds
        warnings.filterwarnings("ignore", "Number*")
        failreason = self.FML_NOERROR
        maskval = np.uint16(1)  # start out assuming the fit will succeed
        binwidth = self.corrtimeaxis[1] - self.corrtimeaxis[0]

        # set the search range
        lowerlim = 0
        upperlim = len(self.corrtimeaxis) - 1
        if self.debug:
            print(
                "initial search indices are",
                lowerlim,
                "to",
                upperlim,
                "(",
                self.corrtimeaxis[lowerlim],
                self.corrtimeaxis[upperlim],
                ")",
            )

        # make an initial guess at the fit parameters for the gaussian
        # start with finding the maximum value and its location
        flipfac = 1.0
        corrfunc = incorrfunc + 0.0
        if self.useguess:
            maxindex = tide_util.valtoindex(self.corrtimeaxis, self.maxguess)
            if corrfunc[maxindex] < 0.0:
                flipfac = -1.0
        else:
            maxindex, flipfac = self._maxindex_noedge(corrfunc)
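        # flipfac is -1.0 when the detected peak is negative; the correlation is flipped
        # positive for fitting, and the sign of the amplitude is restored in the return value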
        corrfunc *= flipfac
        maxlag_init = (1.0 * self.corrtimeaxis[maxindex]).astype("float64")
        maxval_init = corrfunc[maxindex].astype("float64")
        if self.debug:
            print(
                "maxindex, maxlag_init, maxval_init:",
                maxindex,
                maxlag_init,
                maxval_init,
            )

        # set the baseline and baselinedev levels
        if (self.functype == "correlation") or (self.functype == "hybrid"):
            baseline = 0.0
            baselinedev = 0.0
        else:
            # for mutual information, there is a nonzero baseline, so we want the difference from that.
            baseline = np.median(corrfunc)
            baselinedev = mad(corrfunc)
        if self.debug:
            print("baseline, baselinedev:", baseline, baselinedev)

        # then calculate the width of the peak
        if self.peakfittype == "fastquad" or self.peakfittype == "COM":
            peakstart = np.max([1, maxindex - 2])
            peakend = np.min([len(self.corrtimeaxis) - 2, maxindex + 2])
        else:
            thegrad = np.gradient(corrfunc).astype(
                "float64")  # the gradient of the correlation function
            if (self.functype == "correlation") or (self.functype == "hybrid"):
                if self.peakfittype == "quad":
                    peakpoints = np.where(
                        corrfunc > maxval_init - 0.05, 1, 0
                    )  # mask for places where correlation is within 0.05 of maxval_init
                else:
                    peakpoints = np.where(
                        corrfunc > self.searchfrac * maxval_init, 1, 0
                    )  # mask for places where correlation exceeds searchfrac*maxval_init
            else:
                # for mutual information, there is a flattish, nonzero baseline, so we want the difference from that.
                peakpoints = np.where(
                    corrfunc >
                    (baseline + self.searchfrac * (maxval_init - baseline)),
                    1,
                    0,
                )

            peakpoints[0] = 0
            peakpoints[-1] = 0
            peakstart = np.max([1, maxindex - 1])
            peakend = np.min([len(self.corrtimeaxis) - 2, maxindex + 1])
            if self.debug:
                print("initial peakstart, peakend:", peakstart, peakend)
            if self.functype == "mutualinfo":
                while peakpoints[peakend + 1] == 1:
                    peakend += 1
                while peakpoints[peakstart - 1] == 1:
                    peakstart -= 1
            else:
                while thegrad[peakend + 1] <= 0.0 and peakpoints[peakend + 1] == 1:
                    peakend += 1
                while thegrad[peakstart - 1] >= 0.0 and peakpoints[peakstart - 1] == 1:
                    peakstart -= 1
            if self.debug:
                print("final peakstart, peakend:", peakstart, peakend)

            # deal with flat peak top
            while (peakend < (len(self.corrtimeaxis) - 3)
                   and corrfunc[peakend] == corrfunc[peakend - 1]):
                peakend += 1
            while peakstart > 2 and corrfunc[peakstart] == corrfunc[peakstart + 1]:
                peakstart -= 1
            if self.debug:
                print("peakstart, peakend after flattop correction:",
                      peakstart, peakend)
                print("\n")
                for i in range(peakstart, peakend + 1):
                    print(self.corrtimeaxis[i], corrfunc[i])
                print("\n")
                fig = plt.figure()
                ax = fig.add_subplot(111)
                ax.set_title("Peak sent to fitting routine")
                plt.plot(
                    self.corrtimeaxis[peakstart:peakend + 1],
                    corrfunc[peakstart:peakend + 1],
                    "r",
                )
                plt.show()

            # This is calculated from first principles, but it's always big by a factor of ~1.4.
            #     Which makes me think I dropped a factor of sqrt(2).  So fix that with a final division
            maxsigma_init = np.float64(
                ((peakend - peakstart + 1) * binwidth /
                 (2.0 * np.sqrt(-np.log(self.searchfrac)))) / np.sqrt(2.0))
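            # For a Gaussian A*exp(-(x - mu)**2 / (2*sigma**2)), the region above
            # searchfrac*A spans a full width of 2*sigma*sqrt(-2*log(searchfrac)), so
            # sigma = width / (2*sqrt(-log(searchfrac))) / sqrt(2) - which is where the
            # final division by sqrt(2) above comes from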
            if self.debug:
                print("maxsigma_init:", maxsigma_init)

            # now check the values for errors
            if self.hardlimit:
                rangeextension = 0.0
            else:
                rangeextension = (self.lagmax - self.lagmin) * 0.75
            if not ((self.lagmin - rangeextension - binwidth) <= maxlag_init <=
                    (self.lagmax + rangeextension + binwidth)):
                if maxlag_init <= (self.lagmin - rangeextension - binwidth):
                    failreason |= self.FML_INITLAGLOW
                    maxlag_init = self.lagmin - rangeextension - binwidth
                else:
                    failreason |= self.FML_INITLAGHIGH
                    maxlag_init = self.lagmax + rangeextension + binwidth
                if self.debug:
                    print("bad initial")
            if maxsigma_init > self.absmaxsigma:
                failreason |= self.FML_INITWIDTHHIGH
                maxsigma_init = self.absmaxsigma
                if self.debug:
                    print("bad initial width - too high")
            if peakend - peakstart < 2:
                failreason |= self.FML_INITWIDTHLOW
                maxsigma_init = np.float64(
                    ((2 + 1) * binwidth /
                     (2.0 * np.sqrt(-np.log(self.searchfrac)))) / np.sqrt(2.0))
                if self.debug:
                    print("bad initial width - too low")
            if (self.functype == "correlation") or (self.functype == "hybrid"):
                if not (self.lthreshval <= maxval_init <=
                        self.uthreshval) and self.enforcethresh:
                    failreason |= self.FML_INITAMPLOW
                    if self.debug:
                        print(
                            "bad initial amp:",
                            maxval_init,
                            "is outside of range",
                            self.lthreshval,
                            "to",
                            self.uthreshval,
                        )
                if maxval_init < 0.0:
                    failreason |= self.FML_INITAMPLOW
                    maxval_init = 0.0
                    if self.debug:
                        print("bad initial amp:", maxval_init,
                              "is less than 0.0")
                if maxval_init > 1.0:
                    failreason |= self.FML_INITAMPHIGH
                    maxval_init = 1.0
                    if self.debug:
                        print("bad initial amp:", maxval_init,
                              "is greater than 1.0")
            else:
                # somewhat different rules for mutual information peaks
                if ((maxval_init - baseline) < self.lthreshval *
                        baselinedev) or (maxval_init < baseline):
                    failreason |= self.FML_INITAMPLOW
                    maxval_init = 0.0
                    if self.debug:
                        print("bad initial amp:", maxval_init,
                              "is less than 0.0")
            if (failreason != self.FML_NOERROR) and self.zerooutbadfit:
                maxval = np.float64(0.0)
                maxlag = np.float64(0.0)
                maxsigma = np.float64(0.0)
            else:
                maxval = np.float64(maxval_init)
                maxlag = np.float64(maxlag_init)
                maxsigma = np.float64(maxsigma_init)

        # refine if necessary
        if self.peakfittype != "None":
            if self.peakfittype == "COM":
                X = self.corrtimeaxis[peakstart:peakend + 1] - baseline
                data = corrfunc[peakstart:peakend + 1]
                maxval = maxval_init
                maxlag = np.sum(X * data) / np.sum(data)
                maxsigma = 10.0
            elif self.peakfittype == "gauss":
                X = self.corrtimeaxis[peakstart:peakend + 1] - baseline
                data = corrfunc[peakstart:peakend + 1]
                # do a least squares fit over the top of the peak
                # p0 = np.array([maxval_init, np.fmod(maxlag_init, lagmod), maxsigma_init], dtype='float64')
                p0 = np.array([maxval_init, maxlag_init, maxsigma_init],
                              dtype="float64")
                if self.debug:
                    print("fit input array:", p0)
                try:
                    plsq, dummy = sp.optimize.leastsq(tide_fit.gaussresiduals,
                                                      p0,
                                                      args=(data, X),
                                                      maxfev=5000)
                    maxval = plsq[0] + baseline
                    maxlag = np.fmod((1.0 * plsq[1]), self.lagmod)
                    maxsigma = plsq[2]
                except:
                    maxval = np.float64(0.0)
                    maxlag = np.float64(0.0)
                    maxsigma = np.float64(0.0)
                if self.debug:
                    print("fit output array:", [maxval, maxlag, maxsigma])
            elif self.peakfittype == "fastgauss":
                X = self.corrtimeaxis[peakstart:peakend + 1] - baseline
                data = corrfunc[peakstart:peakend + 1]
                # do a non-iterative fit over the top of the peak
                # 6/12/2015  This is just broken.  Gives quantized maxima
                maxlag = np.float64(1.0 * np.sum(X * data) / np.sum(data))
                maxsigma = np.float64(
                    np.sqrt(
                        np.abs(np.sum((X - maxlag)**2 * data) / np.sum(data))))
                maxval = np.float64(data.max()) + baseline
            elif self.peakfittype == "fastquad":
                maxlag, maxval, maxsigma, ismax, badfit = tide_fit.refinepeak_quad(
                    self.corrtimeaxis, corrfunc, maxindex)
            elif self.peakfittype == "quad":
                X = self.corrtimeaxis[peakstart:peakend + 1]
                data = corrfunc[peakstart:peakend + 1]
                try:
                    thecoffs = np.polyfit(X, data, 2)
                    a = thecoffs[0]
                    b = thecoffs[1]
                    c = thecoffs[2]
                    maxlag = -b / (2.0 * a)
                    maxval = a * maxlag * maxlag + b * maxlag + c
                    maxsigma = 1.0 / np.fabs(a)
                    if self.debug:
                        print("poly coffs:", a, b, c)
                        print("maxlag, maxval, maxsigma:", maxlag, maxval,
                              maxsigma)
                except np.lib.polynomial.RankWarning:
                    maxlag = 0.0
                    maxval = 0.0
                    maxsigma = 0.0
                if self.debug:
                    print("\n")
                    for i in range(len(X)):
                        print(X[i], data[i])
                    print("\n")
                    fig = plt.figure()
                    ax = fig.add_subplot(111)
                    ax.set_title("Peak and fit")
                    plt.plot(X, data, "r")
                    plt.plot(X, c + b * X + a * X * X, "b")
                    plt.show()

            else:
                print("illegal peak refinement type")

            # check for errors in fit
            fitfail = False
            if self.bipolar:
                lowestcorrcoeff = -1.0
            else:
                lowestcorrcoeff = 0.0
            if (self.functype == "correlation") or (self.functype == "hybrid"):
                if maxval < lowestcorrcoeff:
                    failreason |= self.FML_FITAMPLOW
                    maxval = lowestcorrcoeff
                    if self.debug:
                        print("bad fit amp: maxval is lower than lower limit")
                    fitfail = True
                if np.abs(maxval) > 1.0:
                    if not self.allowhighfitamps:
                        failreason |= self.FML_FITAMPHIGH
                        if self.debug:
                            print(
                                "bad fit amp: magnitude of",
                                maxval,
                                "is greater than 1.0",
                            )
                        fitfail = True
                    maxval = 1.0 * np.sign(maxval)
            else:
                # different rules for mutual information peaks
                if ((maxval - baseline) <
                        self.lthreshval * baselinedev) or (maxval < baseline):
                    failreason |= self.FML_FITAMPLOW
                    if self.debug:
                        if (maxval - baseline) < self.lthreshval * baselinedev:
                            print(
                                "FITAMPLOW: maxval - baseline:",
                                maxval - baseline,
                                " < lthreshval * baselinedev:",
                                self.lthreshval * baselinedev,
                            )
                        if maxval < baseline:
                            print("FITAMPLOW: maxval < baseline:", maxval,
                                  baseline)
                    maxval_init = 0.0
                    if self.debug:
                        print("bad fit amp: maxval is lower than lower limit")
            if (self.lagmin > maxlag) or (maxlag > self.lagmax):
                if self.debug:
                    print("bad lag after refinement")
                if self.lagmin > maxlag:
                    failreason |= self.FML_FITLAGLOW
                    maxlag = self.lagmin
                else:
                    failreason |= self.FML_FITLAGHIGH
                    maxlag = self.lagmax
                fitfail = True
            if maxsigma > self.absmaxsigma:
                failreason |= self.FML_FITWIDTHHIGH
                if self.debug:
                    print("bad width after refinement:", maxsigma, ">",
                          self.absmaxsigma)
                maxsigma = self.absmaxsigma
                fitfail = True
            if maxsigma < self.absminsigma:
                failreason |= self.FML_FITWIDTHLOW
                if self.debug:
                    print("bad width after refinement:", maxsigma, "<",
                          self.absminsigma)
                maxsigma = self.absminsigma
                fitfail = True
            if fitfail:
                if self.debug:
                    print("fit fail")
                if self.zerooutbadfit:
                    maxval = np.float64(0.0)
                    maxlag = np.float64(0.0)
                    maxsigma = np.float64(0.0)
                maskval = np.uint16(0)
            # print(maxlag_init, maxlag, maxval_init, maxval, maxsigma_init, maxsigma, maskval, failreason, fitfail)
        else:
            maxval = np.float64(maxval_init)
            maxlag = np.float64(np.fmod(maxlag_init, self.lagmod))
            maxsigma = np.float64(maxsigma_init)
            if failreason != self.FML_NOERROR:
                maskval = np.uint16(0)

        if self.debug or self.displayplots:
            print(
                "init to final: maxval",
                maxval_init,
                maxval,
                ", maxlag:",
                maxlag_init,
                maxlag,
                ", width:",
                maxsigma_init,
                maxsigma,
            )
        if self.displayplots and (self.peakfittype != "None") and (maskval != 0.0):
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.set_title("Data and fit")
            hiresx = np.arange(X[0], X[-1], (X[1] - X[0]) / 10.0)
            plt.plot(
                X,
                data,
                "ro",
                hiresx,
                tide_fit.gauss_eval(hiresx,
                                    np.array([maxval, maxlag, maxsigma])),
                "b-",
            )
            plt.show()
        return (
            maxindex,
            maxlag,
            flipfac * maxval,
            maxsigma,
            maskval,
            failreason,
            peakstart,
            peakend,
        )
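
The fit method returns the peak index, lag, amplitude, width, a pass/fail mask value, the failure bit mask, and the peak extent. Example #11 below drives it through a SimilarityFunctionFitter instance; a condensed sketch of that usage on a synthetic correlation function follows. The tide_classes and tide_fit aliases are assumptions carried over from Example #11, whose import lines are not shown in these excerpts.

import numpy as np
# tide_classes and tide_fit are assumed to be bound to the same rapidtide
# modules that Example #11 uses; their import statements are not shown there.

corrtimeaxis = np.linspace(-20.0, 20.0, 401)
corrfunc = tide_fit.gauss_eval(corrtimeaxis, np.array([0.8, 5.0, 3.0]))

thefitter = tide_classes.SimilarityFunctionFitter(
    corrtimeaxis=corrtimeaxis,
    lagmin=-15.0,
    lagmax=15.0,
    absmaxsigma=100.0,
    absminsigma=0.1,
    peakfittype='gauss',
    zerooutbadfit=True,
)

(maxindex, maxlag, maxval, maxsigma,
 maskval, failreason, peakstart, peakend) = thefitter.fit(corrfunc)
print(maxlag, maxval, maxsigma, maskval)   # expect roughly 5.0, 0.8, 3.0, and a nonzero mask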
Example #8
def check_autocorrelation(
    corrscale,
    thexcorr,
    delta=0.1,
    acampthresh=0.1,
    aclagthresh=10.0,
    displayplots=False,
    detrendorder=1,
):
    """Check for autocorrelation in an array.

    Parameters
    ----------
    corrscale
    thexcorr
    delta
    acampthresh
    aclagthresh
    displayplots
    detrendorder

    Returns
    -------
    sidelobetime
    sidelobeamp
    """
    lookahead = 2
    peaks = tide_fit.peakdetect(thexcorr, x_axis=corrscale, delta=delta, lookahead=lookahead)
    maxpeaks = np.asarray(peaks[0], dtype="float64")
    if len(peaks[0]) > 0:
        LGR.debug(peaks)
        zeropkindex = np.argmin(abs(maxpeaks[:, 0]))
        for i in range(zeropkindex + 1, maxpeaks.shape[0]):
            if maxpeaks[i, 0] > aclagthresh:
                return None, None
            if maxpeaks[i, 1] > acampthresh:
                sidelobetime = maxpeaks[i, 0]
                sidelobeindex = tide_util.valtoindex(corrscale, sidelobetime)
                sidelobeamp = thexcorr[sidelobeindex]
                numbins = 1
                while (sidelobeindex + numbins < np.shape(corrscale)[0] - 1) and (
                    thexcorr[sidelobeindex + numbins] > sidelobeamp / 2.0
                ):
                    numbins += 1
                sidelobewidth = (
                    corrscale[sidelobeindex + numbins] - corrscale[sidelobeindex]
                ) * 2.0
                fitstart = sidelobeindex - numbins
                fitend = sidelobeindex + numbins
                sidelobeamp, sidelobetime, sidelobewidth = tide_fit.gaussfit(
                    sidelobeamp,
                    sidelobetime,
                    sidelobewidth,
                    corrscale[fitstart : fitend + 1],
                    thexcorr[fitstart : fitend + 1],
                )

                if displayplots:
                    plt.plot(
                        corrscale[fitstart : fitend + 1],
                        thexcorr[fitstart : fitend + 1],
                        "k",
                        corrscale[fitstart : fitend + 1],
                        tide_fit.gauss_eval(
                            corrscale[fitstart : fitend + 1],
                            [sidelobeamp, sidelobetime, sidelobewidth],
                        ),
                        "r",
                    )
                    plt.show()
                return sidelobetime, sidelobeamp
    return None, None
Example #9
def test_findmaxlag(display=False, debug=False):
    textfilename = op.join(get_test_data_path(), 'lt_rt.txt')
    display = False
    debug = False

    # set default variable values
    searchfrac = 0.75
    limitfit = False

    indata = tide_io.readvecs(textfilename)
    xvecs = indata[0, :]
    yvecs = indata[1, :]
    testmaxval = 0.8
    testmaxlag = 8.0
    testmaxsigma = 5.0
    yvecs = tide_fit.gauss_eval(xvecs, np.array([testmaxval, testmaxlag,
                                                 testmaxsigma]))
    lagmin = -20
    lagmax = 20
    widthlimit = 1000.0
    absmaxsigma = 1000.0

    (maxindex, maxlag, maxval, maxsigma, maskval,
     failreason, peakstart, peakend) = tide_fit.findmaxlag_gauss(
         xvecs,
         yvecs,
         lagmin, lagmax, widthlimit,
         tweaklims=False,
         refine=True,
         debug=debug,
         searchfrac=searchfrac,
         zerooutbadfit=False)

    (maxindexr, maxlagr, maxvalr, maxsigmar, maskvalr,
     failreasonr, peakstartr, peakendr) = tide_fit.findmaxlag_gauss_rev(
         xvecs,
         yvecs,
         lagmin, lagmax, widthlimit,
         absmaxsigma=absmaxsigma,
         tweaklims=False,
         refine=True,
         debug=debug,
         searchfrac=searchfrac,
         zerooutbadfit=False)

    print('final results:', maxindex, maxlag, maxval, maxsigma, maskval,
          failreason, peakstart, peakend)
    print('final results:', maxindexr, maxlagr, maxvalr, maxsigmar, maskvalr,
          failreasonr, peakstartr, peakendr)
    oversampfactor = 10
    gauss_xvecs = np.arange(xvecs[0], xvecs[-1],
                            (xvecs[1] - xvecs[0]) / oversampfactor, dtype='float')
    gauss_yvecs = tide_fit.gauss_eval(gauss_xvecs, (maxval, maxlag, maxsigma))
    gauss_yvecsr = tide_fit.gauss_eval(gauss_xvecs, (maxvalr, maxlagr,
                                                     maxsigmar))
    if display:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(xvecs,yvecs,'r')
        ax.plot(gauss_xvecs[(peakstart*oversampfactor):(peakend*oversampfactor+1)], 0.1 + gauss_yvecs[(peakstart*oversampfactor):(peakend*oversampfactor+1)],'g.')
        ax.plot(gauss_xvecs[(peakstartr*oversampfactor):(peakendr*oversampfactor+1)], 0.2 + gauss_yvecsr[(peakstartr*oversampfactor):(peakendr*oversampfactor+1)],'b.')
        #ax.set_xlim((lagmin, lagmax))
        plt.show()
Example #10
def congrid(xaxis, loc, val, width, kernel="kaiser", cyclic=True, debug=False):
    """
    Perform a convolution gridding operation with a Kaiser-Bessel or Gaussian kernel of width 'width'.  Grid
    parameters are cached for performance.

    Parameters
    ----------
    xaxis: array-like
        The target axis for resampling
    loc: float
        The location, in x-axis units, of the sample to be gridded
    val: float
        The value to be gridded
    width: float
        The width of the gridding kernel in target bins
    kernel: {'old', 'gauss', 'kaiser'}, optional
        The type of convolution gridding kernel.  Default is 'kaiser'.
    cyclic: bool, optional
        When True, gridding wraps around the endpoints of xaxis.  Default is True.
    debug: bool, optional
        When True, output additional information about the gridding process

    Returns
    -------
    vals: array-like
        The input value, convolved with the gridding kernel, projected on to x axis points
    weights: array-like
        The values of convolution kernel, projected on to x axis points (used for normalization)
    indices: array-like
        The indices along the x axis where the vals and weights fall.

    Notes
    -----
    See IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL. 10, NO. 3, SEPTEMBER 1991

    """
    global congridyvals

    if (congridyvals["kernel"] != kernel) or (congridyvals["width"] != width):
        if congridyvals["kernel"] != kernel:
            if debug:
                print(congridyvals["kernel"], "!=", kernel)
        if congridyvals["width"] != width:
            if debug:
                print(congridyvals["width"], "!=", width)
        if debug:
            print("(re)initializing congridyvals")
        congridyvals = {}
        congridyvals["kernel"] = kernel
        congridyvals["width"] = width * 1.0
    optsigma = np.array([0.4241, 0.4927, 0.4839, 0.5063, 0.5516, 0.5695, 0.5682, 0.5974])
    optbeta = np.array([1.9980, 2.3934, 3.3800, 4.2054, 4.9107, 5.7567, 6.6291, 7.4302])
    xstep = xaxis[1] - xaxis[0]
    if (loc < xaxis[0] - xstep / 2.0 or loc > xaxis[-1] + xstep / 2.0) and not cyclic:
        print("loc", loc, "not in range", xaxis[0], xaxis[-1])

    # choose the smoothing kernel based on the width
    if kernel != "old":
        if not (1.5 <= width <= 5.0) or (np.fmod(width, 0.5) > 0.0):
            print("congrid: width is", width)
            print("congrid: width must be a half-integral value between 1.5 and 5.0 inclusive")
            sys.exit()
        else:
            kernelindex = int((width - 1.5) // 0.5)

    # find the closest grid point to the target location, calculate relative offsets from this point
    center = tide_util.valtoindex(xaxis, loc)
    offset = np.fmod(np.round((loc - xaxis[center]) / xstep, 3), 1.0)  # will vary from -0.5 to 0.5
    if cyclic:
        if center == len(xaxis) - 1 and offset > 0.5:
            center = 0
            offset -= 1.0
        if center == 0 and offset < -0.5:
            center = len(xaxis) - 1
            offset += 1.0
    if not (-0.5 <= offset <= 0.5):
        print("(loc, xstep, center, offset):", loc, xstep, center, offset)
        print("xaxis:", xaxis)
        sys.exit()
    offsetkey = str(offset)

    if kernel == "old":
        if debug:
            print("gridding with old kernel")
        widthinpts = int(np.round(width * 4.6 / xstep))
        widthinpts -= widthinpts % 2 - 1
        try:
            yvals = congridyvals[offsetkey]
        except KeyError:
            if debug:
                print("new key:", offsetkey)
            xvals = (
                np.linspace(
                    -xstep * (widthinpts // 2),
                    xstep * (widthinpts // 2),
                    num=widthinpts,
                    endpoint=True,
                )
                + offset
            )
            congridyvals[offsetkey] = tide_fit.gauss_eval(xvals, np.array([1.0, 0.0, width]))
            yvals = congridyvals[offsetkey]
        startpt = int(center - widthinpts // 2)
        indices = range(startpt, startpt + widthinpts)
        indices = np.remainder(indices, len(xaxis))
        if debug:
            print("center, offset, indices, yvals", center, offset, indices, yvals)
        return val * yvals, yvals, indices
    else:
        offsetinpts = center + offset
        startpt = int(np.ceil(offsetinpts - width / 2.0))
        endpt = int(np.floor(offsetinpts + width / 2.0))
        indices = np.remainder(range(startpt, endpt + 1), len(xaxis))
        try:
            yvals = congridyvals[offsetkey]
        except KeyError:
            if debug:
                print("new key:", offsetkey)
            xvals = indices - center + offset
            if kernel == "gauss":
                sigma = optsigma[kernelindex]
                congridyvals[offsetkey] = tide_fit.gauss_eval(xvals, np.array([1.0, 0.0, sigma]))
            elif kernel == "kaiser":
                beta = optbeta[kernelindex]
                congridyvals[offsetkey] = tide_fit.kaiserbessel_eval(
                    xvals, np.array([beta, width / 2.0])
                )
            else:
                print("illegal kernel value in congrid - exiting")
                sys.exit()
            yvals = congridyvals[offsetkey]
            if debug:
                print("xvals, yvals", xvals, yvals)
        if debug:
            print("center, offset, indices, yvals", center, offset, indices, yvals)
        return val * yvals, yvals, indices
Example #11
def test_findmaxlag(display=False, fittype="gauss", debug=False):
    textfilename = op.join(get_examples_path(), "lt_rt.txt")

    # set default variable values
    searchfrac = 0.75

    indata = tide_io.readvecs(textfilename, debug=debug)
    xvecs = indata[0, :]
    yvecs = indata[1, :]

    # set some fit parameters
    lagmin = -20.0
    lagmax = 20.0
    widthlimit = 1000.0
    absmaxsigma = 1000.0
    absminsigma = 0.1
    absmaxval = 1.0
    absminval = 0.0

    # test over the lag range
    testmaxval = 0.8
    testmaxsigma = 5.0
    testlags = np.linspace(-25.0, 25.0, 50, endpoint=True)
    testsigmas = np.full((len(testlags)), testmaxsigma, dtype=np.float64)
    testvals = np.full((len(testlags)), testmaxval, dtype=np.float64)

    fml_maxlags = np.zeros(len(testlags), dtype=np.float64)
    fml_maxsigmas = np.zeros(len(testlags), dtype=np.float64)
    fml_maxvals = np.zeros(len(testlags), dtype=np.float64)
    fml_lfailreasons = np.zeros(len(testlags), dtype=np.uint16)
    fmlc_maxlags = np.zeros(len(testlags), dtype=np.float64)
    fmlc_maxsigmas = np.zeros(len(testlags), dtype=np.float64)
    fmlc_maxvals = np.zeros(len(testlags), dtype=np.float64)
    fmlc_lfailreasons = np.zeros(len(testlags), dtype=np.uint16)

    # initialize the correlation fitter
    thefitter = tide_classes.SimilarityFunctionFitter(
        corrtimeaxis=xvecs,
        lagmin=lagmin,
        lagmax=lagmax,
        absmaxsigma=absmaxsigma,
        absminsigma=absminsigma,
        peakfittype=fittype,
        debug=debug,
        searchfrac=searchfrac,
        zerooutbadfit=False,
    )

    for i in range(len(testlags)):
        yvecs = tide_fit.gauss_eval(
            xvecs, np.array([testvals[i], testlags[i], testsigmas[i]]))

        print()
        print()
        print()
        (
            maxindex,
            fml_maxlags[i],
            fml_maxvals[i],
            fml_maxsigmas[i],
            maskval,
            fml_lfailreasons[i],
            peakstart,
            peakend,
        ) = tide_fit.findmaxlag_gauss(
            xvecs,
            yvecs,
            lagmin,
            lagmax,
            widthlimit,
            tweaklims=False,
            refine=True,
            debug=debug,
            searchfrac=searchfrac,
            absmaxsigma=absmaxsigma,
            absminsigma=absminsigma,
            zerooutbadfit=False,
        )

        print()
        print()
        print()
        (
            maxindexc,
            fmlc_maxlags[i],
            fmlc_maxvals[i],
            fmlc_maxsigmas[i],
            maskvalc,
            fmlc_lfailreasons[i],
            peakstartc,
            peakendc,
        ) = thefitter.fit(yvecs)
        print(
            maxindexc,
            fmlc_maxlags[i],
            fmlc_maxvals[i],
            fmlc_maxsigmas[i],
            maskvalc,
            fmlc_lfailreasons[i],
            peakstartc,
            peakendc,
        )

    if debug:
        print("findmaxlag_gauss results over lag range")
        for i in range(len(testlags)):
            print(testlags[i], fml_maxlags[i], fml_lfailreasons[i])

    assert eval_fml_result(lagmin, lagmax, testlags, fml_maxlags,
                           fml_lfailreasons)
    assert eval_fml_result(absminval, absmaxval, testvals, fml_maxvals,
                           fml_lfailreasons)
    assert eval_fml_result(absminsigma, absmaxsigma, testsigmas, fml_maxsigmas,
                           fml_lfailreasons)

    assert eval_fml_result(lagmin, lagmax, testlags, fmlc_maxlags,
                           fmlc_lfailreasons)
    assert eval_fml_result(absminval, absmaxval, testvals, fmlc_maxvals,
                           fmlc_lfailreasons)
    assert eval_fml_result(absminsigma, absmaxsigma, testsigmas,
                           fmlc_maxsigmas, fmlc_lfailreasons)

    if display:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(testlags, fml_maxlags, "r")
        ax.plot(testlags, fmlc_maxlags, "b")
        ax.legend(["findmaxlag_gauss", "classes"])
        plt.show()

    # now test over range of sigmas
    testlag = 5.0
    testsigmas = np.asarray([
        0.01,
        0.02,
        0.05,
        0.1,
        0.2,
        0.5,
        1.0,
        2.0,
        5.0,
        10.0,
        20.0,
        50.0,
        100.0,
        200.0,
        500.0,
        1000.0,
        2000.0,
    ])
    testlags = np.full((len(testsigmas)), testlag, dtype=np.float64)
    testvals = np.full((len(testsigmas)), testmaxval, dtype=np.float64)

    fml_maxlags = np.zeros(len(testsigmas), dtype=np.float64)
    fml_maxsigmas = np.zeros(len(testsigmas), dtype=np.float64)
    fml_maxvals = np.zeros(len(testsigmas), dtype=np.float64)
    fml_wfailreasons = np.zeros(len(testsigmas), dtype=np.uint16)
    fmlc_maxlags = np.zeros(len(testsigmas), dtype=np.float64)
    fmlc_maxsigmas = np.zeros(len(testsigmas), dtype=np.float64)
    fmlc_maxvals = np.zeros(len(testsigmas), dtype=np.float64)
    fmlc_wfailreasons = np.zeros(len(testsigmas), dtype=np.uint16)
    peakstartc = np.zeros(len(testsigmas), dtype=np.int32)
    peakendc = np.zeros(len(testsigmas), dtype=np.int32)

    for i in range(len(testsigmas)):
        yvecs = tide_fit.gauss_eval(
            xvecs, np.array([testvals[i], testlags[i], testsigmas[i]]))

        print()
        print()
        print()
        (
            maxindex,
            fml_maxlags[i],
            fml_maxvals[i],
            fml_maxsigmas[i],
            maskval,
            fml_wfailreasons[i],
            peakstart,
            peakend,
        ) = tide_fit.findmaxlag_gauss(
            xvecs,
            yvecs,
            lagmin,
            lagmax,
            widthlimit,
            tweaklims=False,
            refine=True,
            debug=debug,
            searchfrac=searchfrac,
            absmaxsigma=absmaxsigma,
            absminsigma=absminsigma,
            zerooutbadfit=False,
        )

        print()
        print()
        print()
        (
            maxindexc,
            fmlc_maxlags[i],
            fmlc_maxvals[i],
            fmlc_maxsigmas[i],
            maskvalc,
            fmlc_wfailreasons[i],
            peakstartc[i],
            peakendc[i],
        ) = thefitter.fit(yvecs)
        print(
            maxindexc,
            fmlc_maxlags[i],
            fmlc_maxvals[i],
            fmlc_maxsigmas[i],
            maskvalc,
            fmlc_wfailreasons[i],
            peakstartc[i],
            peakendc[i],
        )

    if debug:
        print("findmaxlag_gauss results over sigma range")
        for i in range(len(testsigmas)):
            print(
                testsigmas[i],
                fml_maxsigmas[i],
                fmlc_maxlags[i],
                fmlc_maxvals[i],
                fml_wfailreasons[i],
            )

        print("\nfitter class results over lag range")
        for i in range(len(testsigmas)):
            print(
                testsigmas[i],
                fmlc_maxsigmas[i],
                fmlc_maxlags[i],
                fmlc_maxvals[i],
                peakstartc[i],
                peakendc[i],
                fmlc_wfailreasons[i],
                thefitter.diagnosefail(np.uint32(fmlc_wfailreasons[i])),
            )

    if display:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.loglog(testsigmas, fml_maxsigmas, "r")
        ax.loglog(testsigmas, fmlc_maxsigmas, "b")
        ax.legend(["findmaxlag_gauss", "classes"])
        plt.show()

    assert eval_fml_result(lagmin, lagmax, testlags, fml_maxlags,
                           fml_wfailreasons)
    # assert eval_fml_result(absminval, absmaxval, testvals, fml_maxvals, fml_wfailreasons)
    assert eval_fml_result(absminsigma, absmaxsigma, testsigmas, fml_maxsigmas,
                           fml_wfailreasons)

    assert eval_fml_result(lagmin, lagmax, testlags, fmlc_maxlags,
                           fmlc_wfailreasons)
    assert eval_fml_result(absminval, absmaxval, testvals, fmlc_maxvals,
                           fmlc_wfailreasons)
    assert eval_fml_result(absminsigma, absmaxsigma, testsigmas,
                           fmlc_maxsigmas, fmlc_wfailreasons)