Example 1
import numpy as np
from uncertainties.unumpy import nominal_values as uval, std_devs as udev

# DEADTIME_UNITS, _show_rates and _show_droop are module-level names in the
# enclosing source file and are not reproduced in this excerpt.
def _show_plots(target, fitted, wt, wo, corrected):
    tau_NP, tau_P, attenuator, rate = target
    tau_NP_f, tau_P_f, attenuation_f, rate_f = fitted

    # Plot the results
    sim_pars = (r'Sim $\tau_{NP}=%g\,{\rm %s}$,  $\tau_{P}=%g\,{\rm %s}$,  ${\rm attenuator}=%g$'
               )%(tau_NP, DEADTIME_UNITS, tau_P, DEADTIME_UNITS, attenuator)
    fit_pars = (r'Fit $\tau_{NP}=%s$,  $\tau_P=%s$,  ${\rm attenuator}=%.2f$'
               )%(
                   ("%.2f"%tau_NP_f[0] if np.inf > tau_NP_f[1] > 0 else "-"),
                   ("%.2f"%tau_P_f[0] if np.inf > tau_P_f[1] > 0 else "-"),
                   1./attenuation_f[0],
               )
    title = '\n'.join((sim_pars, fit_pars))
    import pylab
    pylab.subplot(211)
    #pylab.errorbar(rate, rate_f[0], yerr=rate_f[1], fmt='c.', label='fitted rate')
    #mincident = np.linspace(rate[0], rate[-1], 400)
    #munattenuated = expected_rate(mincident, tau_NP_f[0], tau_P_f[0])
    #mattenuated = expected_rate(mincident/attenuator, tau_NP_f[0], tau_P_f[0])
    #minc = np.hstack((mincident, 0., mincident))
    #mobs = np.hstack((munattenuated, np.NaN, mattenuated))
    #pylab.plot(minc, mobs, 'c-', label='expected rate')
    pylab.errorbar(rate, uval(corrected), yerr=udev(corrected), fmt='r.', label='corrected rate')
    _show_rates(rate, wo, wt, attenuator, tau_NP_f[0], tau_P_f[0])
    pylab.subplot(212)
    _show_droop(rate, wo, wt, attenuator)
    pylab.suptitle(title)

    #pylab.figure(); _show_inversion(wo, tau_P_f, tau_NP_f)
    pylab.show()
Example 2
from uncertainties.unumpy import uarray, nominal_values as uval, std_devs as udev

# deadtime_from_counts and DeadTimeData are defined elsewhere in the
# enclosing module and are not reproduced in this excerpt.
def fit_dead_time(datasets, source='detector', mode='auto'):
    time = [data.monitor.count_time for data in datasets]
    if source == 'monitor':
        counts = [data.monitor.counts for data in datasets]
    elif source == 'detector':
        counts = [data.detector.counts for data in datasets]
    else:
        raise ValueError("Source should be detector or monitor")
    data = datasets[-1]
    index = ~data.mask if data.mask is not None else slice(None, None)
    pairs = [(c[index], t[index]) for c, t in zip(counts, time)]
    res = deadtime_from_counts(pairs, mode=mode)
    tau_NP, tau_P, attenuators, rates = res
    attenuators = 1.0/uarray(*attenuators)
    attenuators = list(zip(uval(attenuators), udev(attenuators)))
    dead_time = DeadTimeData(datasets, tau_NP, tau_P, attenuators, rates, index)

    return dead_time
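
fit_dead_time expects dataset objects exposing monitor.counts,
monitor.count_time, detector.counts and an optional mask. A minimal mock of
that interface (names and numbers invented for illustration; the final call
is commented out because deadtime_from_counts is not part of this excerpt):

import numpy as np
from types import SimpleNamespace

def _mock_dataset(counts, times):
    # Stand-in for the dataset interface unpacked by fit_dead_time above.
    monitor = SimpleNamespace(counts=np.asarray(counts, 'd'),
                              count_time=np.asarray(times, 'd'))
    detector = SimpleNamespace(counts=np.asarray(counts, 'd'))
    return SimpleNamespace(monitor=monitor, detector=detector, mask=None)

datasets = [_mock_dataset([100, 2000, 30000], [1., 1., 1.]),    # attenuated
            _mock_dataset([950, 19000, 240000], [1., 1., 1.])]  # unattenuated
#dead_time = fit_dead_time(datasets, source='detector', mode='auto')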
Example 3
import numpy as np
from uncertainties import ufloat
from uncertainties.unumpy import uarray, nominal_values as uval, std_devs as udev

# interp is an uncertainty-aware linear interpolator from the enclosing
# module (not numpy.interp, which would discard the uncertainties).
def interp_err(x, xp, fp, dfp, left=None, right=None):
    """
    Linear interpolation of x into the points (xk, yk +/- dyk).

    xp is assumed to be in ascending order.

    left is the value (with uncertainty) to return for points before the
    range of xp, or None to use the initial value, fp[0].

    right is the value (with uncertainty) to return for points after the
    range of xp, or None to use the final value, fp[-1].
    """
    is_scalar_x = np.isscalar(x)
    if is_scalar_x:
        fp = ufloat(fp, dfp)
        f = interp([x], xp, fp, left, right)[0]
        return f.n, f.s
    else:
        fp = uarray(fp, dfp)
        f = interp(x, xp, fp, left, right)
        return uval(f), udev(f)
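
A quick usage sketch with hypothetical numbers, assuming interp propagates
uncertainty linearly through the interpolation weights:

xp = np.array([0., 1., 2.])
fp = np.array([10., 20., 15.])
dfp = np.array([1., 2., 1.5])
f, df = interp_err(0.5, xp, fp, dfp)           # scalar query: f = 15.0
fv, dfv = interp_err([0.5, 1.5], xp, fp, dfp)  # vector query returns arrays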
Example 4
# Assumed module context for this excerpt: numpy as np, sqrt from numpy,
# ufloat and uarray with the uval/udev aliases from the uncertainties
# package, plus the module-level helpers DEADTIME_SCALE, simulate_measurement,
# deadtime_from_counts, peak_rate, estimate_attenuation and _show_plots.
def run_sim(tau_NP=0, tau_P=0, attenuator=10, mode='mixed', plot=True):
    """
    Run a simulated dead time estimation measurement and dead time recovery.

    Print the simulated and recovered values.

    Plot the expected data in absolute and relative form.
    """

    # set rate, target counts and cutoff time
    tmax = -np.log10(DEADTIME_SCALE*(tau_NP+tau_P)/2)+0.5
    tmin = tmax-3.5
    #tmin, tmax = np.log10(5000.), np.log10(35000.)
    rate = np.logspace(tmin, tmax, 10)
    rate[0], rate[-1] = rate[0]-1, rate[-1]+1

    target_counts, cutoff_time = int(rate[-1]*0.2), 5*60
    #target_counts, cutoff_time = int(rate[-1]*0.5), 15*60
    #target_counts, cutoff_time = 1e20, 30*60
    # target_counts = 1e20  # force count by time

    # simulate data
    attenuated, unattenuated = simulate_measurement(rate, target_counts,
                attenuator, tau_NP, tau_P, cutoff_time=cutoff_time)

    # estimate dead time
    try:
        res = deadtime_from_counts([attenuated, unattenuated], mode=mode)
        #print(*res)
    except Exception as exc:
        res = (tau_NP, 0), (tau_P, 0), (1./attenuator, 0), (rate, 0*rate)
        print(exc)
    tau_NP_f, tau_P_f, attenuation_f, rate_f = res

    Ipeak, Rpeak = peak_rate(tau_NP=tau_NP_f[0], tau_P=tau_P_f[0])
    #print("counts",counts)
    #print("time",time)

    # redo simulation for test
    attenuated, unattenuated = simulate_measurement(rate, target_counts,
                attenuator, tau_NP, tau_P, cutoff_time=cutoff_time)
    # correct the unattenuated data
    wt = (attenuated[0]/attenuated[1], sqrt(attenuated[0])/attenuated[1])
    wo = (unattenuated[0]/unattenuated[1], sqrt(unattenuated[0])/unattenuated[1])
    scale = estimate_attenuation((wo[0], wo[1]), tau_NP=tau_NP_f, tau_P=tau_P_f,
        #above=True
    )
    corrected = scale*uarray(wo[0], wo[1])
    #print("correction",ufloat(wo[0][-1],wo[1][-1]),"*",scale[-1],"=",corrected[-1])

    # Print results
    total_time = np.sum(attenuated[1] + unattenuated[1])
    print("  Total time to run experiment: %.2f hrs"%(total_time/3600))
    def _compare(name, target, fitted):
        err = (fitted.n-target)/(target if target > 0 else 1.)
        sbad = "*" if abs(err) > 0.5 else " "
        sval = str(target)
        sfit = str(fitted) if np.isfinite(fitted.s) else "%g"%fitted.n
        serr = "%.2f%%"%(err*100)
        print(" %s%s sim=%s, fit=%s, err=%s"%(sbad, name, sval, sfit, serr))
    _compare("tau_NP", tau_NP, ufloat(*tau_NP_f))
    _compare("tau_P", tau_P, ufloat(*tau_P_f))
    _compare("atten", attenuator, 1./ufloat(*attenuation_f))
    print("  peak observed rate %d at %d incident"
          %(int(Ipeak if np.isfinite(Ipeak) else 3*Rpeak), int(Rpeak)))
    #print(" rate", rate_f[0])
    #print("  drate/rate", rate_f[1]/rate_f[0])
    #print("  rate residuals", (rate - rate_f[0])/rate_f[1])
    #print("  scale", " ".join('{:S}'.format(v) for v in scale))
    print("  scale", " ".join('%.2f'%v.n for v in scale))
    rel_err = (rate-uval(corrected))/rate
    print("  error (r-r')/r:",
          " ".join("%.2f%%"%v for v in 100*rel_err[rate <= Ipeak]))
    print("  corrected  (r-r')/dr':",
          " ".join("% .2f"%v for v in
                   (rate-uval(corrected))/udev(corrected)))
    print("  uncorrected (r-r')/dr:",
          " ".join("% .2f"%v for v in (rate-wo[0])/wo[1]))
    target = (tau_NP, tau_P, attenuator, rate)
    fitted = (tau_NP_f, tau_P_f, attenuation_f, rate_f)
    if plot:
        _show_plots(target, fitted, wt, wo, corrected)
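
A hypothetical invocation (the tau values are in the units implied by
DEADTIME_SCALE, which this excerpt does not define; 10 and 5 are
illustrative only):

run_sim(tau_NP=10, tau_P=5, attenuator=10, mode='mixed', plot=False)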
Example 5
# Assumed module context for this excerpt: numpy as np, ufloat and uarray
# with the uval/udev aliases from the uncertainties package, plus the
# module-level helpers DEADTIME_SCALE, peak_rate, _forward, _dforward,
# _invert_above and _invert_below.
def estimate_incident(observed_rate, tau_NP, tau_P, above=False):
    """
    Estimate incident rate given observed rate.

    *observed_rate* is a pair of vectors *(r, dr)*.  Gaussian uncertainty
    is sufficient since the rate measurements should be based on a large
    number of counts.

    *tau_NP* and *tau_P* can be estimated from pairs of rate measurements,
    one with and one without an attenuator.  See :func:`tau_fit` for details.
    Each time constant is a pair *(T, dT)* scaled by *DEADTIME_SCALE*. The
    uncertainty *dT* is ignored for now, since the time constant estimates
    can be highly correlated, particularly if the measurement range is low.

    If *above* is True, then return the rate estimate assuming that the
    rate is higher than the peak rate.

    Returns (I,dI), the estimated incident rate and its uncertainty.

    Note: the uncertainty analysis for paralyzing models is dodgy, as it
    simply scales the uncertainty in the observed counts, suggesting the
    model is perfectly known.  This is fine for most points, but for high
    saturation where the observed rate flattens, a large change in incident
    rate only causes a small change in observed rate.
    """

    # Nonparalyzing dead time only
    if tau_P[0] == 0.:
        # Use direct calculation for pure non-paralyzing models
        R = uarray(*observed_rate)
        tau_NP = ufloat(*tau_NP)
        I = R/(1-R*tau_NP*DEADTIME_SCALE)
        # May accidentally exceed the peak rate; in that case, limit
        # the damage to the lowest rate consistent with the uncertainty
        # in the observed rate.  Basically, look at the denominator in
        # the above equation, and if it is within error of zero, use the
        # relative uncertainty as the scale factor.
        #idx = (R*tau_NP[0]*DEADTIME_SCALE > (1-dR/R))
        #I[idx] = R[idx]**2/dR[idx]
        return uval(I), udev(I)

    # Paralyzing and mixed dead time
    R, dR = observed_rate
    Ipeak, Rpeak = peak_rate(tau_NP[0], tau_P[0])
    if above:
        # Use bisection for intensity above Ipeak.  This was stable enough
        # for the problems tried.  We haven't put effort into improving
        # performance since this capability isn't likely to be used in
        # production.
        n, p = tau_NP[0]*DEADTIME_SCALE, tau_P[0]*DEADTIME_SCALE
        I = [_invert_above(Ipeak, Rpeak, rk, n, p) for rk in R]
        # We can probably get Newton-Raphson iteration to work if we start
        # it in the right place.  The first try with Io = Ipeak*Rpeak/r
        # gave numerical problems, presumably because it was starting too
        # far out in I.  If this capability becomes important this problem
        # can be solved for reasonable values of r.
        I = np.array(I)
    else:
        # Use solver for P and mixed P-NP problems.
        n, p = tau_NP[0]*DEADTIME_SCALE, tau_P[0]*DEADTIME_SCALE
        r = np.asarray(observed_rate[0], 'd')
        # Use Newton-Raphson iteration starting from the observed rate.
        # It converges quickly and stably away from Rpeak.  Near Rpeak
        # it may give numerical problems, but these points are handled
        # elsewhere, so the errors and warnings are suppressed.
        with np.errstate(all='ignore'):
            I = r.copy()
            I -= _forward(I, n, p, r) / _dforward(I, n, p)
            I -= _forward(I, n, p, r) / _dforward(I, n, p)
            I -= _forward(I, n, p, r) / _dforward(I, n, p)
        # Use bisection near Rpeak since the slope approaches zero and
        # Newton-Raphson may fail.  These should be rare.
        index = (r > 0.9*Rpeak)
        I[index] = [_invert_below(Ipeak, Rpeak, rk, n, p) for rk in r[index]]

    return I, (I/R)*dR
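
The non-paralyzing branch above uses the textbook closed form: the forward
model R = I/(1 + I*tau) rearranges exactly to I = R/(1 - R*tau). A minimal
self-contained check with plain floats (hypothetical numbers, uncertainty
propagation omitted):

import numpy as np

tau = 1e-6                       # dead time in seconds
I = np.array([1e3, 1e5, 5e5])    # incident rates (counts/s)
R = I / (1 + I * tau)            # non-paralyzing observed rate
assert np.allclose(I, R / (1 - R * tau))   # the inversion recovers I

# For the paralyzing model R = I*exp(-I*tau_P), the observed rate peaks at
# I = 1/tau_P with R = 1/(e*tau_P), which is why the solver above switches
# to bisection near Rpeak, where the slope goes to zero.
tau_P = 1e-6
Ipeak, Rpeak = 1/tau_P, 1/(np.e*tau_P)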
Example 6
# Assumed module context for this excerpt: numpy as np and the module-level
# helper poisson_average.
def demo_error_prop(title, rate, monitors, attenuators=None,
                    norm='monitor'):
    """
    Compare point averaging algorithms on simulated data.

    *title* is the label for the comparison.

    *rate* is the underlying count rate.

    *monitors* is the set of counting intervals (monitor counts), or the
    counting times if *norm* is 'time'.

    Plugging in some numbers combining two counts, one for 2000 monitors
    and one for 4000 monitors, we can compute the difference between
    measuring the values separately and measuring them together.  Using
    count rates which are low relative to the monitor yields a relative
    error on the order of 1/10^6::

        Na=7, Ma=2000, Nb=13, Mb=4000, Aa=Ab=1, dAa=dAb=0

    When count rates are high, such as with a direct beam measurement where
    the monitor rate is 10% of the detector rate, the relative error is on
    the order of 0.02%::

        Na=20400, Ma=2000, Nb=39500, Mb=4000

    Monitor uncertainty is significant at high count rates.  In the above
    example, the rate uncertainty dr/r for Na/Ma is found to be 2.4% when
    monitor uncertainty is included, but only 0.7% if monitor uncertainty is
    not included in the calculation.  At high Q, where count rates are much
    lower than the monitor rate, monitor uncertainty is much less important.

    The effect of Poisson versus Gaussian averaging is marginal, even for
    regions with extremely low counts, so long as the Gaussian average is
    weighted by monitor counts.
    """
    from uncertainties import ufloat
    from uncertainties.unumpy import uarray, nominal_values as uval, \
        std_devs as udev
    from numpy.random import poisson

    #norm='time'
    #norm='monitor'
    time_err = 0
    #time_err = 0.1

    use_attenuators = attenuators is not None
    monitors = np.array(monitors, 'd')
    if norm == 'monitor':
        umonitors = uarray(monitors, np.sqrt(monitors))
    else:
        umonitors = uarray(monitors, time_err)
    if use_attenuators:
        uattenuators = uarray(*list(zip(*attenuators)))
    else:
        uattenuators = uarray(monitors/monitors, 0.*monitors)
    expected = rate*monitors/uval(uattenuators)
    # TODO: simulation is not correct for count by monitor
    counts = poisson(expected)    # Simulated counts
    #counts = expected   # Non-simulated counts
    #ucounts = uarray(counts, np.sqrt(counts + (counts==0)))
    #ucounts = uarray(counts + (counts==0), np.sqrt(counts + (counts==0)))
    ucounts = uarray(counts, np.sqrt(counts))
    incident = ucounts*uattenuators

    print("="*10, title+", rate=%g,"%rate, \
          "median counts=%d"%np.median(counts), "="*10)

    # rate averaged across different counting intervals
    y = incident/umonitors
    y_ave = poisson_average(uval(y), udev(y), norm=norm)
    y_gm = np.sum(monitors*y)/np.sum(monitors)
    y_ave = ufloat(*y_ave)
    y_g = np.mean(y)

    # rate estimated from full counting time

    tin = (np.sum(incident) if use_attenuators
           else ufloat(np.sum(counts), np.sqrt(np.sum(counts))))
    if norm == 'monitor':
        tmon = ufloat(np.sum(monitors), np.sqrt(np.sum(monitors)))
    else:
        tmon = ufloat(np.sum(monitors), len(monitors)*time_err)
    direct = tin/tmon

    #print("monitors:", monitors)
    #print("counts:", counts)
    #print("incident:", incident)
    if use_attenuators:
        print("attenuators:", list(zip(*attenuators))[0])
    def show(label, r, tag=""):
        if r is direct:
            rel = ""
        else:
            rel = (" diff: (%.1f,%.1f)%% "
                   % (100*(r.n-direct.n)/direct.n, 100*(r.s-direct.s)/direct.s))
        print(label, "r:", r, " dr/r: %.2f%%"%(100*r.s/r.n), rel, tag)
    show("Combined", direct)
    show("Poisson ", y_ave)
    show("Gaussian", y_g)
    show("Gaussian", y_gm, "monitor weighted")

    # again without monitor uncertainty
    if 0:
        y2 = incident/monitors
        y2_ave = ufloat(*poisson_average(uval(y2), udev(y2)))
        y2_g = np.mean(y2)
        show("Separate", y2_ave, "no monitor uncertainty")
        show("Gaussian", y2_g, "no monitor uncertainty")
Example 7
from uncertainties.unumpy import umatrix, nominal_values as uval, std_devs as udev

def solve_err(A, dA, b, db):
    # Solve A x = b, propagating the elementwise uncertainties dA and db
    # through the matrix inverse.
    A = umatrix(A, dA)
    b = umatrix(b, db).T
    x = A.I*b
    return uval(x), udev(x)
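
A usage sketch with hypothetical numbers:

x, dx = solve_err([[3., 1.], [1., 2.]],   # A
                  [[.1, .1], [.1, .1]],   # dA
                  [9., 8.],               # b
                  [.5, .5])               # db
print(x, dx)   # x is approximately [[2.], [3.]], with propagated uncertainties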
Example 8
# Same assumed module context as Example 4; in this variant the attenuation
# helper is named attenuation_estimate and deadtime_from_counts is called
# with the two datasets as separate arguments.
def run_sim(tau_NP=0, tau_P=0, attenuator=10, mode='mixed', plot=True):
    """
    Run a simulated dead time estimation measurement and dead time recovery.

    Print the simulated and recovered values.

    Plot the expected data in absolute and relative form.
    """

    # set rate, target counts and cutoff time
    tmax = -np.log10(DEADTIME_SCALE * (tau_NP + tau_P) / 2)
    rate = np.logspace(tmax - 3, tmax + 0.5, 10)
    rate[0], rate[-1] = rate[0] - 1, rate[-1] + 1

    target_counts, cutoff_time = int(rate[-1] * 0.2), 5 * 60
    #target_counts, cutoff_time = int(rate[-1]*0.5), 15*60
    #target_counts, cutoff_time = 1e20, 30*60
    # target_counts = 1e20  # force count by time

    # simulate data
    attenuated, unattenuated = simulate_measurement(rate,
                                                    target_counts,
                                                    attenuator,
                                                    tau_NP,
                                                    tau_P,
                                                    cutoff_time=cutoff_time)

    # estimate dead time
    try:
        res = deadtime_from_counts(attenuated, unattenuated, mode=mode)
        #print(*res)
    except Exception as exc:
        res = (tau_NP, 0), (tau_P, 0), (1. / attenuator, 0), (rate, 0 * rate)
        print(exc)
    tau_NP_f, tau_P_f, attenuation_f, rate_f = res

    Ipeak, Rpeak = peak_rate(tau_NP=tau_NP_f[0], tau_P=tau_P_f[0])
    #print("counts",counts)
    #print("time",time)

    # redo simulation for test
    attenuated, unattenuated = simulate_measurement(rate,
                                                    target_counts,
                                                    attenuator,
                                                    tau_NP,
                                                    tau_P,
                                                    cutoff_time=cutoff_time)
    # correct the unattenuated data
    wt = (attenuated[0] / attenuated[1], sqrt(attenuated[0]) / attenuated[1])
    wo = (unattenuated[0] / unattenuated[1],
          sqrt(unattenuated[0]) / unattenuated[1])
    scale = attenuation_estimate(
        (wo[0], wo[1]),
        tau_NP=tau_NP_f,
        tau_P=tau_P_f,
        #above=True
    )
    corrected = scale * uarray(wo[0], wo[1])
    #print("correction",ufloat(wo[0][-1],wo[1][-1]),"*",scale[-1],"=",corrected[-1])

    # Print results
    total_time = np.sum(attenuated[1] + unattenuated[1])
    print("  Total time to run experiment: %.2f hrs" % (total_time / 3600))

    def _compare(name, target, fitted):
        err = (fitted.n - target) / (target if target > 0 else 1.)
        sbad = "*" if abs(err) > 0.5 else " "
        sval = str(target)
        sfit = str(fitted) if np.isfinite(fitted.s) else "%g" % fitted.n
        serr = "%.2f%%" % (err * 100)
        print(" %s%s sim=%s, fit=%s, err=%s" % (sbad, name, sval, sfit, serr))

    _compare("tau_NP", tau_NP, ufloat(*tau_NP_f))
    _compare("tau_P", tau_P, ufloat(*tau_P_f))
    _compare("atten", attenuator, 1. / ufloat(*attenuation_f))
    print("  peak observed rate %d at %d incident" %
          (int(Ipeak if np.isfinite(Ipeak) else 3 * Rpeak), int(Rpeak)))
    #print(" rate", rate_f[0])
    #print("  drate/rate", rate_f[1]/rate_f[0])
    #print("  rate residuals", (rate - rate_f[0])/rate_f[1])
    #print("  scale", " ".join('{:S}'.format(v) for v in scale))
    print("  scale", " ".join('%.2f' % v.n for v in scale))
    rel_err = (rate - uval(corrected)) / rate
    print("  error (r-r')/r:",
          " ".join("%.2f%%" % v for v in 100 * rel_err[rate <= Ipeak]))
    print(
        "  corrected  (r-r')/dr':",
        " ".join("% .2f" % v
                 for v in (rate - uval(corrected)) / udev(corrected)))
    print("  uncorrected (r-r')/dr:",
          " ".join("% .2f" % v for v in (rate - wo[0]) / wo[1]))
    target = (tau_NP, tau_P, attenuator, rate)
    fitted = (tau_NP_f, tau_P_f, attenuation_f, rate_f)
    if plot:
        _show_plots(target, fitted, wt, wo, corrected)