Example 1
def check(name, data, target, origin=False, tol=2e-16):
    """
    name   data set name
    data   [y,x]
    target [p,dp] but low to high rather than high to low
    """
    p = wpolyfit(data[:, 1], data[:, 0], degree=target.shape[0]-1, origin=origin)
    Ep, Edp = N.flipud(target).T
    show_result(name, p.coeff, p.std, Ep, Edp, tol=tol)
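
A hypothetical call might look like the following; the data set and target values are made up for illustration, and wpolyfit/show_result are assumed to come from the surrounding test module.

import numpy as N

# Illustrative data for y = 2*x + 1; columns are [y, x] as the docstring requires.
x = N.linspace(0.0, 1.0, 5)
data = N.column_stack((2*x + 1, x))

# Expected [p, dp] rows ordered from low to high degree: constant term first.
target = N.array([[1.0, 0.01],   # p0 = 1 +/- 0.01
                  [2.0, 0.01]])  # p1 = 2 +/- 0.01

check("line", data, target)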
Example 2
def check_uncertainty(n=10000):
    """
    This function computes a number of fits to simulated data
    to determine how well the values and uncertainties reported
    by the wpolyfit solver correspond to individual fits of the data.

    For large n the reported parameters do indeed converge to the mean
    parameter values for fits to resampled data.  The reported parameter
    uncertainty estimates, however, are not supported by the Monte Carlo results.
    """
    ##          x         y          dy
    data = N.matrix("""
        0.0013852  0.2144023  0.020470;
        0.0018469  0.2516856  0.022868;
        0.0023087  0.3070443  0.026362;
        0.0027704  0.3603186  0.029670;
        0.0032322  0.4260864  0.033705;
        0.0036939  0.4799956  0.036983
        """).A
    x, y, dy = data[:, 0], data[:, 1], data[:, 2]
    if True: # simpler system to analyze
        x = N.linspace(2, 4, 12)
        y = 3*x+5
        dy = y
    p = wpolyfit(x, y, dy=dy, degree=1)
    P = N.empty((2, n), 'd')
    for i in range(n):
        #pi = N.polyfit(x,N.random.normal(y,dy),degree=1)
        pi = wpolyfit(x, N.random.normal(y, dy), dy=dy, degree=1)
        P[:, i] = pi.coeff
    #print("P", P)
    Ep, Edp = N.mean(P, 1), N.std(P, 1)
    show_result("uncertainty check", p.coeff, p.std, Ep, Edp)

    if False:
        import pylab
        pylab.hist(P[0, :])
        pylab.show()
    """ # Not yet converted from octave
Example 3
def _fit_footprint_data(x, y, dy, kind):
    """
    Fit the footprint from the measurement in *x*, *y*, *dy*.

    The data should already be restricted to the fitting range.  *kind* can be 'plateau' if
    the footprint is a constant scale factor, 'slope' if the footprint should
    go through the origin, or 'line' if the footprint is a slope that does
    not go through the origin.
    """
    if len(x) < 2:
        p, dp = np.array([0., 1.]), np.array([0., 0.])
    elif kind == 'plateau':
        poly = wpolyfit(abs(x), y, dy, degree=0, origin=False)
        p, dp = poly.coeff, poly.std
        p, dp = np.hstack((0, p)), np.hstack((0, dp))
    elif kind == 'slope':
        poly = wpolyfit(abs(x), y, dy, degree=1, origin=True)
        p, dp = poly.coeff, poly.std
    elif kind == 'line':
        poly = wpolyfit(abs(x), y, dy, degree=1, origin=False)
        p, dp = poly.coeff, poly.std
    else:
        raise TypeError('unknown footprint type %r' % kind)
    return p, dp
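
A hypothetical call with made-up footprint data (assuming wpolyfit and numpy are imported as in the surrounding module):

import numpy as np

# Made-up footprint measurement.
x = np.linspace(0.005, 0.03, 8)
y = 25.0 * x + 0.1
dy = np.full_like(y, 0.02)

# 'line' fits slope and intercept, 'slope' forces the fit through the
# origin, and 'plateau' fits a single constant scale factor.
p, dp = _fit_footprint_data(x, y, dy, kind='line')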
Example 4
def integrate(data, spec, left, right, pixel_range, degree, mc_samples,
              slices):
    import numpy as np  # assumed to be a module-level import in the original source

    from dataflow.lib import err1d
    from dataflow.lib.wsolve import wpolyfit

    nframes, npixels = data.v.shape

    # Determine the boundaries of each frame
    slit_width, pixel_width = data.slit1.x, data.slit4.x
    divergence = slit_width / pixel_width
    spec_width = spec[0] * divergence + spec[1]
    back_left = spec_width + np.maximum(left[0] * divergence + left[1], 0)
    back_right = spec_width + np.maximum(right[0] * divergence + right[1], 0)
    #print("integrate", spec, slit_width, pixel_width, spec_width, back_left, back_right)

    # Pixels are numbered from 1 to npixels, including the center pixel.
    # Normalize the pixels so the polynomial fitter uses smaller x.
    # Could also normalize by resolution width, but that makes it harder
    # to identify where the integration region is bad.
    center = data.detector.center[0]
    min_pixel, max_pixel = pixel_range
    p1 = np.maximum(min_pixel - center, -back_left)
    p2 = np.maximum(min_pixel - center, -spec_width)
    p3 = np.minimum(max_pixel - center, +spec_width)
    p4 = np.minimum(max_pixel - center, +back_right)
    pixel = np.arange(1, npixels + 1) - center

    # TODO: maybe estimate divergence from specular peak width?
    # The following does not work:
    #    # Assume signal width is chosen to span +/- 2 sigma, or 95%.
    #    sigma = (p3 - p2)/4 * pixel_width
    #    divergence = np.degrees(np.arctan(sigma / data.detector.distance))

    # Find slices we want to plot by looking up the selected slice values
    # in the list of y values for the frames.
    (_, _), (yaxis, _) = data.get_axes()  # get the data axes
    index = np.searchsorted(yaxis, slices)
    # TODO: search y-axis as bin edges rather than centers
    index = index[(index > 0) & (index < nframes)] - 1  # can't do last slice
    index = set(index)  # duplicates and order don't matter for sets

    # Prepare plottable for slices
    series = []  # [{"label": str}, ...]
    lines = []  # one [[x, y, {"yupper": ..., "ylower": ...}], ...] list per series
    columns = {
        "pixel": {
            "label": "Pixel",
            "units": ""
        },  # no errorbars
        "intensity": {
            "label": "Intensity",
            "units": "counts",
            "errorbars": "error"
        }
    }
    plottable = {
        "type": "1d",
        "title": data.name + ":" + data.entry,
        "entry": data.entry,
        "columns": columns,
        "options": {
            "series": series,
            "axes": {
                "xaxis": {
                    "label": "Pixel"
                },
                "yaxis": {
                    "label": "Intensity (counts)"
                },
            },
            "xcol": "pixel",
            "ycol": "intensity",
            "errorbar_width": 0,
        },
        "data": lines,
    }

    def addline(label, x, y, dy=None):
        #print("line", label, x, y, dy)
        if dy is not None:
            line = [[xk, yk, {
                "yupper": yk + dyk,
                "ylower": yk - dyk
            }] for xk, yk, dyk in zip(x, y, dy)]
        else:
            line = [[xk, yk] for xk, yk in zip(x, y)]
        series.append(label)
        lines.append(line)

    # Cycle through detector frames gathering signal and background for each.
    results = []
    for k in range(nframes):
        # Get data for frame.
        y, dy = data.v[k], data.dv[k]
        spec_idx = (pixel >= p2[k]) & (pixel <= p3[k])
        full_idx = (pixel >= p1[k]) & (pixel <= p4[k])
        back_idx = full_idx & ~spec_idx
        spec_x, spec_y, spec_dy = pixel[spec_idx], y[spec_idx], dy[spec_idx]
        back_x, back_y, back_dy = pixel[back_idx], y[back_idx], dy[back_idx]

        if not len(spec_x) or not len(back_x):
            results.append((np.nan, np.nan, np.nan, np.nan, np.zeros_like(y)))
            continue

        # Integrate frame data.
        # TODO: Could do sub-pixel interpolation at the boundary?
        Is, dIs = poisson_sum(spec_y, spec_dy)
        fit = wpolyfit(back_x, back_y, back_dy, degree=degree)
        # Uh, oh! Correlated errors on poly coefficients! How do we integrate?
        if mc_samples > 0:  # using monte-carlo sampling
            # Generate a random set of polynomials from the fit
            coeffs = fit.rand(size=mc_samples)
            # Use Horner's method to evaluate all p_k over all x points
            px = np.zeros((len(spec_x), mc_samples))
            for c in coeffs.T:
                px *= spec_x[:, None]
                px += c[None, :]
            # Integrate p(x) over x
            integral = np.sum(px, axis=0)
            #print("integral", px.shape, mc_samples, coeffs.shape, integral.shape)
            # Find mean and variance of the integrated values
            Ib, dIb = np.mean(integral), np.std(integral)
        else:  # using simple sum ignoring correlation in uncertainties
            est_y = fit(spec_x)
            est_dy = fit.ci(spec_x)
            Ib, dIb = err1d.sum(est_y, est_dy)

        # TODO: consider fitting gaussian to peak or finding FWHM of spec-back
        #signal = spec_y - fit(spec_x)
        #halfmax = signal.max() / 2
        #top_half = spec_x[signal > halfmax]
        #FWHM = top_half[-1] - top_half[0]
        #sigma = FWHM/(2*sqrt(2*log(2)))

        # add slices if the index is in the set of selected indices
        if k in index:
            valstr = str(yaxis[k])
            addline('data:' + valstr, pixel, y, dy)
            addline('spec:' + valstr, spec_x, fit(spec_x))
            addline('back:' + valstr, back_x, fit(back_x))

        # Show background residuals
        residual = y - fit(pixel)
        residual[~full_idx] = np.nan  #0.
        #residual[spec_idx] += signal_jump

        results.append((Is, dIs, Ib, dIb, residual))

    Is, dIs, Ib, dIb, residual = (np.asarray(v) for v in zip(*results))

    return (Is, dIs), (Ib, dIb), residual, plottable
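
The Monte Carlo handling of the correlated background coefficients can be sketched in isolation with plain NumPy (an illustration of the technique used above, not the dataflow code; the coefficients and covariance below are made up): draw coefficient samples, evaluate each sampled polynomial over the signal pixels with Horner's method, sum along the pixels, and take the mean and spread of the resulting integrals.

import numpy as np

coeff = np.array([0.01, -0.2, 5.0])      # quadratic background, highest power first
cov = np.diag([1e-6, 1e-4, 1e-2])        # made-up coefficient covariance
spec_x = np.arange(-5.0, 6.0)            # pixels under the specular peak

# Draw correlated coefficient samples and evaluate with Horner's method.
samples = np.random.multivariate_normal(coeff, cov, size=1000)   # shape (1000, 3)
px = np.zeros((len(spec_x), len(samples)))
for c in samples.T:                      # one coefficient order at a time
    px = px * spec_x[:, None] + c[None, :]

# Sum the sampled backgrounds over the peak pixels and summarize.
integral = px.sum(axis=0)
Ib, dIb = integral.mean(), integral.std()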