Code example #1
def mean(seq, *args, **kwargs):
    """Return the arithmetic mean of data in ``seq``

    :arg seq: a sequence, :class:`~numpy.ndarray`, or iterable, of numbers or uncertain numbers
    :arg args: optional arguments when ``seq`` is an :class:`~numpy.ndarray`
    :arg kwargs: optional keyword arguments when ``seq`` is an :class:`~numpy.ndarray`
    
    If ``seq`` contains real or uncertain real numbers,
    a real number is returned.

    If ``seq`` contains complex or uncertain complex
    numbers, a complex number is returned.
    
    **Example**::

        >>> data = range(15)
        >>> type_a.mean(data)
        7.0
            
    .. note::
        When ``seq`` is an empty :class:`~numpy.ndarray` or
        a :class:`~numpy.ndarray` containing any ``NaN`` elements,
        ``NaN`` is returned.
        
        In other cases, a :class:`ZeroDivisionError` is raised when there are no elements in ``seq``.

    """
    return value(type_b.mean(seq, *args, **kwargs))
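
The note above can be checked directly. The following is a minimal sketch, not part of the library source: it assumes that the package is importable as ``GTC`` and exposes ``type_a`` (as the docstring examples suggest) and that NumPy is installed.

    import numpy as np
    from GTC import type_a   # assumed import path

    print(type_a.mean(range(15)))                     # 7.0, plain iterable
    print(type_a.mean(np.array([1.0, 2.0, 3.0])))     # 2.0, ndarray input

    # Per the note: an ndarray containing NaN (or an empty ndarray)
    # is expected to give NaN rather than raise an exception
    print(type_a.mean(np.array([1.0, np.nan, 3.0])))  # nan
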
Code example #2
def mean(seq, *args, **kwargs):
    """Return the arithmetic mean of data in ``seq``

    :arg seq: a sequence, :class:`~numpy.ndarray`, or iterable, of numbers or uncertain numbers
    :arg args: optional arguments when ``seq`` is an :class:`~numpy.ndarray`
    :arg kwargs: optional keyword arguments when ``seq`` is an :class:`~numpy.ndarray`
    
    If ``seq`` contains real or uncertain real numbers,
    a real number is returned.

    If ``seq`` contains complex or uncertain complex
    numbers, a complex number is returned.
    
    **Example**::

        >>> data = range(15)
        >>> type_a.mean(data)
        7.0
            
    """
    return value(type_b.mean(seq, *args, **kwargs))
Code example #3
def standard_deviation(seq, mu=None):
    """Return the sample standard deviation
    
    :arg seq: sequence of data
    :arg mu: the arithmetic mean of ``seq``
        
    If ``seq`` contains real or uncertain real numbers, 
    the sample standard deviation is returned.
    
    If ``seq`` contains complex or uncertain complex
    numbers, the standard deviation in the real and
    imaginary components is evaluated, as well as
    the correlation coefficient between the components.
    The results are returned in a pair of objects: a
    :obj:`~named_tuples.StandardDeviation` namedtuple 
    and a correlation coefficient. 

    Only the values of uncertain numbers are used in calculations. 
    
    **Examples**::

        >>> data = range(15)
        >>> type_a.standard_deviation(data)
        4.47213595499958

        >>> data = [(0.91518731126816899+1.5213442955575518j),
        ... (0.96572684493613492-0.18547192979059401j),
        ... (0.23216598132006649+1.6951311687588568j),
        ... (2.1642786101267397+2.2024333895672563j),
        ... (1.1812532664590505+0.59062101107787357j),
        ... (1.2259264339405165+1.1499373179910186j),
        ... (-0.99422341300318684+1.7359338393131392j),
        ... (1.2122867690240853+0.32535154897909946j),
        ... (2.0122536479379196-0.23283009302603963j),
        ... (1.6770229536619197+0.77195994890476838j)]
        >>> sd,r = type_a.standard_deviation(data)
        >>> sd
        StandardDeviation(real=0.913318449990377, imag=0.8397604244242309)
        >>> r
        -0.31374045124595246
        
    """
    N = len(seq)
    if N == 0:
        raise RuntimeError("empty sequence: {!r}".format(seq))

    if mu is None:
        mu = mean(seq)

    # `type_a.mean` returns either a real or complex
    if isinstance(mu, numbers.Real):
        accum = lambda psum, x: psum + (value(x) - mu)**2
        variance = reduce(accum, seq, 0.0) / (N - 1)

        return math.sqrt(variance)

    elif isinstance(mu, numbers.Complex):
        # the covariance matrix is symmetric, so the off-diagonal
        # element is only needed once
        cv_11, cv_12, _, cv_22 = variance_covariance_complex(seq, mu)

        sd_re = math.sqrt(cv_11)
        sd_im = math.sqrt(cv_22)

        den = sd_re * sd_im

        if den == 0.0:
            if cv_12 != 0.0:
                raise RuntimeError(
                    "numerical instability in covariance calculation")
            else:
                r = 0.0
        else:
            r = cv_12 / den

        return StandardDeviation(sd_re, sd_im), r

    else:
        raise RuntimeError("unexpected type for mean value: {!r}".format(mu))
Code example #4
def line_fit_wtls(x, y, u_x, u_y, a0_b0=None, r_xy=None, label=None):
    """Return a total least-squares straight-line fit 
    
    .. versionadded:: 1.2

    :arg x:     sequence of independent-variable data
    :arg y:     sequence of dependent-variable data 
    :arg u_x:   sequence of uncertainties in ``x``
    :arg u_y:   sequence of uncertainties in ``y``
    :arg a0_b0: a pair of initial estimates for the intercept and slope
    :arg r_xy:  correlation between x-y pairs
    :arg label: suffix labeling the uncertain numbers `a` and `b`

    :returns:   an object containing the fitting results
    :rtype:     :class:`.LineFitWTLS`

    The optional argument ``a0_b0`` can be used to provide a pair 
    of initial estimates for the intercept and slope. 

    Based on paper by M Krystek and M Anton,
    *Meas. Sci. Technol.* **22** (2011) 035101 (9pp)
    
    **Example**::

        # Pearson-York test data; see, e.g.,
        # Lybanon, M. in Am. J. Phys 52 (1) 1984
        >>> x=[0.0,0.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4]
        >>> wx=[1000.0,1000.0,500.0,800.0,200.0,80.0,60.0,20.0,1.8,1.0]

        >>> y=[5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5]
        >>> wy=[1.0,1.8,4.0,8.0,20.0,20.0,70.0,70.0,100.0,500.0]

        # standard uncertainties required for weighting
        >>> ux=[1./math.sqrt(wx_i) for wx_i in wx ]
        >>> uy=[1./math.sqrt(wy_i) for wy_i in wy ]

        >>> result = ta.line_fit_wtls(x,y,ux,uy)
        >>> intercept, slope = result.a_b
        >>> intercept
        ureal(5.47991018...,0.29193349...,8)
        >>> slope
        ureal(-0.48053339...,0.057616740...,8)
    
    """
    N = len(x)
    df = N - 2
    if df <= 0 or N != len(y):
        raise RuntimeError("Invalid sequences: len({!r}), len({!r})".format(
            x, y))
    if N != len(u_x) or N != len(u_y):
        raise RuntimeError("Invalid sequences: len({!r}), len({!r})".format(
            u_x, u_y))

    independent = r_xy is None

    x_u = [
        ureal(value(x_i), u_i, inf, None, independent=independent)
        for x_i, u_i in izip(x, u_x)
    ]
    y_u = [
        ureal(value(y_i), u_i, inf, None, independent=independent)
        for y_i, u_i in izip(y, u_y)
    ]
    if not independent:
        for x_i, y_i, r_i in izip(x_u, y_u, r_xy):
            x_i.set_correlation(r_i, y_i)

    result = type_b.line_fit_wtls(x_u, y_u, a_b=a0_b0)

    a, b = result.a_b
    N = result.N
    ssr = result.ssr
    r_ab = a.get_correlation(b)

    a = ureal(a.x,
              a.u,
              df,
              label='a_{}'.format(label) if label is not None else None,
              independent=False)
    b = ureal(b.x,
              b.u,
              df,
              label='b_{}'.format(label) if label is not None else None,
              independent=False)

    real_ensemble((a, b), df)
    a.set_correlation(r_ab, b)

    return LineFitWTLS(a, b, ssr, N)
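
The docstring example above does not exercise the optional ``r_xy`` and ``label`` arguments. The sketch below is an illustrative guess at how they might be used, reusing the Pearson-York data (``x``, ``y``, ``ux``, ``uy``) from that example; the correlation value 0.5 is arbitrary, and it assumes the returned uncertain numbers expose a ``label`` attribute.

    r_xy = [0.5] * len(x)    # hypothetical correlation for each x-y pair

    result = type_a.line_fit_wtls(x, y, ux, uy, r_xy=r_xy, label='PY')
    a, b = result.a_b        # labelled 'a_PY' and 'b_PY'
    print(a.label, b.label)
    print(result.ssr, result.N)
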
Code example #5
def merge(a, b, TOL=1E-13):
    """Combine the uncertainty components of ``a`` and ``b``

    :arg a: an uncertain real or complex number
    :arg b: an uncertain real or complex number
    :arg TOL: tolerance for the absolute difference between the values of ``a`` and ``b``

    :returns:   an uncertain number with the value of ``a`` and the 
                uncertainty components of ``a`` and ``b`` combined

    The absolute difference between the values of ``a`` and ``b`` 
    must be less than ``TOL`` and the components of uncertainty 
    associated with ``a`` and ``b`` must be distinct, otherwise
    a :class:`RuntimeError` will be raised.

    Use this function to combine results from
    type-A and type-B uncertainty analyses 
    performed on a common sequence of data.

    .. note::

        Some judgement will be required as to
        when it is appropriate to merge 
        uncertainty components.

        There is a risk of 'double-counting'
        uncertainty if type-B components
        are contributing to the variability
        observed in the data, and therefore
        assessed in a type-A analysis.

    .. versionchanged:: 1.3.3
        Added the `TOL` keyword argument.

    **Example**::

        # From Appendix H3 in the GUM
        
        # Thermometer readings (degrees C)
        t = (21.521,22.012,22.512,23.003,23.507,
            23.999,24.513,25.002,25.503,26.010,26.511)

        # Observed differences with calibration standard (degrees C)
        b = (-0.171,-0.169,-0.166,-0.159,-0.164,
            -0.165,-0.156,-0.157,-0.159,-0.161,-0.160)

        # Arbitrary offset temperature (degrees C)
        t_0 = 20.0
        
        # Calculate the temperature relative to t_0
        t_rel = [ t_k - t_0 for t_k in t ]

        # A common systematic error in all differences
        e_sys = ureal(0,0.01)
        
        b_type_b = [ b_k + e_sys for b_k in b ]

        # Type-A least-squares regression
        y_1_a, y_2_a = type_a.line_fit(t_rel,b_type_b).a_b

        # Type-B least-squares regression
        y_1_b, y_2_b = type_b.line_fit(t_rel,b_type_b)

        # `y_1` and `y_2` have uncertainty components  
        # related to the type-A analysis as well as the 
        # type-B systematic error
        y_1 = type_a.merge(y_1_a,y_1_b)
        y_2 = type_a.merge(y_2_a,y_2_b)

    """
    if abs(value(a) - value(b)) > TOL:
        raise RuntimeError("|a - b| = {} > {}: {!r} != {!r}".format(
            abs(value(a) - value(b)), TOL, a, b))
    else:
        return a + (b - value(b))
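
The return statement ``a + (b - value(b))`` keeps the value of ``a`` and attaches the uncertainty components of ``b``, so when the components are distinct and independent the merged standard uncertainty combines in quadrature. A minimal sketch, assuming ``ureal`` and ``type_a`` can be imported from the ``GTC`` package:

    from GTC import ureal, type_a   # assumed import path

    x_a = ureal(10.0, 0.3)   # e.g. the result of a type-A evaluation
    x_b = ureal(10.0, 0.4)   # e.g. a type-B evaluation of the same quantity

    x = type_a.merge(x_a, x_b)
    print(x.x)   # 10.0 -- the value of `a` is retained
    print(x.u)   # expected close to sqrt(0.3**2 + 0.4**2) = 0.5
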
Code example #6
def multi_estimate_complex(seq_of_seq, labels=None):
    """
    Return a sequence of uncertain complex numbers

    :arg seq_of_seq: a sequence of sequences of data
    :arg labels: a sequence of `str` labels
    
    :rtype: a sequence of :class:`~lib.UncertainComplex`
        
    The sequences in ``seq_of_seq`` must all be the same length.
    Each sequence contains data that is associated with 
    a particular quantity. An uncertain number for that quantity will  
    be created from sample statistics. The covariance 
    between the different quantities will also be evaluated.
    
    A sequence of elementary uncertain complex numbers is returned. These   
    uncertain numbers are considered to be related, allowing degrees-of-freedom  
    calculations to be performed on derived quantities. 
    
    **Example**::
    
        # From Appendix H2 in the GUM
        
        >>> I = [ complex(x) for x in (19.663E-3,19.639E-3,19.640E-3,19.685E-3,19.678E-3) ]
        >>> V = [ complex(x) for x in (5.007,4.994,5.005,4.990,4.999)]
        >>> P = [ complex(0,p) for p in (1.0456,1.0438,1.0468,1.0428,1.0433) ]

        >>> v,i,p = type_a.multi_estimate_complex( (V,I,P) )

        >>> get_correlation(v.real,i.real)
        -0.355311219817512

        >>> z = v/i*exp(p)
        >>> z.real
        ureal(127.732169928102...,0.071071407396995...,4.0)
        >>> get_correlation(z.real,z.imag)
        -0.588429784423515...
        
    """
    M = len(seq_of_seq)
    N = len(seq_of_seq[0])

    if labels is not None and len(labels) != M:
        raise RuntimeError("Incorrect number of labels: '{!r}'".format(labels))

    # 1. Create 2M sequences of real values: the real and
    #    imaginary parts of each of the M input sequences
    x = []
    for i in xrange(M):
        x.append([value(z_i.real) for z_i in seq_of_seq[i]])
        x.append([value(z_i.imag) for z_i in seq_of_seq[i]])
        if len(x[-1]) != N:
            raise RuntimeError("{:d}th sequence length is incorrect".format(i))

    TWOM = 2 * M
    N_1 = N - 1
    N_N_1 = N * N_1

    # 2. Evaluate the means and uncertainties (keep the deviation sequences)
    x_mean = [value(math.fsum(seq_i) / N) for seq_i in x]
    x_u = []
    for i in xrange(TWOM):
        mu_i = x_mean[i]
        x[i] = [mu_i - x_ij for x_ij in x[i]]
        x_u.append(math.sqrt(math.fsum(x_ij**2 for x_ij in x[i]) / N_N_1))
    # 3. Define M uncertain complex numbers
    x_influences = []
    rtn = []
    for i in xrange(M):
        j = 2 * i
        uc = ucomplex(complex(x_mean[j], x_mean[j + 1]),
                      x_u[j],
                      x_u[j + 1],
                      0.0,
                      N_1,
                      labels[i] if labels is not None else None,
                      independent=False)
        rtn.append(uc)
        x_influences.extend((uc.real, uc.imag))

    # 4. Calculate covariances and set correlation coefficients
    for i in xrange(TWOM - 1):
        x_i = x[i]
        un_i = x_influences[i]
        for j in xrange(i + 1, TWOM):
            x_j = x[j]
            cv = math.fsum(d_i * d_j for d_i, d_j in izip(x_i, x_j)) / N_N_1
            if cv != 0.0:
                r = cv / (x_u[i] * x_u[j])
                set_correlation_real(un_i, x_influences[j], r)

    complex_ensemble(rtn, N_1)

    return rtn
Code example #7
def multi_estimate_real(seq_of_seq, labels=None):
    """Return a sequence of uncertain real numbers 

    :arg seq_of_seq: a sequence of sequences of data
    :arg labels: a sequence of `str` labels 
    
    :rtype: seq of :class:`~lib.UncertainReal`

    The sequences in ``seq_of_seq`` must all be the same length.
    Each sequence contains 
    a sample of data associated with a particular quantity. 
    An uncertain number will be created for the quantity
    from sample statistics. The covariance 
    between the different quantities will also be evaluated.
    
    A sequence of elementary uncertain numbers is returned. These uncertain numbers 
    are considered to be related, allowing degrees-of-freedom calculations 
    to be performed on derived quantities. 

    **Example**::
    
        # From Appendix H2 in the GUM
        
        >>> V = [5.007,4.994,5.005,4.990,4.999]
        >>> I = [19.663E-3,19.639E-3,19.640E-3,19.685E-3,19.678E-3]
        >>> phi = [1.0456,1.0438,1.0468,1.0428,1.0433]
        >>> v,i,p = type_a.multi_estimate_real((V,I,phi),labels=('V','I','phi'))
        >>> v
        ureal(4.999000...,0.0032093613071761...,4, label='V')
        >>> i
        ureal(0.019661,9.471008394041335...e-06,4, label='I')
        >>> p
        ureal(1.044460...,0.0007520638270785...,4, label='phi')
        
        >>> r = v/i*cos(p)
        >>> r
        ureal(127.732169928102...,0.071071407396995...,4.0)
        
    """
    M = len(seq_of_seq)
    N = len(seq_of_seq[0])

    if labels is not None and len(labels) != M:
        raise RuntimeError("Incorrect number of labels: '{!r}'".format(labels))

    # Calculate the deviations from the mean for each sequence
    means = []
    dev = []
    for i, seq_i in enumerate(seq_of_seq):
        if len(seq_i) != N:
            raise RuntimeError("{:d}th sequence length inconsistent".format(i))

        mu_i = value(sum(seq_i) / N)
        means.append(mu_i)
        dev.append(tuple(value(x_j) - mu_i for x_j in seq_i))

    # calculate the covariance matrix
    N_N_1 = N * (N - 1)
    u = []
    cv = []  # M elements of len M-1, M-2, ...
    for i, seq_i in enumerate(dev):
        u_i = math.sqrt(math.fsum(d_i**2 for d_i in seq_i) / N_N_1)
        u.append(u_i)
        cv.append([])
        for seq_j in dev[i + 1:]:
            cv[i].append(
                math.fsum(d_i * d_j
                          for d_i, d_j in izip(seq_i, seq_j)) / N_N_1)

    # Create the list of elementary uncertain numbers to return;
    # the standard uncertainties in `u` are used below to
    # normalise the covariance matrix into correlation coefficients.
    df = N - 1
    rtn = []
    for i in xrange(M):
        mu_i = means[i]
        u_i = u[i]
        l_i = labels[i] if labels is not None else ""
        rtn.append(ureal(mu_i, u_i, df, l_i, independent=False))

    # Create the list of ensemble id's,
    # assign it to the register in the context,
    # set the correlation between nodes
    real_ensemble(rtn, df)

    for i in xrange(M):
        u_i = u[i]
        un_i = rtn[i]

        for j in xrange(M - 1 - i):
            cv_ij = cv[i][j]
            if cv_ij != 0.0:
                r = cv_ij / (u_i * u[i + j + 1])
                un_j = rtn[i + j + 1]
                set_correlation_real(un_i, un_j, r)

    return rtn
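
Because the returned uncertain numbers form an ensemble whose pairwise correlations are set from the sample covariances, those correlations can be read back afterwards. The following is an illustrative sketch that reuses the GUM H2 data (``V``, ``I``, ``phi``) from the docstring example above and assumes ``get_correlation`` is available at the package level, as the ``multi_estimate_complex`` example suggests.

    v, i, p = type_a.multi_estimate_real((V, I, phi), labels=('V', 'I', 'phi'))

    # pairwise correlation coefficients evaluated from the sample covariances
    print(get_correlation(v, i))
    print(get_correlation(v, p))
    print(get_correlation(i, p))
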
Code example #8
File: type_b.py    Project: MSLNZ/GTC
def line_fit_wtls(x, y, u_x=None, u_y=None, a_b=None, r_xy=None):
    """Perform straight-line regression with uncertainty in ``x`` and ``y``

    .. versionadded:: 1.2
    
    :arg x: list of uncertain real numbers for the independent variable
    :arg y: list of uncertain real numbers for the dependent variable
    :arg u_x: a sequence of uncertainties for the ``x`` data
    :arg u_y: a sequence of uncertainties for the ``y`` data
    :arg a_b: a pair of initial estimates for the intercept and slope
    :arg r_xy: correlation between x-y pairs [default: 0]

    Returns a :class:`~type_b.LineFitWTLS` object

    The elements of ``x`` and ``y`` must be uncertain numbers
    with non-zero uncertainties. If specified, the optional arguments 
    ``u_x`` and ``u_y`` will be used uncertainties to weight 
    the data for the regression, otherwise the uncertainties of
    the uncertain numbers in the sequences are used.
    
    The optional argument ``a_b`` can be used to provide a pair 
    of initial estimates for the intercept and slope. Otherwise, 
    initial estimates will be obtained by calling `line_fit_wls`.
    
    Implements a Weighted Total Least Squares algorithm
    that allows for correlation between x-y pairs. See reference: 
    
    M Krystek and M Anton, *Meas. Sci. Technol.* **22** (2011) 035101 (9pp)
        
    **Example**::

        # Pearson-York test data
        # see, e.g., Lybanon, M. in Am. J. Phys 52 (1), January 1984 
        >>> xin=[0.0,0.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4]
        >>> wx=[1000.0,1000.0,500.0,800.0,200.0,80.0,60.0,20.0,1.8,1.0]
        >>> yin=[5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5]
        >>> wy=[1.0,1.8,4.0,8.0,20.0,20.0,70.0,70.0,100.0,500.0]

        # Convert weights to standard uncertainties 
        >>> uxin=[1./math.sqrt(wx_i) for wx_i in wx ]
        >>> uyin=[1./math.sqrt(wy_i) for wy_i in wy ]

        # Define uncertain numbers
        >>> x = [ ureal(xin_i,uxin_i) for xin_i,uxin_i in zip(xin,uxin) ]
        >>> y = [ ureal(yin_i,uyin_i) for yin_i,uyin_i in zip(yin,uyin) ]

        # TLS returns uncertain numbers
        >>> a,b = type_b.line_fit_wtls(x,y).a_b
        >>> a
        ureal(5.47991018...,0.29193349...,inf)
        >>> b
        ureal(-0.48053339...,0.057616740...,inf)

    """
    N = len(x)
    if N != len(y):
        raise RuntimeError(
            "Different sequence lengths: len({!r}) != len({!r})".format(x, y))

    if (u_x is not None or u_y is not None):
        if (u_x is None or u_y is None):
            raise RuntimeError("You must supply ``u_x`` and ``u_y``")
        elif (r_xy is None):
            # default value will be uncorrelated
            r_xy = [0] * len(u_x)

        if len(u_x) != N or len(u_y) != N:
            raise RuntimeError(
                "incompatible sequence lengths: {!r}, {!r}".format(u_x, u_y))

    for x_i, y_i in izip(x, y):
        assert isinstance(x_i, UncertainReal), 'uncertain real required'
        assert isinstance(y_i, UncertainReal), 'uncertain real required'

    # Needed to define UNs locally
    ureal = lambda x, u: UncertainReal._elementary(x, u, inf, None, True)

    if a_b is None:
        a_b = line_fit_wls(x, y, u_y).a_b

    a0 = value(a_b[0])
    b0 = value(a_b[1])

    # initial value for `alpha`
    alpha0 = math.atan(b0)

    # chi_sq(alpha0) -> chisquared
    chi_sq = ChiSq(x, y, u_x, u_y, r_xy)

    # Search for the minimum chi-squared wrt alpha
    x1 = alpha0 - HALF_PI
    x2 = alpha0 + HALF_PI

    # `_dbrent` requires three points that bracket the minimum.
    # The `x1`, `alpha0`, `x2` arguments are floats, but `chi_sq`
    # expects an uncertain-number argument and returns an
    # uncertain number.
    #
    # `_dbrent` returns x, fn(x) and df_dx(x), all floats
    alpha1, fn_alpha1, df_alpha1 = _dbrent(x1, alpha0, x2, chi_sq)

    # dChiSq_a( alpha ) will return dChiSq_dalpha(`alpha`)
    # dChiSq_a(alpha0) -> 1st partial derivative of chisquared at alpha0
    dChiSq_a = dChiSq_dalpha(x, y, u_x, u_y, r_xy)

    # Need the partial derivative of dChiSq_a wrt alpha
    alpha = ureal(alpha1, 1)
    F_alpha = dChiSq_a(alpha)
    dalpha_dF = -1.0 / F_alpha.sensitivity(alpha)

    # Now we define `alpha` with sensitivity to the ``x`` and ``y`` data,
    # via the object ``F_alpha``, which represents the 1st partial derivative
    # of chi-squared at alpha1 (ideally zero, but really only close to the root).
    F_alpha = dChiSq_a(UncertainReal._constant(alpha1))
    alpha = UncertainReal(alpha1, scale_vector(F_alpha._u_components,
                                               dalpha_dF),
                          scale_vector(F_alpha._d_components, dalpha_dF),
                          scale_vector(F_alpha._i_components, dalpha_dF))

    # The sensitivity of p_hat to the x and y data is via
    # `alpha`, `x_bar` and `y_bar` in eqn (43)
    p_hat = chi_sq.p_hat(alpha)

    # Note we have reversed the definitions of `a` and `b` here
    b = alpha._tan()
    a = p_hat / alpha._cos()

    N = len(x)

    ssr = chi_sq(UncertainReal._constant(alpha1)).x

    return LineFitWTLS(a, b, ssr, N)
Code example #9
File: type_b.py    Project: MSLNZ/GTC
def _dbrent(ax, bx, cx, fn, tol=math.sqrt(EPSILON)):
    """
    Minimise fn() and return x, fn(x) and df_dx(x), all floats
    
    `fn` must be a univariate function of an uncertain real that returns
    an uncertain real number.

    `ax`, `bx` and `cx` must be floats. `bx` must be between `ax` and `cx`
    and fn(bx) must be less than both fn(ax) and fn(cx).

    `tol` - the fractional precision

    See also Numerical Recipes in C, 2nd ed, Section 10.3
    
    """
    ITMAX = 100

    deriv = lambda y, x: y.sensitivity(x)
    ureal = lambda x, u: UncertainReal._elementary(x, u, inf, None, True)
    e = 0.0  # The distance moved on the step before last

    a = ax if ax < cx else cx
    b = ax if ax > cx else cx

    assert a <= bx and bx <= b, "Invalid initial values in _dbrent"

    # if fn( ureal(bx,1)) > fn( ureal(a,1)) or fn(ureal(bx,1)) > fn(ureal(b,1)):
    # assert False

    x = w = v = bx

    _u_ = ureal(x, 1.0)
    fn_u = fn(_u_)

    fw = fv = fx = value(fn_u)
    dw = dv = dx = deriv(fn_u, _u_)

    # The routine keeps track of `a` and `b`, which bracket the minimum,
    # `x` is the point with the least function value found so far,
    # `w` is the point with the second least value, `v` is the previous
    # value of `w`, `u` is the point at which the function was most
    # recently evaluated.

    for i in xrange(ITMAX):

        xm = 0.5 * (a + b)
        tol1 = tol * abs(x) + ZEPS
        tol2 = 2.0 * tol1

        if abs(x - xm) <= (tol2 - 0.5 * (b - a)):
            return x, fx, dx

        if abs(e) > tol1:
            # initialise the d's to be out of bracket
            d1 = 2.0 * (b - a)
            d2 = d1

            # Secant method
            if dw != dx: d1 = (w - x) * dx / (dx - dw)
            if dv != dx: d2 = (v - x) * dx / (dx - dv)

            # Choose one estimate.
            # Insist that it be within the bracket
            # and on the side pointed to by the derivative at `x`
            u1 = x + d1
            u2 = x + d2
            OK1 = (a - u1) * (u1 - b) > 0.0 and dx * d1 <= 0.0
            OK2 = (a - u2) * (u2 - b) > 0.0 and dx * d2 <= 0.0

            olde, e = e, d

            if OK1 or OK2:
                if OK1 and OK2:
                    d = d1 if abs(d1) < abs(d2) else d2
                elif OK1:
                    d = d1
                else:
                    d = d2

                if abs(d) <= abs(0.5 * olde):
                    u = x + d
                    if (u - a < tol2) or (b - u < tol2):
                        d = math.copysign(tol1, xm - x)
                else:
                    # choose segment by the sign of the derivative
                    e = a - x if dx >= 0.0 else b - x
                    d = 0.5 * e
            else:
                e = a - x if dx >= 0.0 else b - x
                d = 0.5 * e

        else:
            e = a - x if dx >= 0.0 else b - x
            d = 0.5 * e

        if abs(d) >= tol1:
            u = x + d

            _u_ = ureal(u, 1.0)
            fn_u = fn(_u_)

            fu = value(fn_u)
            du = deriv(fn_u, _u_)

        else:
            # Smallest step possible
            u = x + math.copysign(tol1, d)

            _u_ = ureal(u, 1.0)
            fn_u = fn(_u_)

            fu = value(fn_u)
            du = deriv(fn_u, _u_)

            # If the minimum sized step downhill
            # goes up, then we are done!
            if fu > fx:
                return u, fu, du

        assert a <= u and u <= b, (a, u, b)  # invariant

        if fu <= fx:
            # Found a new best point

            # Update the bracket on one side so that the previous
            # `x` value is now the limit and the new `x` value is
            # contained.
            if u >= x:
                a = x
            else:
                b = x

            v, fv, dv = w, fw, dw
            w, fw, dw = x, fx, dx
            x, fx, dx = u, fu, du

            assert a <= x and x <= b, (a, x, b)  # invariant

        else:
            # The point `x` has not been bettered

            # `x` does not change, but `u` was inside the
            # bracket so we can tighten the noose.
            if u < x:
                a = u
            else:
                b = u

            # `w` is the second best point and `v` is the 3rd best
            if (fu <= fw) or (x == w):
                v, fv, dv = w, fw, dw
                w, fw, dw = u, fu, du

            elif (fu < fv) or (v == x) or (v == w):
                v, fv, dv = u, fu, du

            assert a <= x and x <= b, (a, x, b)  # invariant

        assert fx <= fw and fx <= fv  # invariant

    raise RuntimeError('Exceeded iteration limit in `_dbrent`')
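
The caller is responsible for supplying a bracket, with ``bx`` between ``ax`` and ``cx`` and ``fn(bx)`` below the other two values. The toy below is only a sketch of that contract: ``_dbrent`` is a private helper, so it assumes the code runs inside ``type_b.py`` (or with equivalent imports); the quadratic has its minimum at 1.0, and ordinary arithmetic on the uncertain real passed to ``fn`` returns another uncertain real, as ``_dbrent`` requires.

    # Illustrative only: minimise a quadratic with its minimum at 1.0
    fn = lambda t: (t - 1.0)**2 + 0.25

    # ax, bx, cx bracket the minimum and fn(bx) is the smallest of the three
    x_min, f_min, df_min = _dbrent(-2.0, 0.5, 3.0, fn)

    print(x_min)    # expected close to 1.0
    print(f_min)    # expected close to 0.25
    print(df_min)   # derivative should be near zero at the minimum
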
Code example #10
File: type_b.py    Project: MSLNZ/GTC
def line_fit_wls(x, y, u_y=None):
    """Weighted least-squares linear regression
    
    .. versionadded:: 1.2
    
    :arg x:     sequence of independent variable data 
    :arg y:     sequence of dependent variable data
    :arg u_y:   sequence of uncertainties in ``y``

    :rtype:    :class:`~type_b.LineFitWLS` 
    
    ``y`` must be a sequence of uncertain real numbers.

    Performs a weighted least-squares regression. 

    Weights are calculated from the uncertainty of 
    the ``y`` elements unless the sequence ``u_y`` 
    is provided. 
    
    .. note::

        The uncertainty in the parameter estimates is found
        by propagation of uncertainty *through* the regression
        formulae. This does **not** take account of the residuals.
        
        The function :func:`type_a.line_fit_wls` can be used to 
        carry out a regression analysis that obtains uncertainty in 
        the parameter estimates due to the residuals.
        
        If necessary, the results of both type-A and type-B 
        analyses can be merged (see :func:`type_a.merge`).
        
    **Example**::

        >>> x = [1,2,3,4,5,6]
        >>> y = [3.2, 4.3, 7.6, 8.6, 11.7, 12.8]
        >>> u_y = [0.5,0.5,0.5,1.0,1.0,1.0]
        >>> y = [ ureal(y_i,u_y_i) for y_i, u_y_i in zip(y,u_y) ]
        
        >>> fit = type_b.line_fit_wls(x,y)
        >>> a, b = fit.a_b
        >>> a
        ureal(0.8852320675105...,0.5297081435088...,inf)
        >>> b
        ureal(2.0569620253164...,0.1778920167412...,inf)
        
    """
    if len(x) != len(y):
        raise RuntimeError(
            "Different sequence lengths: len({!r}) != len({!r})".format(x, y))

    if u_y is None:
        v = [y_i.v for y_i in y]
        u = [math.sqrt(v_i) for v_i in v]
    else:
        v = [u_y_i**2 for u_y_i in u_y]
        u = u_y

    S = sum(1.0 / v_i for v_i in v)
    S_x = sum(x_i / v_i for x_i, v_i in izip(x, v))
    S_y = sum(y_i / v_i for y_i, v_i in izip(y, v))

    k = S_x / S
    t = [(x_i - k) / u_i for x_i, u_i in izip(x, u)]

    S_tt = sum(t_i * t_i for t_i in t)

    b = sum(t_i * y_i / u_i / S_tt for t_i, y_i, u_i in izip(t, y, u))
    a = (S_y - b * S_x) / S

    if not isinstance(a, UncertainReal):
        raise ValueError('"y" must be a sequence of uncertain real numbers. '
                         'You may want to use type_a.line_fit_wls instead.')

    # The sum of squared residuals is evaluated for the result object;
    # it does not contribute to the parameter uncertainties
    float_a = value(a)
    float_b = value(b)

    f2 = lambda x_i, y_i, u_i: ((y_i - float_a - float_b * x_i) / u_i)**2

    ssr = math.fsum(
        f2(value(x_i), value(y_i), u_i) for x_i, y_i, u_i in izip(x, y, u))

    return LineFitWLS(a, b, ssr, len(x))
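
As the note says, this type-B result ignores the residuals, whereas ``type_a.line_fit_wls`` uses them; ``type_a.merge`` can combine the two when both effects matter. The sketch below follows the pattern of the ``type_a.merge`` example. It assumes ``ureal``, ``type_a`` and ``type_b`` are importable from ``GTC``, the signature ``type_a.line_fit_wls(x, y, u_y)`` is an assumption, and the merge only succeeds if the two fits agree in value to within ``TOL``.

    from GTC import ureal, type_a, type_b   # assumed import path

    x = [1, 2, 3, 4, 5, 6]
    y0 = [3.2, 4.3, 7.6, 8.6, 11.7, 12.8]
    u_y = [0.5, 0.5, 0.5, 1.0, 1.0, 1.0]
    y = [ureal(y_i, u_y_i) for y_i, u_y_i in zip(y0, u_y)]

    a_b_type_b = type_b.line_fit_wls(x, y).a_b        # through-the-formulae uncertainty
    a_b_type_a = type_a.line_fit_wls(x, y0, u_y).a_b  # residual-based uncertainty (assumed signature)

    a = type_a.merge(a_b_type_a[0], a_b_type_b[0])
    b = type_a.merge(a_b_type_a[1], a_b_type_b[1])
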
Code example #11
File: type_b.py    Project: MSLNZ/GTC
def line_fit(x, y):
    """Least-squares fit intercept and slope 
    
    .. versionadded:: 1.2
    
    :arg x:     sequence of independent variable data 
    :arg y:     sequence of dependent variable data

    :rtype:     a :class:`~type_b.LineFitOLS`
    
    ``y`` must be a sequence of uncertain real numbers.

    Performs an ordinary least-squares regression. 
    
    .. note::

        Uncertainty in the parameter estimates is found
        by propagation *through* the regression
        formulae. This does **not** take residuals into account.
        
        The function :func:`type_a.line_fit` performs a regression 
        analysis that evaluates uncertainty in 
        the parameter estimates using the residuals.
        
        If appropriate, the results from both type-A and type-B 
        analyses can be merged (see :func:`type_a.merge`).
        
    **Example**::

        >>> a0 = 10
        >>> b0 = -3
        >>> u0 = .2

        >>> x = [ float(x_i) for x_i in xrange(10) ]
        >>> y = [ ureal(b0*x_i + a0,u0) for x_i in x ]

        >>> a,b = tb.line_fit(x,y).a_b
        >>> a
        ureal(10.0,0.1175507627290...,inf)
        >>> b
        ureal(-3.0,0.02201927530252...,inf)
        
    """
    N = len(x)
    if N != len(y):
        raise RuntimeError(
            "Different sequence lengths: len({!r}) != len({!r})".format(x, y))

    S_x = sum(x)
    S_y = sum(y)

    k = S_x / N
    t = [x_i - k for x_i in x]

    S_tt = sum(t_i * t_i for t_i in t)

    b = sum(t_i * y_i / S_tt for t_i, y_i in izip(t, y))
    a = (S_y - b * S_x) / N

    if not isinstance(a, UncertainReal):
        raise ValueError('"y" must be a sequence of uncertain real numbers. '
                         'You may want to use type_a.line_fit instead.')

    # The sum of squared residuals is evaluated for the result object;
    # it does not contribute to the parameter uncertainties
    float_a = value(a)
    float_b = value(b)

    f2 = lambda x_i, y_i: ((y_i - float_a - float_b * x_i))**2

    ssr = math.fsum(f2(value(x_i), value(y_i)) for x_i, y_i in izip(x, y))

    return LineFitOLS(a, b, ssr, N)