Example 1
def compensated_hd1(x, coeffs):
    """Performs the compensated ``HD`` algorithm when ``k = 1``.

    .. _JGH+13: https://dx.doi.org/10.1016/j.cam.2012.11.008

    See the `JGH+13`_ paper for more details on the ``HD`` algorithm and the
    ``CompHD`` algorithm.

    Here ``HD`` stands for "Horner derivative".
    """
    y1 = 0.0
    y2 = coeffs[0]
    e1 = 0.0  # y1_hat = y1 + e1
    e2 = 0.0  # y2_hat = y2 + e2

    for coeff in coeffs[1:-1]:
        # Update ``y1`` and ``e1``.
        prod, pi = eft.multiply_eft(x, y1)
        y1, sigma = eft.add_eft(prod, y2)
        e1 = x * e1 + e2 + (pi + sigma)
        # Update ``y2`` and ``e2``.
        prod, pi = eft.multiply_eft(x, y2)
        y2, sigma = eft.add_eft(prod, coeff)
        e2 = x * e2 + (pi + sigma)

    # Perform one last update of ``y1`` and ``e1``.
    prod, pi = eft.multiply_eft(x, y1)
    y1, sigma = eft.add_eft(prod, y2)
    e1 = x * e1 + e2 + (pi + sigma)

    # Return the compensated form of ``y1``.
    return y1 + e1
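Every example on this page leans on an ``eft`` module supplying the two
classical error-free transformations: ``add_eft`` (Knuth's TwoSum) and
``multiply_eft`` (TwoProduct). That module is not shown here; a minimal
sketch consistent with the call sites, using Veltkamp/Dekker splitting for
the product, would be:

def add_eft(val1, val2):
    """Knuth's TwoSum: the rounded sum and its exact rounding error."""
    sum_ = val1 + val2
    delta = sum_ - val1
    error = (val1 - (sum_ - delta)) + (val2 - delta)
    return sum_, error


def _split(val):
    # Veltkamp splitting for IEEE-754 doubles (53-bit significand).
    scaled = val * 134217729.0  # 2^27 + 1
    high = scaled - (scaled - val)
    return high, val - high


def multiply_eft(val1, val2):
    """Dekker's TwoProduct: the rounded product and its exact rounding error."""
    product = val1 * val2
    high1, low1 = _split(val1)
    high2, low2 = _split(val2)
    error = low1 * low2 - (
        ((product - high1 * high2) - low1 * high2) - high1 * low2
    )
    return product, error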
Example 2
def compensated_residual(s, coeffs1, t, coeffs2):
    """Compute the residual between points on two planar curves.

    Each coordinate is evaluated via the 2-compensated de Casteljau
    algorithm (``_compensated_k`` with ``K = 2``, shown in Example 8),
    which returns a computed value together with its error term.
    """
    x1, dx1 = de_casteljau._compensated_k(s, coeffs1[0, :], 2)
    y1, dy1 = de_casteljau._compensated_k(s, coeffs1[1, :], 2)
    x2, dx2 = de_casteljau._compensated_k(t, coeffs2[0, :], 2)
    y2, dy2 = de_casteljau._compensated_k(t, coeffs2[1, :], 2)

    # Compensated subtraction: ``sigma`` is the exact round-off of the
    # difference; fold it in together with the accumulated error terms.
    dx, sigma = eft.add_eft(x1, -x2)
    tau = (dx1 - dx2) + sigma
    dx += tau
    dy, sigma = eft.add_eft(y1, -y2)
    tau = (dy1 - dy2) + sigma
    dy += tau

    return np.array([[dx], [dy]])
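A minimal usage sketch, assuming NumPy plus the ``eft`` and ``de_casteljau``
modules are importable under the names used above (the coefficient arrays
here are made up for illustration):

import numpy as np

# Two quadratic curves in the plane; rows hold the x- and y-coefficients.
# The second curve is the first with its coefficients reversed, so it
# traces the same points and the residual at matching parameters is zero.
coeffs1 = np.asarray([[0.0, 1.0, 2.0], [0.0, 2.0, 0.0]])
coeffs2 = np.asarray([[2.0, 1.0, 0.0], [0.0, 2.0, 0.0]])
residual = compensated_residual(0.5, coeffs1, 0.5, coeffs2)
# residual is approximately [[0.0], [0.0]]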
Example 3
def local_error_eft(errors, rho, delta_b):
    r"""Perform an error-free transformation for computing :math:`\ell`.

    This assumes, but does not check, that there are at least two
    ``errors``.
    """
    num_errs = len(errors)
    new_errors = [None] * (num_errs + 1)

    l_hat, new_errors[0] = eft.add_eft(errors[0], errors[1])
    for j in range(2, num_errs):
        l_hat, new_errors[j - 1] = eft.add_eft(l_hat, errors[j])

    prod, new_errors[num_errs - 1] = eft.multiply_eft(rho, delta_b)
    l_hat, new_errors[num_errs] = eft.add_eft(l_hat, prod)

    return new_errors, l_hat
Example 4
def count_add_eft():
    """Check that ``add_eft`` costs exactly 6 flops (the TwoSum count)."""
    parent = operation_count.Computation()
    val1 = operation_count.Float(1.5, parent)
    val2 = operation_count.Float(0.5 + 0.5**52, parent)
    sum_, error = eft.add_eft(val1, val2)
    assert sum_.value == 2.0
    assert error.value == 0.5**52
    assert parent.count == 6
    print("     add_eft(): {}".format(parent.display))
Example 5
def pre_compensated_derivative(coeffs):
    """Compute EFTs of the forward differences of ``coeffs``.

    Returns the rounded differences ``coeffs[k + 1] - coeffs[k]`` along
    with their exact round-off errors.
    """
    degree = len(coeffs) - 1
    pk = []
    err_k = []
    for k in range(degree):
        delta_b, sigma = eft.add_eft(coeffs[k + 1], -coeffs[k])
        pk.append(delta_b)
        err_k.append(sigma)

    return pk, err_k
Example 6
def _compensated_derivative(s, pk, err_k):
    """Run a compensated de Casteljau reduction on difference coefficients.

    ``pk`` and ``err_k`` come from ``pre_compensated_derivative``; the
    error term is propagated alongside the values at every level.
    """
    r, rho = eft.add_eft(1.0, -s)
    degree = len(pk)
    for k in range(1, degree):
        new_pk = []
        new_err_k = []
        for j in range(degree - k):
            # new_pk.append(r * pk[j] + s * pk[j + 1])
            prod1, d_pi1 = eft.multiply_eft(pk[j], r)
            prod2, d_pi2 = eft.multiply_eft(pk[j + 1], s)
            new_dp, d_sigma = eft.add_eft(prod1, prod2)
            new_pk.append(new_dp)
            d_ell = d_pi1 + d_pi2 + d_sigma + pk[j] * rho
            new_err = d_ell + s * err_k[j + 1] + r * err_k[j]
            new_err_k.append(new_err)

        # Update the "current" values.
        pk = new_pk
        err_k = new_err_k

    return degree, pk[0], err_k[0]
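Examples 5 and 6 are the two halves of one computation. A hypothetical
driver gluing them together (the name ``compensated_derivative`` and the
final scaling are assumptions, based on the fact that the derivative of a
degree-``n`` Bezier curve is ``n`` times the de Casteljau evaluation of
the first differences):

def compensated_derivative(s, coeffs):
    # Hypothetical glue: difference the coefficients once, then run the
    # compensated reduction and fold the error term back in before the
    # assumed degree scaling.
    pk, err_k = pre_compensated_derivative(coeffs)
    degree, db, err = _compensated_derivative(s, pk, err_k)
    return degree * (db + err)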
Example 7
def _compensated(x, coeffs):
    """Evaluate a polynomial by Horner's scheme, recording per-step errors.

    Returns the computed value along with the round-off errors of each
    product (``e_pi``) and each sum (``e_sigma``).
    """
    if not coeffs:
        return 0.0, [], []

    p = coeffs[0]
    e_pi = []
    e_sigma = []
    for coeff in coeffs[1:]:
        prod, e1 = eft.multiply_eft(p, x)
        p, e2 = eft.add_eft(prod, coeff)
        e_pi.append(e1)
        e_sigma.append(e2)

    return p, e_pi, e_sigma
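The errors returned by ``_compensated`` are the coefficients of an error
polynomial; evaluating it by Horner's scheme and adding the result to ``p``
yields the classic ``CompHorner`` compensation. A sketch of such a driver
(the function name is an assumption):

def compensated_horner(x, coeffs):
    p, e_pi, e_sigma = _compensated(x, coeffs)
    # Evaluate the error polynomial with coefficients ``e_pi[i] +
    # e_sigma[i]`` by Horner, in the same order the main loop produced them.
    error = 0.0
    for err1, err2 in zip(e_pi, e_sigma):
        error = x * error + (err1 + err2)
    return p + error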
Example 8
def _compensated_k(s, coeffs, K):
    r"""Performs a K-compensated de Casteljau.

    .. _JLCS10: https://doi.org/10.1016/j.camwa.2010.05.021

    Note that the order of operations exactly matches the `JLCS10`_ paper.
    For example, :math:`\widehat{\partial b}_j^{(k)}` is computed as

    .. math::

        \widehat{\ell}_{1, j}^{(k)} \oplus \left(s \otimes
            \widehat{\partial b}_{j + 1}^{(k + 1)}\right) \oplus
            \left(\widehat{r} \otimes \widehat{\partial b}_j^{(k + 1)}\right)

    instead of "typical" order

    .. math::

        \left(\widehat{r} \otimes \widehat{\partial b}_j^{(k + 1)}\right)
            \oplus \left(s \otimes \widehat{\partial b}_{j + 1}^{(k + 1)}
            \right) \oplus \widehat{\ell}_{1, j}^{(k)}.

    This is so that the term

    .. math::

        \widehat{r} \otimes \widehat{\partial b}_j^{(k + 1)}

    only has to appear in one sum. Avoiding the extra sum matters because
    :math:`\widehat{r}` already carries round-off error (tracked by
    :math:`\rho`).
    """
    r, rho = eft.add_eft(1.0, -s)

    degree = len(coeffs) - 1
    bk = {0: list(coeffs)}
    # NOTE: This will be shared, but is read only.
    all_zero = (0.0, ) * (degree + 1)
    for F in range(1, K - 1 + 1):
        bk[F] = all_zero

    for k in range(degree):
        new_bk = {F: [] for F in range(K - 1 + 1)}

        for j in range(degree - k):
            # Update the "level 0" stuff.
            P1, pi1 = eft.multiply_eft(r, bk[0][j])
            P2, pi2 = eft.multiply_eft(s, bk[0][j + 1])
            S3, sigma3 = eft.add_eft(P1, P2)
            new_bk[0].append(S3)

            errors = [pi1, pi2, sigma3]
            delta_b = bk[0][j]

            for F in range(1, K - 2 + 1):
                new_errors, l_hat = local_error_eft(errors, rho, delta_b)
                P1, pi1 = eft.multiply_eft(s, bk[F][j + 1])
                S2, sigma2 = eft.add_eft(l_hat, P1)
                P3, pi3 = eft.multiply_eft(r, bk[F][j])
                S, sigma4 = eft.add_eft(S2, P3)
                new_bk[F].append(S)

                new_errors.extend([pi1, sigma2, pi3, sigma4])
                errors = new_errors
                delta_b = bk[F][j]

            # Update the "level 2" stuff.
            l_hat = local_error(errors, rho, delta_b)
            new_bk[K - 1].append(l_hat + s * bk[K - 1][j + 1] +
                                 r * bk[K - 1][j])

        # Update the "current" values.
        bk = new_bk

    return tuple(bk[F][0] for F in range(K - 1 + 1))
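Example 8 also calls ``local_error``, which does not appear on this page.
A minimal sketch consistent with its EFT counterpart in Example 3 (same
arguments, plain floating-point accumulation, no error outputs):

def local_error(errors, rho, delta_b):
    # Non-EFT counterpart of ``local_error_eft``: accumulate the local
    # errors in working precision, including the ``rho * delta_b`` term.
    result = errors[0] + errors[1]
    for error in errors[2:]:
        result += error
    return result + rho * delta_b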