Example #1
def test_dispatch_array_ufunc():
    """Test dispatch for ufuncs."""
    assert numpoly.sum(XY) == XY.__array_ufunc__(numpy.sum, "__call__", XY)
    with raises(numpoly.FeatureNotSupported):
        XY.__array_ufunc__(numpy.sum, "not_a_method", XY)
    with raises(numpoly.FeatureNotSupported):
        XY.__array_ufunc__(object, "__call__", XY)
Example #2
def _mom(self, kloc, dist, scale, shift, parameters):
    del parameters
    poly = numpoly.variable(len(self))
    poly = numpoly.sum(scale*poly, axis=-1)+shift
    poly = numpoly.set_dimensions(numpoly.prod(poly**kloc), len(self))
    out = sum(dist._get_mom(key)*coeff
              for key, coeff in zip(poly.exponents, poly.coefficients))
    return out
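
The method above expands the affine-transformed variable symbolically and reads the raw moment off the exponent/coefficient pairs. A minimal stand-alone sketch of the same idea for the 1-D case, with an illustrative moment table standing in for dist._get_mom (names below are made up for illustration, not library API):

import numpoly

def shifted_moment(raw_moment, kloc, scale=2.0, shift=1.0):
    """E[(scale*X + shift)**kloc] computed from raw moments of X."""
    q0 = numpoly.variable()
    poly = (scale*q0 + shift)**kloc
    return sum(raw_moment(int(key[0]))*coeff
               for key, coeff in zip(poly.exponents, poly.coefficients))

# Raw moments of a standard normal: E[X**n] for n = 0..4.
normal_moments = {0: 1.0, 1: 0.0, 2: 1.0, 3: 0.0, 4: 3.0}
print(shifted_moment(lambda n: normal_moments[n], kloc=2))  # E[(2X+1)**2] = 5.0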
Example #3
def fit_quadrature(orth, nodes, weights, solves, retall=False, norms=None, **kws):
    """
    Using spectral projection to create a polynomial approximation over
    distribution space.

    Args:
        orth (numpoly.ndpoly):
            Orthogonal polynomial expansion. Must be orthogonal for the
            approximation to be accurate.
        nodes (numpy.ndarray):
            Where to evaluate the polynomial expansion and model to
            approximate. ``nodes.shape==(D,K)`` where ``D`` is the number of
            dimensions and ``K`` is the number of nodes.
        weights (numpy.ndarray):
            Weights when doing numerical integration. ``weights.shape == (K,)``
            must hold.
        solves (numpy.ndarray):
            The model evaluation to approximate. If `numpy.ndarray` is
            provided, it must have ``len(solves) == K``. If callable, it must
            take a single argument X with ``len(X) == D``, and return
            a consistent numpy compatible shape.
        norms (numpy.ndarray):
            In the case of TTR (three terms recurrence), using the recurrence
            coefficients to estimate the polynomial norms is more stable than
            calculating them manually. Calculated using quadrature if not
            provided. ``norms.shape == (len(orth),)`` must hold.

    Returns:
        (numpoly.ndpoly):
            Fitted model approximation in the form of a polynomial.
    """
    orth = numpoly.polynomial(orth)
    nodes = numpy.asfarray(nodes)
    weights = numpy.asfarray(weights)

    if callable(solves):
        solves = [solves(node) for node in nodes.T]
    solves = numpy.asfarray(solves)

    shape = solves.shape
    solves = solves.reshape(weights.size, int(solves.size/weights.size))

    ovals = orth(*nodes)
    vals1 = [(val*solves.T*weights).T for val in ovals]

    if norms is None:
        norms = numpy.sum(ovals**2*weights, -1)
    else:
        norms = numpy.array(norms).flatten()

    coefs = (numpy.sum(vals1, 1).T/norms).T
    coefs = coefs.reshape(len(coefs), *shape[1:])
    approx_model = numpoly.sum(orth*coefs.T, -1).T

    if retall:
        return approx_model, coefs
    return approx_model
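
A minimal usage sketch, assuming the chaospy 4.x helpers generate_expansion and generate_quadrature are available; the exponential model is a toy stand-in:

import chaospy
import numpy

dist = chaospy.Normal(0, 1)
orth = chaospy.generate_expansion(3, dist)               # orthogonal expansion
nodes, weights = chaospy.generate_quadrature(4, dist, rule="gaussian")
solves = [numpy.exp(node) for node in nodes[0]]          # toy model evaluations
approx = chaospy.fit_quadrature(orth, nodes, weights, solves)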
Example #4
    def _mom(self, kloc, mean, sigma, cache):
        poly = numpoly.variable(len(self))
        cholesky = numpy.linalg.cholesky(self._covariance)
        poly = numpoly.sum(cholesky * poly, axis=-1) + mean

        poly = numpoly.set_dimensions(numpoly.prod(poly**kloc), len(self))
        out = sum(
            self._dist.mom(key) * coeff
            for key, coeff in zip(poly.exponents, poly.coefficients))
        return out
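
As a sanity check of the Cholesky construction above, the same recipe can be written out by hand for a zero-mean bivariate normal, where E[X1*X2] must equal the off-diagonal covariance entry. In this sketch iid_normal_mom is an illustrative stand-in for self._dist.mom, not a library function:

import numpy
import numpoly

def iid_normal_mom(key):
    """Raw moment E[q0**key[0] * q1**key[1]] for independent standard normals."""
    table = {0: 1.0, 1: 0.0, 2: 1.0, 3: 0.0, 4: 3.0}
    return numpy.prod([table[int(k)] for k in key])

covariance = numpy.array([[1.0, 0.5], [0.5, 1.0]])
cholesky = numpy.linalg.cholesky(covariance)
poly = numpoly.variable(2)
poly = numpoly.sum(cholesky*poly, axis=-1)          # X = cholesky @ q
poly = numpoly.prod(poly**numpy.array([1, 1]))      # X1*X2
print(sum(iid_normal_mom(key)*coeff
          for key, coeff in zip(poly.exponents, poly.coefficients)))  # -> 0.5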
Example #5
def inner(a: PolyLike, b: PolyLike) -> ndpoly:
    """
    Inner product of two arrays.

    Ordinary inner product of vectors for 1-D arrays (without complex
    conjugation), in higher dimensions a sum product over the last axes.

    """
    a, b = numpoly.align_exponents(a, b)
    return numpoly.sum(numpoly.multiply(a, b), axis=-1)
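
A short illustration of inner on two 1-D polynomial vectors (a sketch; the exact printed ordering of terms may differ between numpoly versions):

import numpoly

q0, q1 = numpoly.variable(2)
# (q0, q1) . (q1, 1) = q0*q1 + q1
print(numpoly.inner([q0, q1], [q1, 1]))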
Example #6
def matmul(
    x1: PolyLike,
    x2: PolyLike,
    out: Optional[ndpoly] = None,
    **kwargs: Any,
) -> ndpoly:
    """
    Matrix product of two arrays.

    Args:
        x1, x2:
            Input arrays, scalars not allowed.
        out:
            A location into which the result is stored. If provided, it must
            have a shape that matches the signature `(n,k),(k,m)->(n,m)`.
            If not provided or `None`, a freshly-allocated array is returned.

    Returns:
        The matrix product of the inputs. This is a scalar only when both
        x1, x2 are 1-d vectors.

    Raises:
        ValueError:
            If the last dimension of `x1` is not the same size as
            the second-to-last dimension of `x2`.

    Examples:
        >>> poly = numpoly.variable(4).reshape(2, 2)
        >>> poly
        polynomial([[q0, q1],
                    [q2, q3]])
        >>> numpoly.matmul(poly, [[0, 1], [2, 3]])
        polynomial([[2*q1, 3*q1+q0],
                    [2*q3, 3*q3+q2]])
        >>> numpoly.matmul(poly, [4, 5])
        polynomial([[4*q1+4*q0, 5*q1+5*q0],
                    [4*q3+4*q2, 5*q3+5*q2]])
        >>> numpoly.matmul(*poly)
        polynomial([q1*q2+q0*q2, q1*q3+q0*q3])

    """
    x1 = numpoly.aspolynomial(x1)
    x2 = numpoly.aspolynomial(x2)
    if not x1.shape:
        raise ValueError(ERROR_MESSAGE % 0)
    if not x2.shape:
        raise ValueError(ERROR_MESSAGE % 1)
    x1 = numpoly.reshape(x1, x1.shape+(1,))
    x2 = numpoly.reshape(x2, x2.shape[:-2]+(1,)+x2.shape[-2:])
    x1, x2 = numpoly.broadcast_arrays(x1, x2)
    out_ = numpoly.multiply(x1, x2, out=out, **kwargs)
    return numpoly.sum(out_, axis=-2)
Example #7
def polynomial_from_roots(
    seq_of_zeros: Sequence[int],
    dtype: Optional[numpy.typing.DTypeLike] = None,
) -> ndpoly:
    """
    Find the coefficients of a polynomial with the given sequence of roots.

    Returns the coefficients of the polynomial whose leading coefficient is one
    for the given sequence of zeros (multiple roots must be included in the
    sequence as many times as their multiplicity; see Examples). A square
    matrix (or array, which will be treated as a matrix) can also be given, in
    which case the coefficients of the characteristic polynomial of the matrix
    are returned.

    Args:
        seq_of_zeros:
            A sequence of polynomial roots, or a square array or matrix object.
            Either shape (N,) or (N, N).
        dtype:
            Any object that can be interpreted as a numpy data type.

    Returns:
        1-D polynomial which has `seq_of_zeros` as roots.
        Leading coefficient is always 1.

    Raises:
        ValueError:
            If input is the wrong shape (the input must be a 1-D or square
            2-D array).

    Examples:
        >>> numpoly.polynomial_from_roots((0, 0, 0))
        polynomial(q0**3)
        >>> numpoly.polynomial_from_roots((-0.5, 0, 0.5))
        polynomial(q0**3-0.25*q0)

    """
    exponent = numpy.arange(len(seq_of_zeros), -1, -1, dtype=int)
    basis = numpoly.variable(dtype=dtype)**exponent
    return numpoly.sum(numpy.poly(seq_of_zeros)*basis)
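
The square-matrix case mentioned in the docstring is not covered by the examples above. A sketch following the implementation shown here: the matrix [[1, 0], [0, 2]] has eigenvalues 1 and 2, so its characteristic polynomial is q0**2-3*q0+2 (exact printed formatting may differ):

import numpoly

# Characteristic polynomial of a square matrix (roots = eigenvalues 1 and 2).
print(numpoly.polynomial_from_roots([[1, 0], [0, 2]]))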
Example #8
def test_dispatch_array_function():
    """Test dispatch for functions."""
    assert numpoly.sum(XY) == XY.__array_function__(numpy.sum, (int, ), (XY, ),
                                                    {})
    with raises(numpoly.FeatureNotSupported):
        XY.__array_function__(object, (int, ), (XY, ), {})
Example #9
def E_cond(poly, freeze, dist, **kws):
    """
    Conditional expected value of a distribution or polynomial.

    1st order statistics of a polynomial on a given probability space
    conditioned on some of the variables.

    Args:
        poly (numpoly.ndpoly):
            Polynomial to find conditional expected value on.
        freeze (numpoly.ndpoly):
            Boolean values defining the conditional variables. True values
            imply that the variable is conditioned on, i.e. frozen during the
            expected value calculation.
        dist (Distribution):
            The distributions of the input used in ``poly``.

    Returns:
        (numpoly.ndpoly):
            Same as ``poly``, but with the variables not tagged in ``freeze``
            integrated away.

    Examples:
        >>> q0, q1 = chaospy.variable(2)
        >>> poly = chaospy.polynomial([1, q0, q1, 10*q0*q1-1])
        >>> poly
        polynomial([1, q0, q1, 10*q0*q1-1])
        >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
        >>> chaospy.E_cond(poly, q0, dist)
        polynomial([1.0, q0, 0.0, -1.0])
        >>> chaospy.E_cond(poly, q1, dist)
        polynomial([1.0, 1.0, q1, 10.0*q1-1.0])
        >>> chaospy.E_cond(poly, [q0, q1], dist)
        polynomial([1, q0, q1, 10*q0*q1-1])
        >>> chaospy.E_cond(poly, [], dist)
        polynomial([1.0, 1.0, 0.0, -1.0])
        >>> chaospy.E_cond(4, [], dist)
        array(4)

    """
    poly = numpoly.set_dimensions(poly, len(dist))
    if poly.isconstant():
        return poly.tonumpy()
    assert not dist.stochastic_dependent, dist

    freeze = numpoly.aspolynomial(freeze)
    if not freeze.size:
        return numpoly.polynomial(chaospy.E(poly, dist))
    if not freeze.isconstant():
        freeze = [
            name in freeze.names
            for name in sorted(poly.names, key=lambda x: int(x[1:]))
        ]

    else:
        freeze = freeze.tonumpy()
    freeze = numpy.asarray(freeze, dtype=bool)

    # decompose into frozen and unfrozen part
    poly = numpoly.decompose(poly)
    unfrozen = poly(**{("q%d" % idx): 1
                       for idx, keep in enumerate(freeze) if keep})
    frozen = poly(**{("q%d" % idx): 1
                     for idx, keep in enumerate(freeze) if not keep})

    # if no unfrozen, poly will return numpy.ndarray instead of numpoly.ndpoly
    if not isinstance(unfrozen, numpoly.ndpoly):
        return numpoly.sum(frozen, 0)

    # Remove frozen coefficients, such that poly == sum(frozen*unfrozen) holds
    for key in unfrozen.keys:
        unfrozen.values[key] = unfrozen.values[key] != 0
    return numpoly.sum(frozen * expected.E(unfrozen, dist), 0)
Example #10
def lagrange_polynomial(abscissas, graded=True, reverse=True, sort=None):
    """
    Create Lagrange polynomials.

    Args:
        abscissas (numpy.ndarray):
            Sample points where the Lagrange polynomials shall be defined.
        graded (bool):
            Graded sorting, meaning the indices are always sorted by the index
            sum. E.g. ``q0**2*q1**2*q2**2`` has an exponent sum of 6, and will
            therefore be considered larger than ``q0**2*q1*q2``,
            ``q0*q1**2*q2`` and ``q0*q1*q2**2``, which all have exponent sum of
            5.
        reverse (bool):
            Reverse lexicographical sorting meaning that ``q0*q1**3`` is
            considered bigger than ``q0**3*q1``, instead of the opposite.

    Example:
        >>> chaospy.lagrange_polynomial([4]).round(4)
        polynomial([4.0])
        >>> chaospy.lagrange_polynomial([-10, 10]).round(4)
        polynomial([-0.05*q0+0.5, 0.05*q0+0.5])
        >>> chaospy.lagrange_polynomial([-1, 0, 1]).round(4)
        polynomial([0.5*q0**2-0.5*q0, -q0**2+1.0, 0.5*q0**2+0.5*q0])
        >>> poly = chaospy.lagrange_polynomial([[1, 0, 1], [0, 1, 2]])
        >>> poly.round(4)
        polynomial([-0.5*q1+0.5*q0+0.5, -q0+1.0, 0.5*q1+0.5*q0-0.5])
        >>> poly([1, 0, 1], [0, 1, 2]).round(14)
        array([[1., 0., 0.],
               [0., 1., 0.],
               [0., 0., 1.]])
        >>> nodes = numpy.array([[ 0.17,  0.15,  0.17,  0.19],
        ...                      [14.94, 16.69, 16.69, 16.69]])
        >>> poly = chaospy.lagrange_polynomial(nodes)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        LinAlgError: Lagrange abscissas resulted in non-invertible matrix
    """
    abscissas = numpy.asfarray(abscissas)
    if len(abscissas.shape) == 1:
        abscissas = abscissas.reshape(1, abscissas.size)
    dim, size = abscissas.shape

    order = 1
    while comb(order + dim, dim) < size:
        order += 1

    indices = numpoly.glexindex(0,
                                order + 1,
                                dimensions=dim,
                                graded=graded,
                                reverse=reverse)[:size]
    idx, idy = numpy.mgrid[:size, :size]

    matrix = numpy.prod(abscissas.T[idx]**indices[idy], -1)
    det = numpy.linalg.det(matrix)
    if det == 0:
        raise numpy.linalg.LinAlgError(
            "Lagrange abscissas resulted in non-invertible matrix")

    vec = numpoly.monomial(0,
                           order + 1,
                           dimensions=dim,
                           graded=graded,
                           reverse=reverse)[:size]

    coeffs = numpy.zeros((size, size))

    if size == 1:
        out = numpoly.monomial(
            0, 1, dimensions=dim, graded=graded,
            reverse=reverse) * abscissas.item()

    elif size == 2:
        coeffs = numpy.linalg.inv(matrix)
        out = numpoly.sum(vec * (coeffs.T), 1)

    else:
        for i in range(size):
            if i % 2 != 0:
                k = 1
            else:
                k = 0
            for j in range(size):
                if k % 2 == 0:
                    coeffs[i, j] += numpy.linalg.det(matrix[1:, 1:])
                else:
                    if size % 2 == 0:
                        coeffs[i, j] += -numpy.linalg.det(matrix[1:, 1:])
                    else:
                        coeffs[i, j] += numpy.linalg.det(matrix[1:, 1:])
                matrix = numpy.roll(matrix, -1, axis=0)
                k += 1
            matrix = numpy.roll(matrix, -1, axis=1)
        coeffs /= det
        out = numpoly.sum(vec * (coeffs.T), 1)

    return out
Example #11
def fit_quadrature(
        orth,
        nodes,
        weights,
        solves,
        retall=False,
        norms=None
):
    """
    Fit polynomial chaos expansion using spectral projection.

    Create a polynomial approximation model from orthogonal expansion,
    quadrature nodes and weights.

    Args:
        orth (numpoly.ndpoly):
            Orthogonal polynomial expansion. Must be orthogonal for the
            approximation to be accurate.
        nodes (numpy.ndarray):
            Where to evaluate the polynomial expansion and model to
            approximate. ``nodes.shape==(D, K)`` where ``D`` is the number of
            dimensions and ``K`` is the number of nodes.
        weights (numpy.ndarray):
            Weights when doing numerical integration. ``weights.shape == (K,)``
            must hold.
        solves (numpy.ndarray):
            The model evaluation to approximate. If `numpy.ndarray` is
            provided, it must have ``len(solves) == K``.
        retall (int):
            What the function should return.
            0: only return fitted polynomials, with shape `evals.shape[1:]`.
            1: polynomials, and Fourier coefficients,
            2: polynomials, coefficients and polynomial evaluations.
        norms (numpy.ndarray):
            Three terms recurrence method produces norms more stable than the
            ones calculated from the polynomials themselves. Calculated from
            quadrature if not provided. ``norms.shape == (len(orth),)`` must
            hold.

    Returns:
        (numpoly.ndpoly):
            Fitted model approximation in the form of a polynomial.

    """
    orth = numpoly.polynomial(orth)
    assert orth.ndim == 1
    weights = numpy.asfarray(weights)
    assert weights.ndim == 1
    solves = numpy.asfarray(solves)
    nodes = numpy.atleast_2d(nodes)
    assert nodes.ndim == 2
    assert nodes.shape[1] == len(weights) == len(solves)

    shape = solves.shape[1:]
    solves = solves.reshape(len(solves), -1)

    ovals = orth(*nodes)
    vals1 = [(val*solves.T*weights).T for val in ovals]

    if norms is None:
        norms = numpy.sum(ovals**2*weights, -1)
    norms = numpy.asfarray(norms)
    assert norms.ndim == 1

    coeffs = (numpy.sum(vals1, 1).T/norms).T
    coeffs = coeffs.reshape(len(coeffs), *shape)
    approx_model = numpoly.sum(orth*coeffs.T, -1).T

    choices = {0: approx_model,
               1: (approx_model, coeffs),
               2: (approx_model, coeffs, ovals)}
    return choices[retall]
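
A minimal usage sketch for this variant, assuming the chaospy 4.x helpers generate_expansion and generate_quadrature and the retall semantics defined above; the sine model is a toy stand-in:

import chaospy
import numpy

dist = chaospy.Uniform(-1, 1)
orth = chaospy.generate_expansion(2, dist)
nodes, weights = chaospy.generate_quadrature(3, dist, rule="gaussian")
solves = numpy.sin(nodes[0])                    # model evaluated at the nodes
# retall=2 also returns the Fourier coefficients and polynomial evaluations.
poly, coeffs, ovals = chaospy.fit_quadrature(
    orth, nodes, weights, solves, retall=2)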
Example #12
def fit_regression(
    polynomials,
    abscissas,
    evals,
    model=None,
    retall=0,
):
    """
    Fit a polynomial chaos expansion using linear regression.

    Args:
        polynomials (numpoly.ndpoly):
            Polynomial expansion with ``polynomials.shape == (M,)`` and
            ``polynomials.dim == D``.
        abscissas (numpy.ndarray):
            Collocation nodes with ``abscissas.shape == (D, K)``.
        evals (numpy.ndarray):
            Model evaluations with ``len(evals) == K``.
        model (Optional[sklearn.base.BaseEstimator]):
            By default regression is done using the classical least-square
            method. However, if provided, an `sklearn` regression model can be
            used instead.
        retall (int):
            What the function should return.
            0: only return fitted polynomials, with shape `evals.shape[1:]`.
            1: polynomials, and Fourier coefficients,
            2: polynomials, coefficients and polynomial evaluations.

    Returns:
        (chaospy.ndpoly, numpy.ndarray, numpy.ndarray):
            Returned value as determined by `retall`.

    Examples:
        >>> x, y = chaospy.variable(2)
        >>> polynomials = chaospy.polynomial([1, x, y])
        >>> abscissas = [[-1, -1, 1, 1],  [-1, 1, -1, 1]]
        >>> evals = [0, 1, 1, 2]
        >>> chaospy.fit_regression(polynomials, abscissas, evals).round(14)
        polynomial(0.5*q1+0.5*q0+1.0)
        >>> model = sklearn.linear_model.LinearRegression(fit_intercept=False)
        >>> chaospy.fit_regression(
        ...     polynomials, abscissas, evals, model=model).round(14)
        polynomial(0.5*q1+0.5*q0+1.0)

    """
    abscissas = numpy.atleast_2d(abscissas)
    assert abscissas.ndim == 2, "too many dimensions"

    polynomials = numpoly.aspolynomial(polynomials)

    evals = numpy.asarray(evals)
    assert abscissas.shape[-1] == len(evals)

    poly_evals = polynomials(*abscissas).T
    shape = evals.shape[1:]
    if shape:
        evals = evals.reshape(len(evals), -1)

    if model is None:
        uhat, _, _, _ = numpy.linalg.lstsq(poly_evals, evals, rcond=None)

    else:
        try:
            from sklearn.base import BaseEstimator
        except ImportError:  # pragma: no cover
            raise ValueError(
                "arg model != None requires that scikit-learn is installed")
        assert isinstance(
            model,
            BaseEstimator), ("model not recognized; "
                             "Optional[sklearn.base.BaseEstimator] expected")
        if hasattr(model, "fit_intercept"):
            assert not model.fit_intercept, (
                "requires %s(fit_intercept=False)" % model.__class__.__name__)
        uhat = numpy.transpose(model.fit(poly_evals, evals).coef_)

    approx_model = numpoly.sum((polynomials * uhat.T), -1).reshape(shape)
    choices = {
        0: approx_model,
        1: (approx_model, uhat),
        2: (approx_model, uhat, poly_evals),
    }
    return choices[retall]
Example #13
def fit_regression(
    polynomials,
    abscissas,
    evals,
    model=None,
    retall=False,
):
    """
    Fit a polynomial chaos expansion using linear regression.

    Args:
        polynomials (numpoly.ndpoly):
            Polynomial expansion with ``polynomials.shape == (M,)`` and
            ``polynomials.dim == D``.
        abscissas (numpy.ndarray):
            Collocation nodes with ``abscissas.shape == (D, K)``.
        evals (numpy.ndarray):
            Model evaluations with ``len(evals) == K``.
        model (Optional[sklearn.base.BaseEstimator]):
            By default regression is done using the classical least-square
            method. However, if provided, an `sklearn` regression model can be
            used instead.
        retall (int):
            If 1, return the Fourier coefficients in addition to the fitted
            polynomial ``R``; if 2, also return the polynomial evaluations.

    Returns:
        (chaospy.ndpoly, numpy.ndarray):
            Fitted polynomial with ``R.shape=evals.shape[1:]`` and ``R.dim=D``.
            The Fourier coefficients in the estimation.

    Examples:
        >>> x, y = chaospy.variable(2)
        >>> polynomials = chaospy.polynomial([1, x, y])
        >>> abscissas = [[-1,-1,1,1], [-1,1,-1,1]]
        >>> evals = [0,1,1,2]
        >>> chaospy.fit_regression(polynomials, abscissas, evals).round(14)
        polynomial(0.5*q1+0.5*q0+1.0)

    """
    logger = logging.getLogger(__name__)
    abscissas = numpy.asarray(abscissas)
    if len(abscissas.shape) == 1:
        abscissas = abscissas.reshape(1, *abscissas.shape)
    evals = numpy.array(evals)

    poly_evals = polynomials(*abscissas).T
    shape = evals.shape[1:]
    if shape:
        evals = evals.reshape(evals.shape[0], int(numpy.prod(evals.shape[1:])))

    if model is None:
        uhat = linalg.lstsq(poly_evals, evals)[0]

    else:
        try:
            from sklearn.base import BaseEstimator
        except ImportError:
            raise ValueError(
                "arg model != None requires that scikit-learn is installed")

        if not isinstance(model, BaseEstimator):
            raise ValueError("model not recognized; "
                             "Optional[sklearn.base.BaseEstimator] expected")
        if hasattr(model, "fit_intercept"):
            assert not model.fit_intercept, (
                "model %s must have fit_intercept=False" %
                model.__class__.__name__)
        uhat = model.fit(poly_evals, evals).coef_.T

    if shape:
        evals = evals.reshape(evals.shape[0], *shape)

    approx_model = numpoly.sum((polynomials * uhat.T), -1)
    approx_model = approx_model.reshape(shape)

    if retall == 1:
        return approx_model, uhat
    if retall == 2:
        return approx_model, uhat, poly_evals
    return approx_model