Example #1
    def test_krawtchouk(self):
        num_coef = 6
        ntrials = 10
        p = 0.5

        xk = np.array(range(ntrials + 1), dtype='float')
        pk = stats.binom.pmf(xk, ntrials, p)

        ab_lanczos = lanczos(xk, pk, num_coef)
        ab_stieltjes = stieltjes(xk, pk, num_coef)

        ab_exact = krawtchouk_recurrence(num_coef, ntrials, p)

        # ab_lanczos[-1, 0] is a dummy entry, so set it to the exact value
        # so the comparison passes if all other entries are correct
        ab_lanczos[-1, 0] = ab_exact[-1, 0]

        assert np.allclose(ab_lanczos, ab_exact)
        assert np.allclose(ab_stieltjes, ab_exact)

        x, w = gauss_quadrature(ab_lanczos, num_coef)
        moments = np.array([(x**ii).dot(w) for ii in range(num_coef)])
        true_moments = np.array([(xk**ii).dot(pk) for ii in range(num_coef)])
        assert np.allclose(moments, true_moments)
        p = evaluate_orthonormal_polynomial_1d(x, num_coef - 1, ab_lanczos)
        assert np.allclose((p.T * w).dot(p), np.eye(num_coef))
        p = evaluate_orthonormal_polynomial_1d(xk, num_coef - 1, ab_lanczos)
        assert np.allclose((p.T * pk).dot(p), np.eye(num_coef))
Example #2
    def test_discrete_chebyshev(self):
        num_coef = 5
        nmasses = 10

        xk = np.array(range(nmasses), dtype='float')
        pk = np.ones(nmasses) / nmasses

        ab_lanczos = lanczos(xk, pk, num_coef)
        ab_stieltjes = stieltjes(xk, pk, num_coef)

        ab_exact = discrete_chebyshev_recurrence(num_coef, nmasses)

        # ab_lanczos[-1, 0] is a dummy entry, so set it to the exact value
        # so the comparison passes if all other entries are correct
        ab_lanczos[-1, 0] = ab_exact[-1, 0]

        assert np.allclose(ab_lanczos, ab_exact)
        assert np.allclose(ab_stieltjes, ab_exact)

        x, w = gauss_quadrature(ab_lanczos, num_coef)
        moments = np.array([(x**ii).dot(w) for ii in range(num_coef)])
        true_moments = np.array([(xk**ii).dot(pk) for ii in range(num_coef)])
        assert np.allclose(moments, true_moments)
        p = evaluate_orthonormal_polynomial_1d(x, num_coef - 1, ab_lanczos)
        assert np.allclose((p.T * w).dot(p), np.eye(num_coef))
        p = evaluate_orthonormal_polynomial_1d(xk, num_coef - 1, ab_lanczos)
        assert np.allclose((p.T * pk).dot(p), np.eye(num_coef))
    def test_orthonormality_physicists_hermite_polynomial(self):
        rho = 0.
        degree = 2
        probability_measure = False

        ab = hermite_recurrence(degree + 1,
                                rho,
                                probability=probability_measure)
        x, w = np.polynomial.hermite.hermgauss(degree + 1)

        p = evaluate_orthonormal_polynomial_1d(x, degree, ab)
        p_exact = np.asarray([1 + 0. * x, 2 * x, 4. * x**2 - 2]).T / np.sqrt(
            sp.factorial(np.arange(degree + 1)) * np.sqrt(np.pi) *
            2**np.arange(degree + 1))

        assert np.allclose(p, p_exact)

        # test orthogonality
        exact_moments = np.zeros((degree + 1))
        # basis is orthonormal so integration of constant basis will be
        # non-zero but will not integrate to 1.0
        exact_moments[0] = np.pi**0.25
        assert np.allclose(np.dot(p.T, w), exact_moments)
        # test orthonormality
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
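A quick numerical aside on the pi**0.25 moment above: the orthonormal constant basis for the weight exp(-x**2) is phi_0 = pi**(-1/4), so its weighted integral is pi**(-1/4)*sqrt(pi) = pi**(1/4). A minimal standalone check, assuming scipy is available:

import numpy as np
from scipy.integrate import quad

# integrate the orthonormal constant basis phi_0 = pi**(-1/4) against the
# physicists' Hermite weight exp(-x**2)
moment0, _ = quad(lambda x: np.pi**(-0.25) * np.exp(-x**2), -np.inf, np.inf)
assert np.isclose(moment0, np.pi**0.25)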
    def test_compute_univariate_orthonormal_basis_products(self):
        max_degree1, max_degree2 = 3, 2

        get_recursion_coefficients = partial(jacobi_recurrence,
                                             alpha=0.,
                                             beta=0.,
                                             probability=True)

        product_coefs = compute_univariate_orthonormal_basis_products(
            get_recursion_coefficients, max_degree1, max_degree2)

        max_degree = max_degree1 + max_degree2
        x = np.linspace(-1, 1, 51)
        recursion_coefs = get_recursion_coefficients(max_degree + 1)
        ortho_basis_matrix = evaluate_orthonormal_polynomial_1d(
            x, max_degree, recursion_coefs)

        kk = 0
        for d1 in range(max_degree1 + 1):
            for d2 in range(min(d1 + 1, max_degree2 + 1)):
                exact_product = \
                    ortho_basis_matrix[:, d1]*ortho_basis_matrix[:, d2]

                product = np.dot(
                    ortho_basis_matrix[:, :product_coefs[kk].shape[0]],
                    product_coefs[kk]).sum(axis=1)
                assert np.allclose(product, exact_product)
                kk += 1
    def test_orthonormality_asymetric_jacobi_polynomial(self):
        from scipy.stats import beta as beta_rv
        alpha = 4.
        beta = 1.
        degree = 3
        probability_measure = True

        ab = jacobi_recurrence(degree + 1,
                               alpha=alpha,
                               beta=beta,
                               probability=probability_measure)

        x, w = np.polynomial.legendre.leggauss(10 * degree)
        p = evaluate_orthonormal_polynomial_1d(x, degree, ab)
        w *= beta_rv.pdf((x + 1.) / 2., a=beta + 1, b=alpha + 1) / 2.

        # test orthogonality
        exact_moments = np.zeros((degree + 1))
        exact_moments[0] = 1.0
        assert np.allclose(np.dot(p.T, w), exact_moments)
        # test orthonormality
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))

        assert np.allclose(
            evaluate_orthonormal_polynomial_deriv_1d(x, degree, ab, 0), p)
    def test_krawtchouk_binomial(self):
        degree = 4
        num_trials = 10
        prob_success = 0.5
        ab = krawtchouk_recurrence(degree + 1, num_trials, prob_success)
        x, w = gauss_quadrature(ab, degree + 1)

        probability_mesh = np.arange(0, num_trials + 1, dtype=float)
        probability_masses = binom.pmf(probability_mesh, num_trials,
                                       prob_success)

        basis_mat = evaluate_orthonormal_polynomial_1d(probability_mesh,
                                                       degree, ab)
        assert np.allclose(
            (basis_mat * probability_masses[:, None]).T.dot(basis_mat),
            np.eye(basis_mat.shape[1]))

        coef = np.random.uniform(-1, 1, (degree + 1))
        basis_matrix_at_pm = univariate_monomial_basis_matrix(
            degree, probability_mesh)
        vals_at_pm = basis_matrix_at_pm.dot(coef)
        basis_matrix_at_gauss = univariate_monomial_basis_matrix(degree, x)
        vals_at_gauss = basis_matrix_at_gauss.dot(coef)

        true_mean = vals_at_pm.dot(probability_masses)
        quadrature_mean = vals_at_gauss.dot(w)
        assert np.allclose(true_mean, quadrature_mean)
    def test_orthonormality_probabilists_hermite_polynomial(self):
        rho = 0.
        degree = 2
        probability_measure = True
        ab = hermite_recurrence(degree + 1,
                                rho,
                                probability=probability_measure)

        x, w = np.polynomial.hermite.hermgauss(degree + 1)
        # transform rule to the probability weight
        # function w = 1/sqrt(2*pi)*exp(-x^2/2)
        x *= np.sqrt(2.0)
        w /= np.sqrt(np.pi)
        p = evaluate_orthonormal_polynomial_1d(x, degree, ab)

        # Note: Pecos uses ptFactor=sqrt(2); if switching Pecos to the
        # orthonormal recursion used here, ptFactor must be set to 1.0,
        # as is done implicitly above
        p_exact = np.asarray([1 + 0. * x, x, x**2 - 1]).T / np.sqrt(
            sp.factorial(np.arange(degree + 1)))
        assert np.allclose(p, p_exact)

        # test orthogonality
        exact_moments = np.zeros((degree + 1))
        exact_moments[0] = 1.0
        assert np.allclose(np.dot(p.T, w), exact_moments)
        # test orthonormality
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
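The x *= sqrt(2), w /= sqrt(pi) step maps the Gauss-Hermite rule for the weight exp(-x**2) to a rule for the standard normal density. A minimal standalone check of that transform (an illustrative aside using only numpy):

import numpy as np

# Gauss-Hermite rule for exp(-x**2); after the change of variables the
# weights sum to one and the rule reproduces E[x**2] = 1 for N(0, 1)
x, w = np.polynomial.hermite.hermgauss(5)
x, w = x * np.sqrt(2.), w / np.sqrt(np.pi)
assert np.isclose(w.sum(), 1.0)
assert np.isclose((x**2).dot(w), 1.0)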
Example #8
def compute_univariate_orthonormal_basis_products(get_recursion_coefficients,
                                                  max_degree1, max_degree2):
    """
    Compute all the products of univariate orthonormal bases and re-express
    them as expansions using the orthnormal basis.
    """
    assert max_degree1 >= max_degree2
    max_degree = max_degree1 + max_degree2
    num_quad_points = max_degree + 1

    recursion_coefs = get_recursion_coefficients(num_quad_points)
    x_quad, w_quad = gauss_quadrature(recursion_coefs, num_quad_points)
    w_quad = w_quad[:, np.newaxis]

    # evaluate the orthonormal basis at the quadrature points. This can
    # be computed once for all degrees up to the maximum degree
    ortho_basis_matrix = evaluate_orthonormal_polynomial_1d(
        x_quad, max_degree, recursion_coefs)

    # compute coefficients of orthonormal basis using pseudo
    # spectral projection
    product_coefs = []
    for d1 in range(max_degree1 + 1):
        for d2 in range(min(d1 + 1, max_degree2 + 1)):
            product_vals = \
                ortho_basis_matrix[:, d1] * ortho_basis_matrix[:, d2]
            coefs = w_quad.T.dot(product_vals[:, np.newaxis] *
                                 ortho_basis_matrix[:, :d1 + d2 + 1]).T
            product_coefs.append(coefs)
    return product_coefs
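A minimal usage sketch of the pseudo-spectral projection above (the import path is an assumption and varies across pyapprox versions):

from functools import partial
# assumed import location; adjust to the pyapprox version in use
from pyapprox.orthonormal_polynomials_1d import jacobi_recurrence

get_coefs = partial(jacobi_recurrence, alpha=0., beta=0., probability=True)
product_coefs = compute_univariate_orthonormal_basis_products(
    get_coefs, 3, 2)
# product_coefs[kk] holds the coefficients expressing phi_d1*phi_d2 in the
# first d1+d2+1 orthonormal basis functions, with (d1, d2) enumerated as in
# the double loop above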
    def test_discrete_chebyshev(self):
        N, degree = 100, 5
        xk, pk = np.arange(N), np.ones(N) / N
        rv = float_rv_discrete(name='discrete_chebyshev', values=(xk, pk))
        ab = discrete_chebyshev_recurrence(degree + 1, N)
        p = evaluate_orthonormal_polynomial_1d(xk, degree, ab)
        w = rv.pmf(xk)
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
Example #10
    def test_rv_discrete_large_moments(self):
        """
        When Modified_chebyshev_orthonormal is used when the moments of
        discrete variable are very large it will fail. To avoid this
        rescale the variables to [-1,1] like is done for continuous
        random variables
        """
        N, degree = 100, 5
        xk, pk = np.arange(N), np.ones(N) / N
        rv = float_rv_discrete(name='float_rv_discrete', values=(xk, pk))
        xk_canonical = xk / (N - 1) * 2 - 1
        ab = modified_chebyshev_orthonormal(degree + 1, [xk_canonical, pk])
        p = evaluate_orthonormal_polynomial_1d(xk_canonical, degree, ab)
        w = rv.pmf(xk)
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))

        ab = predictor_corrector(degree + 1, (xk_canonical, pk),
                                 xk_canonical.min(), xk_canonical.max())
        p = evaluate_orthonormal_polynomial_1d(xk_canonical, degree, ab)
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
Example #11
    def test_continuous_rv_sample(self):
        N, degree = int(1e6), 5
        xk, pk = np.random.normal(0, 1, N), np.ones(N) / N
        ab = modified_chebyshev_orthonormal(degree + 1, [xk, pk])
        hermite_ab = hermite_recurrence(degree + 1, 0, True)
        x, w = gauss_quadrature(hermite_ab, degree + 1)
        p = evaluate_orthonormal_polynomial_1d(x, degree, ab)
        gaussian_moments = np.zeros(degree + 1)
        gaussian_moments[0] = 1
        assert np.allclose(p.T.dot(w), gaussian_moments, atol=1e-2)
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1), atol=7e-2)
    def test_convert_orthonormal_recurence_to_three_term_recurence(self):
        rho = 0.
        degree = 2
        probability_measure = True
        ab = hermite_recurrence(degree + 1,
                                rho,
                                probability=probability_measure)
        abc = convert_orthonormal_recurence_to_three_term_recurence(ab)

        x = np.linspace(-3, 3, 101)
        p_2term = evaluate_orthonormal_polynomial_1d(x, degree, ab)
        p_3term = evaluate_three_term_recurrence_polynomial_1d(abc, degree, x)
        assert np.allclose(p_2term, p_3term)
    def test_charlier(self):
        # Note: as the rate gets smaller the number of terms that can be
        # accurately computed decreases because the problem becomes more
        # ill conditioned. This is because the number of masses with
        # significant weight shrinks as the rate does.
        degree, rate = 5, 2
        rv = poisson(rate)
        ab = charlier_recurrence(degree + 1, rate)
        lb, ub = rv.interval(1 - np.finfo(float).eps)
        x = np.linspace(lb, ub, int(ub - lb + 1))
        p = evaluate_orthonormal_polynomial_1d(x, degree, ab)
        w = rv.pmf(x)
        # print(np.absolute(np.dot(p.T*w,p)-np.eye(degree+1)).max())
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1), atol=1e-7)
Example #14
    def test_float_rv_discrete(self):
        num_coef, nmasses = 5, 10
        # works for both lanczos and chebyshev algorithms
        # xk   = np.geomspace(1,512,num=nmasses)
        # pk   = np.ones(nmasses)/nmasses

        # works only for chebyshev algorithms
        pk = np.geomspace(1, 512, num=nmasses)
        pk /= pk.sum()
        xk = np.arange(0, nmasses)

        # ab  = lanczos(xk,pk,num_coef)
        ab = modified_chebyshev_orthonormal(num_coef, [xk, pk],
                                            probability=True)

        x, w = gauss_quadrature(ab, num_coef)
        moments = np.array([(x**ii).dot(w) for ii in range(num_coef)])
        true_moments = np.array([(xk**ii).dot(pk) for ii in range(num_coef)])
        assert np.allclose(moments, true_moments), (moments, true_moments)
        p = evaluate_orthonormal_polynomial_1d(x, num_coef - 1, ab)
        assert np.allclose((p.T * w).dot(p), np.eye(num_coef))
        p = evaluate_orthonormal_polynomial_1d(xk, num_coef - 1, ab)
        assert np.allclose((p.T * pk).dot(p), np.eye(num_coef))
def additive_polynomial(samples,
                        univariate_function_params,
                        recursion_coeffs,
                        return_univariate_vals=False):
    num_vars, num_samples = samples.shape
    values = np.zeros((num_samples, 1), dtype=float)
    univariate_values = np.empty((num_samples, num_vars), dtype=float)
    for ii in range(num_vars):
        degree = univariate_function_params[ii].shape[0] - 1
        basis_matrix = evaluate_orthonormal_polynomial_1d(
            samples[ii, :], degree, recursion_coeffs)
        univariate_values[:, ii] = np.dot(basis_matrix,
                                          univariate_function_params[ii])
        # keep values 2-D; adding the 1-D slice univariate_values[:, ii]
        # would broadcast incorrectly against the (num_samples, 1) array
        values += univariate_values[:, ii:ii + 1]
    if not return_univariate_vals:
        return values
    else:
        return values, univariate_values
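A minimal call sketch for additive_polynomial (assuming jacobi_recurrence from pyapprox is in scope): the returned values are the sum over variables of each univariate polynomial evaluated at its own coordinate.

import numpy as np

degree, num_vars, num_samples = 2, 3, 4
recursion_coeffs = jacobi_recurrence(degree + 1, alpha=0., beta=0.,
                                     probability=True)
params = [np.random.normal(0., 1., degree + 1) for _ in range(num_vars)]
samples = np.random.uniform(-1., 1., (num_vars, num_samples))
vals, uni_vals = additive_polynomial(samples, params, recursion_coeffs,
                                     return_univariate_vals=True)
# the additive structure: f(x) = sum_i f_i(x_i)
assert np.allclose(vals[:, 0], uni_vals.sum(axis=1))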
    def test_convert_monomials_to_orthonormal_polynomials_1d(self):
        rho = 0.
        degree = 10
        probability_measure = True
        ab = hermite_recurrence(degree + 1,
                                rho,
                                probability=probability_measure)

        basis_mono_coefs = convert_orthonormal_polynomials_to_monomials_1d(
            ab, degree)

        x = np.random.normal(0, 1, (100))
        print('Cond number', np.linalg.cond(basis_mono_coefs))
        basis_ortho_coefs = np.linalg.inv(basis_mono_coefs)
        ortho_basis_matrix = evaluate_orthonormal_polynomial_1d(x, degree, ab)
        mono_basis_matrix = x[:, None]**np.arange(degree + 1)[None, :]
        assert np.allclose(mono_basis_matrix,
                           ortho_basis_matrix.dot(basis_ortho_coefs.T))
Example #17
def get_numerically_generated_recursion_coefficients_from_samples(
        xk, pk, num_coefs, orthonormality_tol, truncated_probability_tol=0):

    if num_coefs > xk.shape[0]:
        msg = "Number of coefs requested is larger than number of "
        msg += "probability masses"
        raise ValueError(msg)
    recursion_coeffs = lanczos(xk, pk, num_coefs, truncated_probability_tol)

    p = evaluate_orthonormal_polynomial_1d(np.asarray(xk, dtype=float),
                                           num_coefs - 1, recursion_coeffs)
    error = np.absolute((p.T * pk).dot(p) - np.eye(num_coefs)).max()
    if error > orthonormality_tol:
        msg = "basis created is ill conditioned. "
        msg += f"Max error: {error}. Max terms: {xk.shape[0]}, "
        msg += f"Terms requested: {num_coefs}"
        raise ValueError(msg)
    return recursion_coeffs
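A minimal sketch of the intended call, using the uniform discrete measure from the tests above:

import numpy as np

xk, pk = np.arange(10, dtype=float), np.ones(10) / 10
ab = get_numerically_generated_recursion_coefficients_from_samples(
    xk, pk, num_coefs=5, orthonormality_tol=1e-8)
# raises ValueError if num_coefs > 10 or if the Gram matrix of the
# resulting basis deviates from identity by more than the tolerance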
    def test_convert_orthonormal_polynomials_to_monomials_1d(self):
        """
        Example: orthonormal Hermite polynomials
        deg  monomial coeffs
        0    [1,0,0]
        1    [0,1,0]         1/1*((x-0)*1-1*0)=x
        2    [1/c,0,1/c]     1/c*((x-0)*x-1*1)=(x**2-1)/c,            c=sqrt(2)
        3    [0,-3/d,0,1/d]  1/d*((x-0)*(x**2-1)/c-c*x)=
                             1/(c*d)*(x**3-x-c**2*x)=(x**3-3*x)/(c*d),d=sqrt(3)
        """
        rho = 0.
        degree = 10
        probability_measure = True
        ab = hermite_recurrence(degree + 1,
                                rho,
                                probability=probability_measure)

        basis_mono_coefs = convert_orthonormal_polynomials_to_monomials_1d(
            ab, 4)

        true_basis_mono_coefs = np.zeros((5, 5))
        true_basis_mono_coefs[0, 0] = 1
        true_basis_mono_coefs[1, 1] = 1
        true_basis_mono_coefs[2, [0, 2]] = -1 / np.sqrt(2), 1 / np.sqrt(2)
        true_basis_mono_coefs[3, [1, 3]] = -3 / np.sqrt(6), 1 / np.sqrt(6)
        true_basis_mono_coefs[4,
                              [0, 2, 4]] = np.array([3, -6, 1]) / np.sqrt(24)

        assert np.allclose(basis_mono_coefs, true_basis_mono_coefs)

        coefs = np.ones(degree + 1)
        basis_mono_coefs = convert_orthonormal_polynomials_to_monomials_1d(
            ab, degree)
        mono_coefs = np.sum(basis_mono_coefs * coefs, axis=0)

        x = np.linspace(-3, 3, 5)
        p_ortho = evaluate_orthonormal_polynomial_1d(x, degree, ab)
        ortho_vals = p_ortho.dot(coefs)

        from pyapprox.monomial import evaluate_monomial
        mono_vals = evaluate_monomial(
            np.arange(degree + 1)[np.newaxis, :], mono_coefs,
            x[np.newaxis, :])[:, 0]
        assert np.allclose(ortho_vals, mono_vals)
    def test_hahn_hypergeometric(self):
        """
        Given 20 animals, of which 7 are dogs. Then hypergeometric PDF gives
        the probability of finding a given number of dogs if we choose at
        random 12 of the 20 animals.
        """
        degree = 4
        M, n, N = 20, 7, 12
        apoly, bpoly = -(n + 1), -M - 1 + n
        ab = hahn_recurrence(degree + 1, N, apoly, bpoly)
        x, w = gauss_quadrature(ab, degree + 1)

        rv = hypergeom(M, n, N)
        true_mean = rv.mean()
        quadrature_mean = x.dot(w)
        assert np.allclose(true_mean, quadrature_mean)

        x = np.arange(0, n + 1)
        p = evaluate_orthonormal_polynomial_1d(x, degree, ab)
        w = rv.pmf(x)
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
    def test_orthonormality_legendre_polynomial(self):
        alpha = 0.
        beta = 0.
        degree = 3
        probability_measure = True

        ab = jacobi_recurrence(degree + 1,
                               alpha=alpha,
                               beta=beta,
                               probability=probability_measure)

        x, w = np.polynomial.legendre.leggauss(degree + 1)
        # make the weights correspond to the probability weight
        # function w = 1/2
        w /= 2.0
        p = evaluate_orthonormal_polynomial_1d(x, degree, ab)
        # test orthogonality
        exact_moments = np.zeros((degree + 1))
        exact_moments[0] = 1.0
        assert np.allclose(np.dot(p.T, w), exact_moments)
        # test orthonormality
        assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))

        assert np.allclose(
            evaluate_orthonormal_polynomial_deriv_1d(x, degree, ab, 0), p)
Example #21
def core_grad_left(sample, core_params, core_params_map, ranks,
                   recursion_coeffs, left_vals):
    """
    Evaluate the value and intermediate derivaties, with respect to
    univariate function basis parameters, of a core of the function train at
    a sample.

    Parameters
    ----------
    sample : float
        The sample at which to evaluate the function train

    univariate_params : [ np.ndarray (num_coeffs_i) ] (ranks[0]*ranks[2])
        The params of each univariate function. May be of different size
        i.e. num_params_i can be different for i=0,...,ranks[0]*ranks[1]

    ranks : np.ndarray (2)
        The ranks of the core [r_{k-1},r_k]

    recursion_coeffs : np.ndarray (max_degree+1)
        The recursion coefficients used to evaluate the univariate functions
        which are assumed to polynomials defined by the recursion coefficients

    left_vals : np.ndarray (ranks[0])
        The values of the product of all previous cores F_1F_2...F_{k-1}.
        If None no derivatives will be computed. Setting None is useful if
        one only want function values or when computing derivatives of first
        core.

    Returns
    -------
    core_values : np.ndarray (ranks[0],ranks[1])
        The values of each univariate function evaluated at the sample

    derivs : [ [] (num_params_i) ] (ranks[0]*ranks[1])
        The derivates of the univariate function with respect to the
        basis parameters after the left pass algorithm.
        Derivs of univariate functions are in column major ordering

    Notes
    -----
    If we assume each univariate function for variable ii is fixed
    we only need to compute basis matrix once. This is also true
    if we compute basis matrix for max degree of the univariate functions
    of the ii variable. If degree of a given univariate function is
    smaller we can just use subset of matrix. This comes at the cost of
    more storage but less computations than if vandermonde was computed
    for each different degree. We build max_degree vandermonde here.
    """
    assert ranks.shape[0] == 2
    assert np.isscalar(sample)
    if left_vals is not None:
        assert left_vals.ndim == 2 and left_vals.shape[0] == 1

    core_values = np.empty((ranks[0]*ranks[1]), dtype=float)
    core_derivs = np.empty_like(core_params)

    max_degree = recursion_coeffs.shape[0]-1
    basis_matrix = evaluate_orthonormal_polynomial_1d(
        np.asarray([sample]), max_degree, recursion_coeffs)
    cnt = 0
    for kk in range(ranks[1]):
        for jj in range(ranks[0]):
            params = get_params_of_univariate_function(
                jj, kk, ranks, core_params, core_params_map)
            degree = params.shape[0]-1
            assert degree < recursion_coeffs.shape[0]
            univariate_function_num = get_univariate_function_number(
                ranks[0], jj, kk)
            core_values[univariate_function_num] = np.dot(
                basis_matrix[:, :degree+1], params)
            if left_vals is not None:
                core_derivs[cnt:cnt+params.shape[0]] = \
                    left_vals[0, jj]*basis_matrix[:, :degree+1]
            else:
                core_derivs[cnt:cnt+params.shape[0]
                            ] = basis_matrix[:, :degree+1]
            cnt += params.shape[0]
    return core_values, core_derivs
Example #22
def evaluate_core(sample, core_params, core_params_map, ranks,
                  recursion_coeffs):
    """
    Evaluate a core of the function train at a sample

    Parameters
    ----------
    sample : float
        The sample at which to evaluate the function train

    univariate_params : [ np.ndarray (num_coeffs_i) ] (ranks[0]*ranks[2])
        The coeffs of each univariate function. May be of different size
        i.e. num_coeffs_i can be different for i=0,...,ranks[0]*ranks[1]

    ranks : np.ndarray (2)
        The ranks of the core [r_{k-1},r_k]

    recursion_coeffs : np.ndarray (max_degree+1)
        The recursion coefficients used to evaluate the univariate functions
        which are assumed to polynomials defined by the recursion coefficients

    Returns
    -------
    core_values : np.ndarray (ranks[0],ranks[1])
        The values of each univariate function evaluated at the sample

    Notes
    -----
    If we assume each univariate function for variable ii is fixed
    we only need to compute basis matrix once. This is also true
    if we compute basis matrix for max degree of the univariate functions
    of the ii variable. If degree of a given univariate function is
    smaller we can just use subset of matrix. This comes at the cost of
    more storage but less computations than if vandermonde was computed
    for each different degree. We build max_degree vandermonde here.
    """
    try:
        from pyapprox.python.function_train import evaluate_core_pyx
        return evaluate_core_pyx(sample, core_params, core_params_map, ranks,
                                 recursion_coeffs)
        # from pyapprox.weave.function_train import c_evaluate_core
        # return c_evaluate_core(sample, core_params, core_params_map, ranks,
        #                        recursion_coeffs)
    except ImportError:
        # fall back to the pure python implementation below
        pass

    assert ranks.shape[0] == 2
    assert np.isscalar(sample)

    core_values = np.empty((ranks[0], ranks[1]), dtype=float)

    max_degree = recursion_coeffs.shape[0]-1
    basis_matrix = evaluate_orthonormal_polynomial_1d(
        np.asarray([sample]), max_degree, recursion_coeffs)
    for kk in range(ranks[1]):
        for jj in range(ranks[0]):
            params = get_params_of_univariate_function(
                jj, kk, ranks, core_params, core_params_map)
            degree = params.shape[0]-1
            assert degree < recursion_coeffs.shape[0]
            core_values[jj, kk] = np.dot(basis_matrix[:, :degree+1], params)
    return core_values
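Conceptually the core is a ranks[0] x ranks[1] matrix whose (jj, kk) entry is a univariate polynomial of the sample. A self-contained sketch of the same computation without the parameter-map indirection (all names here are illustrative, not pyapprox API):

import numpy as np

def evaluate_core_simple(basis_vals, univariate_coefs, ranks):
    # basis_vals: (1, max_degree+1) row of orthonormal basis values at the
    # sample; univariate_coefs[jj + kk*ranks[0]] are the coefficients of the
    # univariate function in row jj, column kk (column-major ordering)
    core_values = np.empty((ranks[0], ranks[1]), dtype=float)
    for kk in range(ranks[1]):
        for jj in range(ranks[0]):
            coefs = univariate_coefs[jj + kk * ranks[0]]
            core_values[jj, kk] = basis_vals[0, :coefs.shape[0]].dot(coefs)
    return core_values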
Example #23
def conditional_moments_of_polynomial_chaos_expansion(poly,
                                                      samples,
                                                      inactive_idx,
                                                      return_variance=False):
    """
    Return mean and variance of polynomial chaos expansion with some variables
    fixed at specified values.

    Parameters
    ----------
    poly: PolynomialChaosExpansion
        The polynomial used to compute moments

    inactive_idx : np.ndarray (ninactive_vars)
        The indices of the fixed variables

    samples : np.ndarray (ninactive_vars)
        The samples of the inacive dimensions fixed when computing moments

    Returns
    -------
    mean : np.ndarray
       The conditional mean (num_qoi)

    variance : np.ndarray
       The conditional variance (num_qoi). Only returned if
       return_variance=True. Computing variance is significantly slower than
       computing mean. TODO check it is indeed slower
    """
    assert samples.shape[0] == len(inactive_idx)
    assert samples.ndim == 2 and samples.shape[1] == 1
    assert poly.coefficients is not None
    coef = poly.get_coefficients()
    indices = poly.get_indices()

    # precompute 1D basis functions for faster evaluation of
    # multivariate terms
    basis_vals_1d = []
    for dd in range(len(inactive_idx)):
        basis_vals_1d_dd = evaluate_orthonormal_polynomial_1d(
            samples[dd, :], indices[inactive_idx[dd], :].max(),
            poly.recursion_coeffs[poly.basis_type_index_map[inactive_idx[dd]]])
        basis_vals_1d.append(basis_vals_1d_dd)

    active_idx = np.setdiff1d(np.arange(poly.num_vars()), inactive_idx)
    mean = coef[0].copy()
    for ii in range(1, indices.shape[1]):
        index = indices[:, ii]
        coef_ii = coef[ii]  # this intentionally updates the coef matrix
        for dd in range(len(inactive_idx)):
            coef_ii *= basis_vals_1d[dd][0, index[inactive_idx[dd]]]
        if index[active_idx].sum() == 0:
            mean += coef_ii

    if not return_variance:
        return mean

    unique_indices, repeated_idx = np.unique(indices[active_idx, :],
                                             axis=1,
                                             return_inverse=True)
    new_coef = np.zeros((unique_indices.shape[1], coef.shape[1]))
    for ii in range(repeated_idx.shape[0]):
        new_coef[repeated_idx[ii]] += coef[ii]
    variance = np.sum(new_coef**2, axis=0) - mean**2
    return mean, variance
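A hand-worked sketch of the conditioning rule for a tiny two-variable Legendre PCE (an illustrative aside, not pyapprox API): for f(x1, x2) = c0 + c1*phi1(x1) + c2*phi1(x2) with phi1(x) = sqrt(3)*x orthonormal for the uniform measure on [-1, 1], fixing x2 = z gives conditional mean c0 + c2*phi1(z) (the terms with zero active degree) and conditional variance c1**2.

import numpy as np

c0, c1, c2, z = 0.5, 2.0, -1.0, 0.3
phi1 = lambda x: np.sqrt(3.) * x
x1 = np.random.uniform(-1, 1, 1000000)
f = c0 + c1 * phi1(x1) + c2 * phi1(z)
# Monte Carlo check of the conditional mean and variance formulas
assert np.isclose(f.mean(), c0 + c2 * phi1(z), atol=1e-2)
assert np.isclose(f.var(), c1**2, atol=2e-2)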
def integrand(measure, x):
    # Note: evaluate_orthonormal_polynomial_1d uses the new value of
    # ab[ii, 0]; this is the desired behavior
    pvals = evaluate_orthonormal_polynomial_1d(np.atleast_1d(x), ii, ab)
    return measure(x) * pvals[:, ii]**2
Example #25
def generate_basis_matrix(x):
    return evaluate_orthonormal_polynomial_1d(x[0, :], num_leja_samples,
                                              recursion_coeffs)
    def test_get_recursion_coefficients_from_variable_discrete(self):
        degree = 4
        N = 10
        scipy_discrete_var_names = list(stats._discrete_distns._distn_names)
        discrete_var_names = [
            "binom", "bernoulli", "nbinom", "geom", "hypergeom", "logser",
            "poisson", "planck", "boltzmann", "randint", "zipf", "dlaplace",
            "skellam", "yulesimon"
        ]
        # valid shape parameters for each distribution in names;
        # there is a one-to-one correspondence between entries
        discrete_var_shapes = [
            {"n": 10, "p": 0.5}, {"p": 0.5}, {"n": 10, "p": 0.5}, {"p": 0.5},
            {"M": 20, "n": 7, "N": 12}, {"p": 0.5}, {"mu": 1},
            {"lambda_": 1}, {"lambda_": 2, "N": 10}, {"low": 0, "high": 10},
            {"a": 2}, {"a": 1}, {"mu1": 1, "mu2": 3}, {"alpha": 1}
        ]

        for name in scipy_discrete_var_names:
            assert name in discrete_var_names

        # the following are not supported:
        #    yulesimon: there is a bug when interval is called from a
        #        frozen variable
        #    bernoulli: only has two masses
        #    zipf: unusual distribution, basis difficult to compute
        #    crystallball: discontinuous and requires a special integrator,
        #        which can be developed if needed
        unsupported_discrete_var_names = ["bernoulli", "yulesimon", "zipf"]
        for name in unsupported_discrete_var_names:
            ii = discrete_var_names.index(name)
            del discrete_var_names[ii]
            del discrete_var_shapes[ii]

        for name, shapes in zip(discrete_var_names, discrete_var_shapes):
            # print(name)
            var = getattr(stats, name)(**shapes)
            xk, pk = get_probability_masses(var, 1e-15)
            loc, scale = transform_scale_parameters(var)
            xk = (xk - loc) / scale
            ab = get_recursion_coefficients_from_variable(
                var, degree + 1, {
                    "orthonormality_tol": 3e-14,
                    "truncated_probability_tol": 1e-15,
                    "numeric": False
                })
            basis_mat = evaluate_orthonormal_polynomial_1d(xk, degree, ab)
            gram_mat = (basis_mat * pk[:, None]).T.dot(basis_mat)
            assert np.allclose(gram_mat, np.eye(basis_mat.shape[1]), atol=2e-8)

        # custom discrete variables
        xk1, pk1 = np.arange(N), np.ones(N) / N
        xk2, pk2 = np.arange(N)**2, np.ones(N) / N
        custom_vars = [
            float_rv_discrete(name="discrete_chebyshev", values=(xk1, pk1))(),
            float_rv_discrete(name="float_rv_discrete", values=(xk2, pk2))()
        ]
        for var in custom_vars:
            xk, pk = get_probability_masses(var, 1e-15)
            loc, scale = transform_scale_parameters(var)
            xk = (xk - loc) / scale
            ab = get_recursion_coefficients_from_variable(
                var, degree + 1, {
                    "orthonormality_tol": 1e-14,
                    "truncated_probability_tol": 1e-15
                })
            basis_mat = evaluate_orthonormal_polynomial_1d(xk, degree, ab)
            gram_mat = (basis_mat * pk[:, None]).T.dot(basis_mat)
            assert np.allclose(gram_mat, np.eye(basis_mat.shape[1]), atol=2e-8)
def integrand(x):
    x = np.atleast_1d(x)
    basis_mat = evaluate_orthonormal_polynomial_1d(x, order, ab)
    return var.pdf(x * scale + loc) * scale * (basis_mat[:, order1] *
                                               basis_mat[:, order2])
def integrand(x):
    y = fun(x).squeeze()
    pvals = evaluate_orthonormal_polynomial_1d(y, ii, ab)
    # the measure is not included in the integral because it is assumed
    # to be in the quadrature rules
    return pvals[:, ii]**2
    def test_gradient_function_train_additive_function(self):
        """
        Test the gradient of a function train representation of an additive
        function. Gradient is with respect to coefficients of the univariate
        functions

        Assume different parameterization for some univariate functions.
        Zero and ones are stored as a constant basis. Where as other entries
        are stored as a polynomial of a fixed degree d.
        """
        alpha = 0
        beta = 0
        degree = 2
        num_vars = 3
        recursion_coeffs = jacobi_recurrence(degree + 1,
                                             alpha=alpha,
                                             beta=beta,
                                             probability=True)

        univariate_function_params = [np.random.normal(0., 1., (degree + 1))
                                      ] * num_vars
        ft_data = generate_additive_function_in_function_train_format(
            univariate_function_params, True)

        sample = np.random.uniform(-1., 1., (num_vars, 1))
        value, ft_gradient = evaluate_function_train_grad(
            sample, ft_data, recursion_coeffs)

        true_values, univariate_values = additive_polynomial(
            sample,
            univariate_function_params,
            recursion_coeffs,
            return_univariate_vals=True)
        true_value = true_values[0, 0]
        assert np.allclose(value, true_value)

        true_gradient = np.empty((0), dtype=float)
        # var 0 univariate function 1,1
        basis_matrix_var_0 = evaluate_orthonormal_polynomial_1d(
            sample[0, :], degree, recursion_coeffs)
        true_gradient = np.append(true_gradient, basis_matrix_var_0)
        # var 0 univariate function 1,2
        true_gradient = np.append(true_gradient,
                                  np.sum(univariate_values[0, 1:]))

        basis_matrix_var_1 = evaluate_orthonormal_polynomial_1d(
            sample[1, :], degree, recursion_coeffs)
        # var 1 univariate function 1,1
        true_gradient = np.append(true_gradient, univariate_values[0, 0])
        # var 1 univariate function 2,1
        true_gradient = np.append(true_gradient, basis_matrix_var_1)
        # var 1 univariate function 1,2
        true_gradient = np.append(
            true_gradient, univariate_values[0, 0] * univariate_values[0, 2])
        # var 1 univariate function 2,2
        true_gradient = np.append(true_gradient, univariate_values[0, 2])

        basis_matrix_var_2 = evaluate_orthonormal_polynomial_1d(
            sample[2, :], degree, recursion_coeffs)
        # var 2 univariate function 1,1
        true_gradient = np.append(true_gradient,
                                  univariate_values[0, :2].sum())
        # var 2 univariate function 2,1
        true_gradient = np.append(true_gradient, basis_matrix_var_2)

        fd_gradient = ft_parameter_finite_difference_gradient(
            sample, ft_data, recursion_coeffs)

        assert np.allclose(fd_gradient, true_gradient)
        assert np.allclose(ft_gradient, true_gradient)
def integrand(measure, x):
    pvals = evaluate_orthonormal_polynomial_1d(np.atleast_1d(x), ii, ab)
    return measure(x) * pvals[:, ii] * pvals[:, ii - 1]