def test_beta_leja_quadrature(self):
    level = 12
    alpha_stat, beta_stat = 2, 10
    x_quad, w_quad = beta_leja_quadrature_rule(
        alpha_stat, beta_stat, level, return_weights_for_all_levels=False)

    import sympy as sp
    x = sp.Symbol('x')
    weight_function = beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, x)
    ranges = [-1, 1]
    exact_integral = float(
        sp.integrate(weight_function * x**3, (x, ranges[0], ranges[1])))
    assert np.allclose(exact_integral, np.dot(x_quad**3, w_quad))

    level = 12
    alpha_stat, beta_stat = 2, 10
    x_quad, w_quad = beta_leja_quadrature_rule(
        alpha_stat, beta_stat, level, return_weights_for_all_levels=False)
    x_quad = (x_quad + 1) / 2

    x = sp.Symbol('x')
    weight_function = beta_pdf_on_ab(alpha_stat, beta_stat, 0, 1, x)
    ranges = [0, 1]
    exact_integral = float(
        sp.integrate(weight_function * x**3, (x, ranges[0], ranges[1])))
    assert np.allclose(exact_integral, np.dot(x_quad**3, w_quad))
def test_beta_leja_quadrature(self):
    level = 12
    alpha_stat, beta_stat = 2, 10
    quad_rule = get_univariate_leja_quadrature_rule(
        stats.beta(alpha_stat, beta_stat), leja_growth_rule,
        return_weights_for_all_levels=False)
    x_quad, w_quad = quad_rule(level)

    import sympy as sp
    x = sp.Symbol('x')
    weight_function = beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, x)
    ranges = [-1, 1]
    exact_integral = float(
        sp.integrate(weight_function * x**3, (x, ranges[0], ranges[1])))
    assert np.allclose(exact_integral, np.dot(x_quad**3, w_quad))

    # repeat the check with the points mapped from [-1, 1] to [0, 1]; the
    # weights are unchanged because the Beta PDF is renormalized to the
    # new interval
    level = 12
    alpha_stat, beta_stat = 2, 10
    x_quad, w_quad = quad_rule(level)
    x_quad = (x_quad + 1) / 2

    x = sp.Symbol('x')
    weight_function = beta_pdf_on_ab(alpha_stat, beta_stat, 0, 1, x)
    ranges = [0, 1]
    exact_integral = float(
        sp.integrate(weight_function * x**3, (x, ranges[0], ranges[1])))
    assert np.allclose(exact_integral, np.dot(x_quad**3, w_quad))
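# Hedged sketch: an equivalent check of the rule above that avoids sympy by
# comparing against scipy's closed-form Beta moments. It relies on the fact,
# established by the tests above, that mapping the points to [0, 1] while
# keeping the weights yields a rule for the Beta(alpha_stat, beta_stat) PDF on
# [0, 1]. The helper name is illustrative, not part of the library, and the
# module-level numpy/scipy imports used by the tests above are assumed.
def check_third_moment_against_scipy(quad_rule, alpha_stat, beta_stat, level):
    x_quad, w_quad = quad_rule(level)
    # map the Leja points from [-1, 1] to [0, 1]
    u_quad = (x_quad + 1) / 2
    # third raw moment E[U**3] of U ~ Beta(alpha_stat, beta_stat)
    exact_third_moment = stats.beta(alpha_stat, beta_stat).moment(3)
    return np.allclose(exact_third_moment, np.dot(u_quad**3, w_quad))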
def setup(self, num_vars, alpha_stat, beta_stat):
    # equivalent direct form of the univariate weight:
    # univariate_weight_function = lambda x: beta(alpha_stat, beta_stat).pdf(
    #     (x+1)/2)/2
    univariate_weight_function = lambda x: beta_pdf_on_ab(
        alpha_stat, beta_stat, -1, 1, x)
    # chain rule for the map u = (x+1)/2: d/dx [g(u)/2] = g'(u)/4
    univariate_weight_function_deriv = lambda x: beta_pdf_derivative(
        alpha_stat, beta_stat, (x+1)/2)/4

    weight_function = partial(
        evaluate_tensor_product_function,
        [univariate_weight_function]*num_vars)
    weight_function_deriv = partial(
        gradient_of_tensor_product_function,
        [univariate_weight_function]*num_vars,
        [univariate_weight_function_deriv]*num_vars)

    # sanity check the analytic derivative against a finite difference
    assert np.allclose(
        (univariate_weight_function(0.5+1e-6) -
         univariate_weight_function(0.5))/1e-6,
        univariate_weight_function_deriv(0.5), atol=1e-6)

    poly = PolynomialChaosExpansion()
    # uniform variable on [-1, 1] (scipy convention: loc=-1, scale=2),
    # matching the support of the weight function above
    var_trans = define_iid_random_variable_transformation(
        uniform(-1, 2), num_vars)
    # Beta(a, b) mapped to [-1, 1] has density proportional to
    # (1-x)**(b-1)*(1+x)**(a-1), i.e. a Jacobi weight with the shape
    # parameters swapped
    poly_opts = {'alpha_poly': beta_stat-1, 'beta_poly': alpha_stat-1,
                 'var_trans': var_trans, 'poly_type': 'jacobi'}
    poly.configure(poly_opts)
    return weight_function, weight_function_deriv, poly
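# Hedged sketch of the chain-rule factor 1/4 used in
# univariate_weight_function_deriv above: if g is the Beta PDF on [0, 1] and
# f(x) = g((x+1)/2)/2 is the PDF mapped to [-1, 1], then f'(x) = g'((x+1)/2)/4.
# The check uses only scipy and finite differences; the helper name is
# illustrative, and numpy is assumed imported as np as in the tests above.
def check_chain_rule_factor(alpha_stat=2, beta_stat=10, x=0.5, h=1e-6):
    from scipy.stats import beta as beta_rv
    g = beta_rv(alpha_stat, beta_stat).pdf

    def f(t):
        # Beta PDF mapped from [0, 1] to [-1, 1]
        return g((t+1)/2)/2

    fprime_fd = (f(x+h) - f(x))/h              # finite difference of f at x
    gprime_fd = (g((x+1)/2+h) - g((x+1)/2))/h  # finite difference of g at (x+1)/2
    return np.allclose(fprime_fd, gprime_fd/4, atol=1e-5)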
def univariate_weight_function(x):
    return beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, x)
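# Illustrative reconstruction of what beta_pdf_on_ab appears to compute
# (inferred from the commented-out lambda in setup above, where
# beta_pdf_on_ab(a, b, -1, 1, x) matches beta(a, b).pdf((x+1)/2)/2): the
# Beta(a, b) density pushed forward from [0, 1] to [lb, ub] by an affine map.
# A sketch under that assumption, not the library's definition.
def beta_pdf_on_interval(alpha_stat, beta_stat, lb, ub, x):
    from scipy.stats import beta as beta_rv
    # map x in [lb, ub] back to [0, 1] and divide by the Jacobian ub - lb
    u = (x - lb)/(ub - lb)
    return beta_rv(alpha_stat, beta_stat).pdf(u)/(ub - lb)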