import numpy as np

from pyapprox.utilities import (
    get_all_sample_combinations, cartesian_product
)


def eval_function_at_multiple_design_and_random_samples(
        function, uq_samples, design_samples):
    """
    For functions which only take 1D arrays of uq_samples and design_samples,
    loop over all combinations and evaluate the function at each combination.

    design_samples vary slowest and uq_samples vary fastest.

    Let design_samples = [[1, 2], [3, 4]] and
    uq_samples = [[0, 0, 0], [0, 1, 2]]. Then the samples will be

    ([1, 2], [0, 0, 0])
    ([1, 2], [0, 1, 2])
    ([3, 4], [0, 0, 0])
    ([3, 4], [0, 1, 2])

    The function is called as function(uq_sample, design_sample).
    """
    vals = []
    # put design samples first so that samples iterates over uq_samples
    # fastest
    samples = get_all_sample_combinations(design_samples, uq_samples)
    for xx, zz in zip(
            samples[:design_samples.shape[0]].T,
            samples[design_samples.shape[0]:].T):
        # flip xx, zz because functions are assumed to take uq_samples then
        # design_samples
        vals.append(function(zz, xx))
    return np.asarray(vals)
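# A minimal usage sketch on a toy problem (the helper name _demo_... and
# toy_model are ours, not from the library). Samples are stored column-wise,
# so the design points [1, 2] and [3, 4] from the docstring are the columns
# of a (2, 2) array, and get_all_sample_combinations is assumed to pair every
# column of its first argument with every column of its second, the second
# varying fastest, as the comment in the loop above states.
def _demo_eval_at_design_and_random_samples():
    def toy_model(zz, xx):
        # zz: 1D uq sample, xx: 1D design sample
        return np.sum(zz) + np.prod(xx)

    design_samples = np.array([[1., 3.], [2., 4.]])  # columns [1,2], [3,4]
    uq_samples = np.array(
        [[0., 0.], [0., 1.], [0., 2.]])  # columns [0,0,0], [0,1,2]
    vals = eval_function_at_multiple_design_and_random_samples(
        toy_model, uq_samples, design_samples)
    # one value per combination, uq samples varying fastest:
    # 0+1*2, 3+1*2, 0+3*4, 3+3*4
    assert np.allclose(vals, [2., 5., 12., 15.])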
def __call__(self, reduced_samples):
    # pair every fixed (inactive) variable value with every reduced sample
    raw_samples = get_all_sample_combinations(
        self.inactive_var_values, reduced_samples)
    # scatter the inactive and active rows back into their original
    # variable ordering before evaluating the full-dimensional function
    samples = np.empty_like(raw_samples)
    samples[self.inactive_var_indices, :] = \
        raw_samples[:self.inactive_var_indices.shape[0]]
    samples[self.active_var_indices, :] = \
        raw_samples[self.inactive_var_indices.shape[0]:]
    return self.function(samples)
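# Usage sketch: __call__ expands reduced samples over the active variables
# into full-dimensional samples, holding the inactive variables fixed. The
# SimpleNamespace below is a hypothetical stand-in carrying only the
# attributes __call__ reads (the real wrapper class presumably sets them in
# its constructor), and the method is called unbound for illustration.
def _demo_reduced_variable_call():
    from types import SimpleNamespace
    wrapper = SimpleNamespace(
        function=lambda samples: samples.sum(axis=0)[:, np.newaxis],
        inactive_var_indices=np.array([1]),
        active_var_indices=np.array([0, 2]),
        inactive_var_values=np.array([[0.5]]))
    # 4 samples of the 2 active variables; variable 1 stays fixed at 0.5
    reduced_samples = np.linspace(0., 1., 8).reshape(2, 4)
    values = __call__(wrapper, reduced_samples)
    assert values.shape == (4, 1)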
def test_conditional_moments_of_polynomial_chaos_expansion(self):
    num_vars = 3
    degree = 2
    inactive_idx = [0, 2]
    np.random.seed(1)
    # keep variables on the canonical domain to make constructing the
    # tensor-product quadrature rule, used for testing, easier
    var = [uniform(-1, 2), beta(2, 2, -1, 2), norm(0, 1)]
    quad_rules = [
        partial(gauss_jacobi_pts_wts_1D, alpha_poly=0, beta_poly=0),
        partial(gauss_jacobi_pts_wts_1D, alpha_poly=1, beta_poly=1),
        partial(gauss_hermite_pts_wts_1D)]
    var_trans = AffineRandomVariableTransformation(var)
    poly = PolynomialChaosExpansion()
    poly_opts = define_poly_options_from_variable_transformation(var_trans)
    poly.configure(poly_opts)
    poly.set_indices(compute_hyperbolic_indices(num_vars, degree, 1.0))
    poly.set_coefficients(
        np.arange(poly.indices.shape[1], dtype=float)[:, np.newaxis])

    fixed_samples = np.array(
        [[vv.rvs() for vv in np.array(var)[inactive_idx]]]).T
    mean, variance = conditional_moments_of_polynomial_chaos_expansion(
        poly, fixed_samples, inactive_idx, True)

    # compute reference moments with tensor-product quadrature over the
    # active variables only
    from pyapprox.utilities import get_all_sample_combinations
    active_idx = np.setdiff1d(np.arange(num_vars), inactive_idx)
    random_samples, weights = get_tensor_product_quadrature_rule(
        [2 * degree] * len(active_idx), len(active_idx),
        [quad_rules[ii] for ii in range(num_vars) if ii in active_idx])
    samples = get_all_sample_combinations(fixed_samples, random_samples)
    # reorder rows so the fixed (inactive) and active variables sit at
    # their original positions
    temp = samples[len(inactive_idx):].copy()
    samples[inactive_idx] = samples[:len(inactive_idx)]
    samples[active_idx] = temp

    true_mean = (poly(samples).T.dot(weights).T)
    true_variance = ((poly(samples)**2).T.dot(weights).T) - true_mean**2
    assert np.allclose(true_mean, mean)
    assert np.allclose(true_variance, variance)
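# The test above checks, for fixed values z_I of the inactive variables, the
# identities
#     mean(z_I)     = E_A[ p(z_A, z_I) ]
#     variance(z_I) = E_A[ p(z_A, z_I)**2 ] - mean(z_I)**2
# where E_A is the expectation over the active variables. A tensor-product
# Gauss rule with 2*degree points per dimension is exact for polynomials up
# to degree 4*degree-1, so it integrates both p (degree 2) and p**2
# (degree 4) exactly and the comparison can use np.allclose.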
def error_vs_cost(model, generate_random_samples, validation_levels):
    num_samples = 10
    validation_levels = np.asarray(validation_levels)
    assert len(validation_levels) == model.base_model.num_config_vars
    config_vars = cartesian_product(
        [np.arange(ll) for ll in validation_levels])

    random_samples = generate_random_samples(num_samples)
    samples = get_all_sample_combinations(random_samples, config_vars)

    # one reference sample per random sample, with the config variables set
    # to the (finest) validation levels
    reference_samples = samples[:, ::config_vars.shape[1]].copy()
    reference_samples[-model.base_model.num_config_vars:, :] = \
        validation_levels[:, np.newaxis]

    reference_values = model(reference_samples)
    reference_mean = reference_values[:, 0].mean()

    values = model(samples)

    # put keys in the order returned by cartesian_product
    keys = sorted(model.work_tracker.costs.keys(), key=lambda x: x[::-1])
    # remove the key associated with the validation samples
    keys = keys[:-1]
    costs, ndofs, means, errors = [], [], [], []
    for ii in range(len(keys)):
        key = keys[ii]
        costs.append(np.median(model.work_tracker.costs[key]))
        nx, ny, dt = model.base_model.get_degrees_of_freedom_and_timestep(
            np.asarray(key))
        ndofs.append(nx * ny * model.base_model.final_time / dt)
        print(key, ndofs[-1], nx, ny, model.base_model.final_time / dt)
        # average over the random samples evaluated at configuration ii and
        # record the relative error with respect to the validation mean
        means.append(np.mean(values[ii::config_vars.shape[1], 0]))
        errors.append(abs(means[-1] - reference_mean) / abs(reference_mean))

    # convert to an array first; in-place division of a Python list fails
    times = np.array(costs)
    # make costs relative to the most expensive non-validation model
    costs = times / times[-1]

    n1, n2, n3 = validation_levels
    indices = np.reshape(
        np.arange(len(keys), dtype=int), (n1, n2, n3), order='F')

    validation_index = reference_samples[
        -model.base_model.num_config_vars:, 0]
    validation_time = np.median(
        model.work_tracker.costs[tuple(validation_levels)])
    # express the validation cost on the same relative scale as costs
    validation_cost = validation_time / times[-1]
    validation_ndof = np.prod(reference_values[:, -2:], axis=1)

    costs = np.reshape(costs, (n1, n2, n3), order='F')
    ndofs = np.reshape(np.array(ndofs), (n1, n2, n3), order='F')
    errors = np.reshape(np.array(errors), (n1, n2, n3), order='F')
    times = np.reshape(times, (n1, n2, n3), order='F')

    data = {
        "costs": costs, "errors": errors, "indices": indices,
        "times": times, "validation_index": validation_index,
        "validation_cost": validation_cost,
        "validation_ndof": validation_ndof,
        "validation_time": validation_time, "ndofs": ndofs}

    return data
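# Usage sketch (all names hypothetical): ``model`` is expected to be a
# work-tracking wrapper whose base model exposes three configuration
# variables (e.g. mesh resolutions nx, ny and a timestep level), so
# ``validation_levels`` has length 3 and the returned arrays have shape
# (n1, n2, n3). Something like
#
#     data = error_vs_cost(
#         work_tracking_model,             # hypothetical wrapped model
#         lambda nn: variable.rvs(nn),     # hypothetical random sampler
#         validation_levels=[3, 3, 3])
#     plt.loglog(data["costs"].ravel(), data["errors"].ravel(), "o")
#
# plots discretization error against relative cost for every configuration.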