def test_ACVMC_objective_jacobian(self):
    cov = np.asarray([[1.00, 0.50, 0.25],
                      [0.50, 1.00, 0.50],
                      [0.25, 0.50, 4.00]])
    costs = [4, 2, 1]
    target_cost = 20
    # use the MLMC allocation as a feasible starting point for the gradient check
    nhf_samples, nsample_ratios = pya.allocate_samples_mlmc(
        cov, costs, target_cost)[:2]
    estimator = ACVMF(cov, costs)
    # compare the analytic (torch) jacobian of the ACVMF objective against
    # finite-difference approximations
    errors = pya.check_gradients(
        partial(acv_sample_allocation_objective, estimator),
        partial(acv_sample_allocation_jacobian_torch, estimator),
        nsample_ratios[:, np.newaxis], disp=False)
    # print(errors.min())
    assert errors.min() < 1e-8
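# The test above relies on pya.check_gradients. As a rough illustration of what
# such a check does (a minimal sketch, not pyapprox's implementation; the name
# fd_gradient_check and the quadratic test objective are hypothetical), one can
# compare an analytic gradient against one-sided finite differences over a range
# of step sizes; the smallest error over all step sizes should be tiny when the
# analytic gradient is correct.
import numpy as np


def fd_gradient_check(fun, grad, x0, steps=np.logspace(-1, -12, 12)):
    """Return relative finite-difference errors of grad(x0) for several step sizes."""
    g = np.asarray(grad(x0)).ravel()
    errors = []
    for h in steps:
        # one-sided finite-difference approximation of each gradient component
        fd = np.array([(fun(x0 + h*e) - fun(x0))/h for e in np.eye(x0.shape[0])])
        errors.append(np.linalg.norm(fd - g)/max(np.linalg.norm(g), 1e-16))
    return np.asarray(errors)


# usage on a simple quadratic with a known gradient
def quad_fun(x):
    A = np.array([[2.0, 0.5], [0.5, 1.0]])
    return 0.5*x.dot(A.dot(x))


def quad_grad(x):
    A = np.array([[2.0, 0.5], [0.5, 1.0]])
    return A.dot(x)


errs = fd_gradient_check(quad_fun, quad_grad, np.array([0.3, -1.2]))
assert errs.min() < 1e-6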
def test_mlmc_sample_allocation(self):
    # The following will give MLMC with unit variance
    # and discrepancy variances 1, 4, 4
    target_cost = 81
    cov = np.asarray([[1.00, 0.50, 0.25],
                      [0.50, 1.00, 0.50],
                      [0.25, 0.50, 4.00]])
    # ensure cov is positive definite
    np.linalg.cholesky(cov)
    # print(np.linalg.inv(cov))
    costs = [6, 3, 1]
    nmodels = len(costs)
    nhf_samples, nsample_ratios, log10_var = pya.allocate_samples_mlmc(
        cov, costs, target_cost)
    assert np.allclose(10**log10_var, 1)
    nsamples = np.concatenate([[1], nsample_ratios])*nhf_samples
    # optimal MLMC allocation: N_l = lamda*sqrt(V_l/C_l) with discrepancy
    # variances V = [1, 4, 4] and discrepancy costs C = [6+3, 3+1, 1]
    # (see the worked check after this test)
    lamda = 9
    nsamples_discrepancy = lamda*np.sqrt(np.asarray([1/(6+3), 4/(3+1), 4]))
    # each model is evaluated on the sample sets of the two discrepancies
    # it appears in
    nsamples_true = [
        nsamples_discrepancy[0], nsamples_discrepancy[:2].sum(),
        nsamples_discrepancy[1:3].sum()]
    assert np.allclose(nsamples, nsamples_true)
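# Where the hard-coded numbers in the test above come from: for MLMC the optimal
# number of samples of discrepancy l is N_l = lamda*sqrt(V_l/C_l), with lamda
# chosen so the total cost sum_l N_l*C_l equals the target cost. A worked check,
# independent of pyapprox, using the covariance and costs from the test:
import numpy as np

# discrepancy variances from cov: var(f0-f1)=1+1-2*0.5, var(f1-f2)=1+4-2*0.5, var(f2)=4
V = np.array([1.0, 4.0, 4.0])
# cost of one sample of each discrepancy (both models of a pair must be evaluated)
C = np.array([6.0 + 3.0, 3.0 + 1.0, 1.0])
target_cost = 81

lamda = target_cost/np.sum(np.sqrt(V*C))             # = 81/(3+4+2) = 9
nsamples_discrepancy = lamda*np.sqrt(V/C)            # = [3, 9, 18]
# each model appears in the two discrepancies surrounding it
nsamples_per_model = np.array([
    nsamples_discrepancy[0],
    nsamples_discrepancy[:2].sum(),
    nsamples_discrepancy[1:].sum()])                  # = [3, 12, 27]
estimator_variance = np.sum(V/nsamples_discrepancy)  # = 1/3 + 4/9 + 4/18 = 1
total_cost = np.sum(nsamples_discrepancy*C)          # = 27 + 36 + 18 = 81

assert np.allclose(nsamples_per_model, [3, 12, 27])
assert np.allclose(estimator_variance, 1)
assert np.allclose(total_cost, target_cost)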
short_column_model = ShortColumnModelEnsemble()
model_ensemble = pya.ModelEnsemble(
    [short_column_model.m0, short_column_model.m1, short_column_model.m2])
costs = np.asarray([100, 50, 5])
target_cost = int(1e4)
idx = [0, 1, 2]
cov = short_column_model.get_covariance_matrix()[np.ix_(idx, idx)]
# generate pilot samples to estimate correlation
# npilot_samples = int(1e4)
# cov = pya.estimate_model_ensemble_covariance(
#     npilot_samples, short_column_model.generate_samples, model_ensemble)[0]

# define the sample allocation
nhf_samples, nsample_ratios = pya.allocate_samples_mlmc(
    cov, costs, target_cost)[:2]
# generate sample sets
samples, values = pya.generate_samples_and_values_mlmc(
    nhf_samples, nsample_ratios, model_ensemble,
    short_column_model.generate_samples)
# compute mean using only the high-fidelity data
hf_mean = values[0][0].mean()
# compute MLMC control variate weights
eta = pya.get_mlmc_control_variate_weights(cov.shape[0])
# compute the MLMC mean
mlmc_mean = pya.compute_approximate_control_variate_mean_estimate(eta, values)

# get the true mean of the high-fidelity model
true_mean = short_column_model.get_means()[0]
print('MLMC error', abs(mlmc_mean-true_mean))
print('MC error', abs(hf_mean-true_mean))
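# For reference, the estimator assembled above is the classical telescoping MLMC
# mean: the high-fidelity mean plus correction terms built from model
# discrepancies, equivalently a control variate whose weights are all -1 (the
# form get_mlmc_control_variate_weights is expected to produce). A minimal
# self-contained sketch, using a hypothetical per-level data layout rather than
# the (samples, values) structure returned by generate_samples_and_values_mlmc:
import numpy as np


def mlmc_mean(level_values):
    """Telescoping MLMC mean.

    level_values[l] is a pair (f_l, f_lp1) of the level-l and level-(l+1) models
    evaluated on the same sample set; the last pair has f_lp1=None. Level 0 is
    the high-fidelity model.
    """
    mean = 0.0
    for f_l, f_lp1 in level_values:
        mean += f_l.mean() if f_lp1 is None else (f_l - f_lp1).mean()
    return mean


# toy usage: three nested "models" f0 = z, f1 = z + 0.1, f2 = z + 0.5, each
# evaluated on its own sample set; the estimate targets the mean of f0 (here 0)
rng = np.random.default_rng(0)
z0, z1, z2 = (rng.standard_normal(n) for n in (10, 100, 1000))
approx_mean = mlmc_mean([(z0, z0 + 0.1),
                         (z1 + 0.1, z1 + 0.5),
                         (z2 + 0.5, None)])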