Example No. 1
from functools import partial

import numpy as np
from scipy.stats import lognorm, norm, uniform

import pyapprox as pya


def setup_check_variance_reduction_model_ensemble_short_column(
        nmodels=5, npilot_samples=None):
    # ShortColumnModelEnsemble is assumed to be defined earlier in the
    # tutorial (pyapprox's multi-fidelity Monte Carlo examples).
    example = ShortColumnModelEnsemble()
    model_ensemble = pya.ModelEnsemble(
        [example.models[ii] for ii in range(nmodels)])
    univariate_variables = [
        uniform(5, 10), uniform(15, 10), norm(500, 100), norm(2000, 400),
        lognorm(s=0.5, scale=np.exp(5))]
    variable = pya.IndependentMultivariateRandomVariable(univariate_variables)
    generate_samples = partial(
        pya.generate_independent_random_samples, variable)

    if npilot_samples is not None:
        # The number of pilot samples affects how well the numerical estimate
        # of the variance reduction matches the theoretical value.
        cov, samples, weights = pya.estimate_model_ensemble_covariance(
            npilot_samples, generate_samples, model_ensemble)
    else:
        # It is difficult to create a quadrature rule for the lognormal
        # distribution, so instead define the variable as normal and then
        # apply the log transform.
        univariate_variables = [
            uniform(5, 10), uniform(15, 10), norm(500, 100), norm(2000, 400),
            norm(loc=5, scale=0.5)]
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)

        example.apply_lognormal = True
        cov = example.get_covariance_matrix(variable)[:nmodels, :nmodels]
        example.apply_lognormal = False

    return model_ensemble, cov, generate_samples
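
A minimal standalone sketch (not part of the original example) verifying the log-transform trick used above: exponentiating samples of norm(loc=5, scale=0.5) reproduces lognorm(s=0.5, scale=np.exp(5)), which is why the covariance can be computed with the normal variable and apply_lognormal=True.

import numpy as np
from scipy.stats import lognorm, norm

rng = np.random.default_rng(0)
# Draw from Normal(5, 0.5) and exponentiate.
log_samples = np.exp(
    norm(loc=5, scale=0.5).rvs(size=100000, random_state=rng))

# Compare empirical moments against the exact lognormal moments.
target = lognorm(s=0.5, scale=np.exp(5))
print(log_samples.mean(), target.mean())  # should agree to within ~1%
print(log_samples.std(), target.std())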
Example No. 2
import matplotlib.pyplot as plt
import numpy as np

import pyapprox as pya


def plot_mlmc_error():
    """
    Define function to create plot so we can create it later and add
    other estimator error curves.

    Note this is only necessary for sphinx-gallery docs. If just using the 
    jupyter notebooks they create then we do not need this function definition 
    and simply need to call fig at end of next cell to regenerate plot.
    """
    variances, nsamples_history = [], []
    npilot_samples = 5
    estimator = pya.MLMC
    for target_cost in target_costs:
        # compute variance using the exact covariance for sample allocation
        est = estimator(cov, costs)
        nhf_samples, nsample_ratios = est.allocate_samples(target_cost)[:2]
        variances.append(est.get_variance(nhf_samples, nsample_ratios))
        nsamples_history.append(est.get_nsamples(nhf_samples, nsample_ratios))
        # compute single fidelity Monte Carlo variance
        total_cost = nsamples_history[-1].dot(costs)
        variances.append(cov[0, 0] / int(total_cost / costs[0]))
        nsamples_history.append(int(total_cost / costs[0]))
        # compute variance using an approximate covariance for sample
        # allocation. Reusing nhf_samples from the previous target_cost as
        # npilot_samples means the pilot samples are only an additional cost
        # at the first step; for simplicity this code does not account for
        # that cost in the plotted totals.
        cov_approx = pya.estimate_model_ensemble_covariance(
            npilot_samples, poly_model.generate_samples, model_ensemble)[0]
        est = estimator(cov_approx, costs)
        nhf_samples, nsample_ratios = est.allocate_samples(target_cost)[:2]
        variances.append(est.get_variance(nhf_samples, nsample_ratios))
        nsamples_history.append(est.get_nsamples(nhf_samples, nsample_ratios))
        npilot_samples = nhf_samples

    fig, axs = plt.subplots(1, 2, figsize=(2 * 8, 6))
    pya.plot_acv_sample_allocation(nsamples_history[::3], costs, model_labels,
                                   axs[1])
    total_costs = np.array(nsamples_history[::3]).dot(costs)
    axs[0].loglog(total_costs, variances[::3], label=r'$\mathrm{MLMC}$')
    axs[0].loglog(total_costs, variances[1::3], label=r'$\mathrm{MC}$')
    total_costs = np.array(nsamples_history[2::3]).dot(costs)
    axs[0].loglog(total_costs,
                  variances[2::3],
                  '--',
                  label=r'$\mathrm{MLMC^\dagger}$')
    axs[0].set_xlabel(r'$\mathrm{Total}\;\mathrm{Cost}$')
    axs[0].set_ylabel(r'$\mathrm{Variance}$')
    _ = axs[0].legend()
    return fig, axs
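
The single-fidelity baseline inside the loop divides the high-fidelity variance cov[0, 0] by the number of high-fidelity samples affordable at the same total cost. A standalone sketch of that bookkeeping, using hypothetical numbers in place of the tutorial's cov, costs and sample allocation:

import numpy as np

costs = np.array([1.0, 0.1, 0.01])    # hypothetical per-model sample costs
cov00 = 2.5                           # hypothetical high-fidelity variance
nsamples = np.array([10, 100, 1000])  # hypothetical MLMC sample allocation

total_cost = nsamples.dot(costs)      # cost of the MLMC estimator
nmc = int(total_cost / costs[0])      # equal-cost MC sample count
print("MC variance at equal cost:", cov00 / nmc)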