# Create a low-fidelity model
            lf_model = Model(
                eval_fun=lambda x, samples=prior_pf_samples_lf: elliptic_pde.find_xy_pair(x, prior_samples, samples),
                rv_samples=prior_samples[indices], rv_samples_pred=prior_samples[indices], n_evals=n_evals[0],
                n_qoi=n_qoi, rv_name='$q_0$', label='Low-fidelity')

            # Create a high-fidelity model
            hf_model = Model(
                eval_fun=lambda x, samples=prior_pf_samples_hf: elliptic_pde.find_xy_pair(x, prior_samples, samples),
                n_evals=n_evals[-1], n_qoi=n_qoi, rv_name='$Q$', label='High-fidelity')

            models = [lf_model, hf_model]

            # Setup MFMC
            mfmc = MFMC(models=models,
                        training_set_strategy=training_set_strategy, regression_type=regression_type)

            # Apply MFMC
            mfmc.apply_mfmc_framework(verbose=False)
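            # get_samples() yields push-forward samples per fidelity level; [-1, :, :] selects the highest-fidelity estimates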
            prior_pf_samples = mfmc.get_samples()[-1, :, :]
            p_prior_pf = Distribution(prior_pf_samples, rv_name='$Q$', label='Prior-PF')

            # l1 error between prior push-forward and reference push-forward
            l1_prior_pf_1hf_1lf.append(ref_p_prior_pf.calculate_l1_error(p_prior_pf))

        # -------------- 1 HF, 2 LF

        l1_prior_pf_1hf_2lf = []
        for idx, n_evals in enumerate(n_evals_mfmc_hf_2lf):
            n_evals = [n_evals_mfmc_lf_2lf[idx], n_evals_mfmc_mf_2lf[idx], n_evals]
            indices = np.random.choice(range(prior_samples.shape[0]), size=n_evals[0], replace=False)
Example #2
def get_prior_prior_pf_samples():
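    """Build prior samples and prior push-forward samples for the configured model.

    Relies on globals set elsewhere in the script (model, fw_uq_method, n_mc_ref,
    n_evals, n_models, training_set_strategy, regression_type). The push-forward is
    computed either by brute-force Monte Carlo ('mc') or via the MFMC framework ('mfmc').

    Returns (prior_samples, prior_pf_samples, obs_loc, obs_scale, prior_pf_mc_samples).
    """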
    prior_samples, prior_pf_samples, obs_loc, obs_scale, prior_pf_mc_samples, mc_model, n_qoi = \
        None, None, None, None, None, None, None

    # Check push forward method
    if fw_uq_method not in ['mc', 'mfmc']:
        print('Unknown forward UQ method: %r' % fw_uq_method)
        exit()

    if model == 'lambda_p':

        n_qoi = 1
        obs_loc = [0.25]
        obs_scale = [0.1]
        prior_samples = lambda_p.get_prior_samples(n_mc_ref)

        # Create the Monte Carlo reference
        mc_model = Model(eval_fun=lambda x: lambda_p.lambda_p(x, 5), rv_samples=prior_samples,
                         rv_samples_pred=prior_samples, n_evals=n_mc_ref, n_qoi=n_qoi, rv_name='$Q$',
                         label='MC reference')

        if fw_uq_method == 'mc':

            # Brute force Monte Carlo
            prior_pf_samples = mc_model.evaluate()
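            # Samples are stored in a (fidelity level, sample, QoI) layout; plain MC uses a single level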
            prior_pf_samples = np.reshape(prior_pf_samples, (1, n_mc_ref, np.shape(prior_pf_samples)[1]))
            prior_pf_mc_samples = prior_pf_samples

        elif fw_uq_method == 'mfmc':

            # Create a low-fidelity model
            lf_model = Model(eval_fun=lambda x: lambda_p.lambda_p(x, 1), rv_samples=prior_samples[:n_evals[0]],
                             rv_samples_pred=prior_samples[:n_evals[0]], n_evals=n_evals[0], n_qoi=n_qoi,
                             rv_name='Low-fidelity: $q_0$', label='Low-fidelity')

            # Create a high-fidelity model
            if n_models == 3:
                hf_label = 'Multi-fidelity (low, mid, high)'
            elif n_models == 2:
                hf_label = 'Multi-fidelity (low, high)'
            else:
                hf_label = 'Multi-fidelity'
            hf_model = Model(eval_fun=lambda x: lambda_p.lambda_p(x, 5), n_evals=n_evals[-1],
                             n_qoi=n_qoi, rv_name='High-fidelity: $Q$', label=hf_label)

            if n_models == 2:
                models = [lf_model, hf_model]

            elif n_models == 3:
                # Create a mid-fidelity model
                mf_model = Model(eval_fun=lambda x: lambda_p.lambda_p(x, 3), n_evals=n_evals[1], n_qoi=n_qoi,
                                 rv_name='Mid-fidelity: $q_1$', label='Multi-fidelity (low, mid)')
                models = [lf_model, mf_model, hf_model]

            else:
                print('Unsupported number of models (%d) for lambda_p.' % n_models)
                exit()

    elif model in ['elliptic_pde', 'elliptic_pde_2d', 'elliptic_pde_3d']:

        # Setup and load data
        if model == 'elliptic_pde':
            n_qoi = 1
            obs_loc = [0.71]
            obs_scale = [0.02]

        elif model == 'elliptic_pde_2d':
            n_qoi = 2
            obs_loc = [0.71, 0.12]
            obs_scale = [0.02, 0.02]

        elif model == 'elliptic_pde_3d':
            n_qoi = 3
            obs_loc = [0.71, 0.12, 0.45]
            obs_scale = [0.02, 0.02, 0.02]

        prior_pf_samples = elliptic_pde.load_data()
        prior_samples = np.reshape(range(n_mc_ref), (n_mc_ref, 1))  # only sample indices are needed as identifiers here

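        # The PDE evaluations are precomputed, so the MC reference model takes eval_fun=None
        # and its evaluations are set directly via set_model_evals() below.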
        mc_model = Model(eval_fun=None, rv_samples=prior_samples, rv_samples_pred=prior_samples,
                         n_evals=n_mc_ref, n_qoi=n_qoi, rv_name='High-fidelity: $Q$', label='MC reference')
        mc_model.set_model_evals(prior_pf_samples[-1][:n_mc_ref, 0:n_qoi])

        if fw_uq_method == 'mc':

            # Monte Carlo reference
            prior_pf_samples = prior_pf_samples[-1][:n_mc_ref, 0:n_qoi]
            prior_pf_samples = np.reshape(prior_pf_samples, (1, n_mc_ref, n_qoi))
            prior_pf_mc_samples = prior_pf_samples

        elif fw_uq_method == 'mfmc':

            if n_models > 3:
                print('elliptic_pde only supports up to 3 fidelity levels.')
                exit()

            # Create a low-fidelity model
            samples = prior_pf_samples[0][:n_evals[0], 0:n_qoi]
            samples = samples ** 1.2  # add a bias
            lf_prior_samples = prior_samples[:n_evals[0]]
            lf_model = Model(
                eval_fun=lambda x, samples=samples: elliptic_pde.find_xy_pair(x, lf_prior_samples, samples),
                rv_samples=lf_prior_samples, rv_samples_pred=lf_prior_samples, n_evals=n_evals[0],
                n_qoi=n_qoi, rv_name='Low-fidelity: $q_0$', label='Low-fidelity')

            # Create a high-fidelity model
            if n_models == 3:
                hf_label = 'Multi-fidelity (low, mid, high)'
            elif n_models == 2:
                hf_label = 'Multi-fidelity (low, high)'
            else:
                hf_label = 'Multi-fidelity'
            samples = prior_pf_samples[-1][:n_evals[0], 0:n_qoi]
            hf_model = Model(
                eval_fun=lambda x, samples=samples: elliptic_pde.find_xy_pair(x, lf_prior_samples, samples),
                n_evals=n_evals[-1], n_qoi=n_qoi, rv_name='High-fidelity: $Q$', label=hf_label)

            if n_models == 3:
                # Create a mid-fidelity model
                samples = prior_pf_samples[1][:n_evals[0], 0:n_qoi]
                samples = samples ** 1.1  # add a bias
                mf_model = Model(
                    eval_fun=lambda x, samples=samples: elliptic_pde.find_xy_pair(x, lf_prior_samples, samples),
                    n_evals=n_evals[1], n_qoi=n_qoi, rv_name='Mid-fidelity: $q_1$', label='Multi-fidelity (low, mid)')
                models = [lf_model, mf_model, hf_model]

            elif n_models == 2:
                models = [lf_model, hf_model]

            else:
                print('Unsupported number of models (%d) for elliptic_pde.' % n_models)
                exit()

    elif model == 'linear_elasticity':

        if training_set_strategy != 'fixed':
            print('This model only supports a fixed training set.')
            exit()

        n_qoi = 1
        obs_loc = [0.95]
        obs_scale = [0.01]

        lf_data, hf_data, prior_samples = linear_elasticity.load_data()

        if fw_uq_method == 'mc':
            print('No MC reference available.')
            exit()

        elif fw_uq_method == 'mfmc':

            if n_models > 2:
                print('linear_elasticity only supports 2 fidelity levels.')
                exit()

            # Create a low-fidelity model
            samples = lf_data[:n_evals[0]]
            lf_prior_samples = prior_samples[:n_evals[0], 0:n_qoi]
            lf_model = Model(eval_fun=lambda x, samples=samples: samples[x], rv_samples=lf_prior_samples,
                             rv_samples_pred=lf_prior_samples, n_evals=n_evals[0], n_qoi=n_qoi,
                             rv_name='Low-fidelity: $q$', label='Low-fidelity')
            data = np.zeros((n_evals[0], 1))
            data[:, 0] = samples
            lf_model.set_model_evals(data)

            # Create a high-fidelity model
            samples = hf_data[:n_evals[-1]]
            hf_model = Model(eval_fun=lambda x, samples=samples: samples[x], n_evals=n_evals[-1], n_qoi=n_qoi,
                             rv_name='High-fidelity: $Q$', label='Multi-fidelity (low, high)')

            data = np.zeros((n_evals[-1], 1))
            data[:, 0] = samples
            hf_model.set_model_evals(data)
            
            models = [lf_model, hf_model]

    else:
        print('Unknown model: %r' % model)
        exit()

    if fw_uq_method == 'mc':
        print('')
        print('########### MC statistics ###########')
        print('')
        print('MC mean:\t\t\t\t\t\t%s' % prior_pf_mc_samples[0, :, :].mean(axis=0))
        print('MC std:\t\t\t\t\t\t\t%s' % prior_pf_mc_samples[0, :, :].std(axis=0))
        print('')
        print('########################################')
        print('')

    if fw_uq_method == 'mfmc':
        # Setup MFMC
        mfmc = MFMC(models=models, mc_model=mc_model,
                    training_set_strategy=training_set_strategy, regression_type=regression_type)

        # Apply MFMC
        mfmc.apply_mfmc_framework()

        # Calculate Monte Carlo reference
        if mc_model is not None:
            mfmc.calculate_mc_reference()
            mc = True
        else:
            mc = False

        # Diagnostics
        mfmc.print_stats(mc=mc)

        # Plots
        mfmc.plot_results(mc=mc)
        mfmc.plot_regression_models()
        mfmc.plot_joint_densities()

        # Get prior push-forward samples
        prior_pf_samples = mfmc.get_samples()
        if mc:
            prior_pf_mc_samples = mfmc.get_mc_samples()

    return prior_samples, prior_pf_samples, obs_loc, obs_scale, prior_pf_mc_samples
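

# Usage sketch: assuming the script's globals (model, fw_uq_method, n_mc_ref, n_evals,
# n_models, training_set_strategy, regression_type) have been set, the samples can be
# obtained as follows:
#
#   prior_samples, prior_pf_samples, obs_loc, obs_scale, prior_pf_mc_samples = \
#       get_prior_prior_pf_samples()
#   # prior_pf_samples is expected to have shape (fidelity levels, samples, n_qoi)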