Example #1
    def test_get_statistics(self):
        univariate_variables = [
            stats.uniform(2, 4), stats.beta(1, 1, -1, 2), stats.norm(0, 1)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        mean = variable.get_statistics('mean')
        assert np.allclose(mean.squeeze(), [4, 0, 0])

        intervals = variable.get_statistics('interval', alpha=1)
        assert np.allclose(intervals, np.array(
            [[2, 6], [-1, 1], [-np.inf, np.inf]]))
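For reference, the expected values in this test can be reproduced directly with scipy.stats, assuming get_statistics simply forwards the requested statistic to each frozen marginal (an assumption about pyapprox's behaviour, not something this snippet shows):

import numpy as np
from scipy import stats

marginals = [stats.uniform(2, 4), stats.beta(1, 1, -1, 2), stats.norm(0, 1)]

# means: uniform on [2, 6], beta(1, 1) shifted to [-1, 1], standard normal
print(np.array([rv.mean() for rv in marginals]))       # [4. 0. 0.]

# interval(1) returns the full support of each marginal
print(np.array([rv.interval(1) for rv in marginals]))  # [[2. 6.] [-1. 1.] [-inf inf]]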
Example #2
    def test_bayesian_importance_sampling_avar(self):
        np.random.seed(1)
        nrandom_vars = 2
        Amat = np.array([[-0.5, 1]])
        noise_std = 0.1
        prior_variable = IndependentMultivariateRandomVariable(
            [stats.norm(0, 1)] * nrandom_vars)
        prior_mean = prior_variable.get_statistics('mean')
        prior_cov = np.diag(prior_variable.get_statistics('var')[:, 0])
        prior_cov_inv = np.linalg.inv(prior_cov)
        noise_cov_inv = np.eye(Amat.shape[0]) / noise_std**2
        true_sample = np.array([.4] * nrandom_vars)[:, None]
        collected_obs = Amat.dot(true_sample)
        collected_obs += np.random.normal(0, noise_std, (collected_obs.shape))
        exact_post_mean, exact_post_cov = \
            laplace_posterior_approximation_for_linear_models(
                Amat, prior_mean, prior_cov_inv, noise_cov_inv,
                collected_obs)

        chol_factor = np.linalg.cholesky(exact_post_cov)
        chol_factor_inv = np.linalg.inv(chol_factor)

        def g_model(samples):
            return np.exp(
                np.sum(chol_factor_inv.dot(samples - exact_post_mean),
                       axis=0))[:, None]

        nsamples = int(1e6)
        prior_samples = generate_independent_random_samples(
            prior_variable, nsamples)
        posterior_samples = chol_factor.dot(
            np.random.normal(0, 1, (nrandom_vars, nsamples))) + exact_post_mean

        g_mu, g_sigma = 0, np.sqrt(nrandom_vars)
        f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
            get_lognormal_example_exact_quantities(g_mu, g_sigma)

        beta = .1
        cvar_exact = CVaR(beta)

        cvar_mc = conditional_value_at_risk(g_model(posterior_samples), beta)

        prior_pdf = prior_variable.pdf
        post_pdf = stats.multivariate_normal(mean=exact_post_mean[:, 0],
                                             cov=exact_post_cov).pdf
        weights = post_pdf(prior_samples.T) / prior_pdf(prior_samples)[:, 0]
        weights /= weights.sum()
        cvar_im = conditional_value_at_risk(g_model(prior_samples), beta,
                                            weights)
        # print(cvar_exact, cvar_mc, cvar_im)
        assert np.allclose(cvar_exact, cvar_mc, rtol=1e-3)
        assert np.allclose(cvar_exact, cvar_im, rtol=2e-3)
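The importance-sampling check above needs a CVaR estimator that accepts sample weights. A minimal self-contained sketch of such an estimator, using the standard Rockafellar-Uryasev definition CVaR_beta = VaR_beta + E[(X - VaR_beta)^+] / (1 - beta), is shown below; it is a generic implementation and not necessarily identical to pyapprox's conditional_value_at_risk:

import numpy as np


def weighted_cvar(values, beta, weights=None):
    # self-normalised, weighted CVaR at quantile level beta
    values = np.asarray(values).ravel()
    if weights is None:
        weights = np.full(values.shape, 1.0 / values.size)
    weights = np.asarray(weights).ravel() / np.sum(weights)
    order = np.argsort(values)
    v, w = values[order], weights[order]
    # value-at-risk: smallest value whose cumulative weight reaches beta
    var = v[np.searchsorted(np.cumsum(w), beta)]
    return var + np.sum(w * np.maximum(v - var, 0)) / (1 - beta)


# unweighted Monte Carlo estimate for a standard lognormal variable
samples = np.exp(np.random.normal(0, 1, 100000))
print(weighted_cvar(samples, 0.1))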
Example #3
    def help_compare_prediction_based_oed(self, deviation_fun,
                                          gauss_deviation_fun,
                                          use_gauss_quadrature,
                                          ninner_loop_samples, ndesign_vars,
                                          tol):
        ncandidates_1d = 5
        design_candidates = cartesian_product(
            [np.linspace(-1, 1, ncandidates_1d)] * ndesign_vars)
        ncandidates = design_candidates.shape[1]

        # Define model used to predict likely observable data
        indices = compute_hyperbolic_indices(ndesign_vars, 1)[:, 1:]
        Amat = monomial_basis_matrix(indices, design_candidates)
        obs_fun = partial(linear_obs_fun, Amat)

        # Define model used to predict unobservable QoI
        qoi_fun = exponential_qoi_fun

        # Define the prior PDF of the unknown variables
        nrandom_vars = indices.shape[1]
        prior_variable = IndependentMultivariateRandomVariable(
            [stats.norm(0, 0.5)] * nrandom_vars)

        # Define the independent observational noise
        noise_std = 1

        # Define initial design
        init_design_indices = np.array([ncandidates // 2])

        # Define OED options
        nouter_loop_samples = 100
        if use_gauss_quadrature:
            # 301 needed for cvar deviation
            # only 31 needed for variance deviation
            ninner_loop_samples_1d = ninner_loop_samples
            var_trans = AffineRandomVariableTransformation(prior_variable)
            x_quad, w_quad = gauss_hermite_pts_wts_1D(ninner_loop_samples_1d)
            x_quad = cartesian_product([x_quad] * nrandom_vars)
            w_quad = outer_product([w_quad] * nrandom_vars)
            x_quad = var_trans.map_from_canonical_space(x_quad)
            ninner_loop_samples = x_quad.shape[1]

            def generate_inner_prior_samples(nsamples):
                assert nsamples == x_quad.shape[1], (nsamples, x_quad.shape)
                return x_quad, w_quad
        else:
            # use default Monte Carlo sampling
            generate_inner_prior_samples = None

        # Setup OED problem
        oed = BayesianBatchDeviationOED(design_candidates,
                                        obs_fun,
                                        noise_std,
                                        prior_variable,
                                        qoi_fun,
                                        nouter_loop_samples,
                                        ninner_loop_samples,
                                        generate_inner_prior_samples,
                                        deviation_fun=deviation_fun)
        oed.populate()
        oed.set_collected_design_indices(init_design_indices)

        prior_mean = oed.prior_variable.get_statistics('mean')
        prior_cov = np.diag(prior_variable.get_statistics('var')[:, 0])
        prior_cov_inv = np.linalg.inv(prior_cov)
        selected_indices = init_design_indices

        # Generate experimental design
        nexperiments = 3
        for step in range(len(init_design_indices), nexperiments):
            # Copy current state of OED before new data is determined
            # This copy will be used to compute Laplace based utility and
            # evidence values for testing
            oed_copy = copy.deepcopy(oed)

            # Update the design
            utility_vals, selected_indices = oed.update_design()

            utility, deviations, evidences, weights = \
                oed_copy.compute_expected_utility(
                    oed_copy.collected_design_indices, selected_indices, True)

            exact_deviations = np.empty(nouter_loop_samples)
            for jj in range(nouter_loop_samples):
                # only test intermediate quantities associated with the design
                # chosen by the OED step
                idx = oed.collected_design_indices
                obs_jj = oed_copy.outer_loop_obs[jj:jj + 1, idx]

                noise_cov_inv_jj = np.eye(idx.shape[0]) / noise_std**2
                exact_post_mean_jj, exact_post_cov_jj = \
                    laplace_posterior_approximation_for_linear_models(
                        Amat[idx, :],
                        prior_mean, prior_cov_inv, noise_cov_inv_jj, obs_jj.T)

                exact_deviations[jj] = gauss_deviation_fun(
                    exact_post_mean_jj, exact_post_cov_jj)
            print('d',
                  np.absolute(exact_deviations - deviations[:, 0]).max(), tol)
            # print(exact_deviations, deviations[:, 0])
            assert np.allclose(exact_deviations, deviations[:, 0], atol=tol)
            assert np.allclose(utility_vals[selected_indices],
                               -np.mean(exact_deviations),
                               atol=tol)
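The exact posterior moments used for comparison come from the closed-form update for a linear forward model with a Gaussian prior and Gaussian noise, which is what laplace_posterior_approximation_for_linear_models is assumed to return (the Laplace approximation is exact in this linear-Gaussian case). A minimal sketch of that update:

import numpy as np


def gaussian_linear_posterior(Amat, prior_mean, prior_cov_inv, noise_cov_inv,
                              obs):
    # posterior of x for obs = Amat @ x + noise, with x ~ N(prior_mean,
    # prior_cov) and noise ~ N(0, noise_cov); means and obs are column vectors
    post_cov = np.linalg.inv(prior_cov_inv + Amat.T @ noise_cov_inv @ Amat)
    post_mean = post_cov @ (Amat.T @ noise_cov_inv @ obs
                            + prior_cov_inv @ prior_mean)
    return post_mean, post_cov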
Example #4
    def test_sequential_kl_oed(self):
        """
        Observations collected ARE used to inform subsequent designs
        """
        nrandom_vars = 1
        noise_std = 1
        ndesign = 5
        nouter_loop_samples = int(1e4)
        ninner_loop_samples = 31

        ncandidates = 6
        design_candidates = np.linspace(-1, 1, ncandidates)[None, :]

        def obs_fun(samples):
            assert design_candidates.ndim == 2
            assert samples.ndim == 2
            Amat = design_candidates.T
            return Amat.dot(samples).T

        prior_variable = IndependentMultivariateRandomVariable(
            [stats.norm(0, 1)] * nrandom_vars)

        true_sample = np.array([.4] * nrandom_vars)[:, None]

        def obs_process(new_design_indices):
            obs = obs_fun(true_sample)[:, new_design_indices]
            obs += oed.noise_fun(obs)
            return obs

        x_quad, w_quad = gauss_hermite_pts_wts_1D(ninner_loop_samples)

        def generate_inner_prior_samples_gauss(n):
            # use precomputed samples to avoid the cost of regenerating them
            assert n == x_quad.shape[0]
            return x_quad[None, :], w_quad

        generate_inner_prior_samples = generate_inner_prior_samples_gauss

        # Define initial design
        init_design_indices = np.array([ncandidates // 2])
        oed = BayesianSequentialKLOED(design_candidates, obs_fun, noise_std,
                                      prior_variable, obs_process,
                                      nouter_loop_samples, ninner_loop_samples,
                                      generate_inner_prior_samples)
        oed.populate()
        oed.set_collected_design_indices(init_design_indices)

        prior_mean = oed.prior_variable.get_statistics('mean')
        prior_cov = np.diag(prior_variable.get_statistics('var')[:, 0])
        prior_cov_inv = np.linalg.inv(prior_cov)

        exact_post_mean_prev = prior_mean
        exact_post_cov_prev = prior_cov
        post_var_prev = stats.multivariate_normal(
            mean=exact_post_mean_prev[:, 0], cov=exact_post_cov_prev)
        selected_indices = init_design_indices

        # Because of Monte Carlo error, set the step tolerances individually.
        # It is too expensive to increase the number of outer-loop samples
        # enough to reduce the errors further
        step_tols = [7.3e-3, 6.5e-2, 3.3e-2, 1.6e-1]

        for step in range(len(init_design_indices), ndesign):
            current_design = design_candidates[:, oed.collected_design_indices]
            noise_cov_inv = np.eye(current_design.shape[1]) / noise_std**2

            # Compute posterior moving from previous posterior and using
            # only the most recently collected data
            noise_cov_inv_incr = np.eye(
                selected_indices.shape[0]) / noise_std**2
            exact_post_mean, exact_post_cov = \
                laplace_posterior_approximation_for_linear_models(
                    design_candidates[:, selected_indices].T,
                    exact_post_mean_prev, np.linalg.inv(exact_post_cov_prev),
                    noise_cov_inv_incr, oed.collected_obs[:, -1:].T)

            # Check that using the current posterior as the prior with only
            # the new data (above) produces the same posterior as using the
            # original prior with all collected data (the from_prior
            # approach). The posteriors should be the same but the evidences
            # will differ. This is tested below
            exact_post_mean_from_prior, exact_post_cov_from_prior = \
                laplace_posterior_approximation_for_linear_models(
                    current_design.T, prior_mean, prior_cov_inv, noise_cov_inv,
                    oed.collected_obs.T)

            assert np.allclose(exact_post_mean, exact_post_mean_from_prior)
            assert np.allclose(exact_post_cov, exact_post_cov_from_prior)

            # Compute PDF of current posterior that uses all collected data
            post_var = stats.multivariate_normal(
                mean=exact_post_mean[:, 0].copy(), cov=exact_post_cov.copy())

            # Compute evidence moving from previous posterior to
            # new posterior (not initial prior to posterior).
            # Values can be computed exactly for Gaussian prior and noise
            gauss_evidence = laplace_evidence(
                lambda x: np.exp(
                    gaussian_loglike_fun(
                        oed.collected_obs[:, -1:],
                        obs_fun(x)[:, oed.collected_design_indices[-1:]],
                        noise_std))[:, 0],
                lambda y: np.atleast_2d(post_var_prev.pdf(y.T)).T,
                exact_post_cov, exact_post_mean)

            # Compute evidence using Gaussian quadrature rule. This
            # is possible for this low-dimensional example.
            quad_loglike_vals = np.exp(
                gaussian_loglike_fun(
                    oed.collected_obs[:, -1:],
                    obs_fun(
                        x_quad[None, :])[:, oed.collected_design_indices[-1:]],
                    noise_std))[:, 0]
            # we must divide the integrand by the initial prior pdf since it
            # is already implicitly included via the quadrature weights
            integrand_vals = quad_loglike_vals * post_var_prev.pdf(
                x_quad[:, None]) / prior_variable.pdf(x_quad[None, :])[:, 0]
            quad_evidence = integrand_vals.dot(w_quad)
            # print(quad_evidence, gauss_evidence)
            assert np.allclose(gauss_evidence, quad_evidence), step

            # print('G', gauss_evidence, oed.evidence)
            assert np.allclose(gauss_evidence, oed.evidence), step

            # compute the evidence of moving from the initial prior
            # to the current posterior. This will be used for testing later
            gauss_evidence_from_prior = laplace_evidence(
                lambda x: np.exp(
                    gaussian_loglike_fun(
                        oed.collected_obs,
                        obs_fun(x)[:, oed.collected_design_indices], noise_std)
                )[:, 0], prior_variable.pdf, exact_post_cov, exact_post_mean)

            # Copy current state of OED before new data is determined
            # This copy will be used to compute Laplace based utility and
            # evidence values for testing
            oed_copy = copy.deepcopy(oed)

            # Update the design
            utility_vals, selected_indices = oed.update_design()
            new_obs = oed.obs_process(selected_indices)
            oed.update_observations(new_obs)
            utility = utility_vals[selected_indices]

            # Re-compute the evidences that were used to update the design
            # above. This will be used for testing later
            # print('D', oed_copy.evidence)
            evidences = oed_copy.compute_expected_utility(
                oed_copy.collected_design_indices, selected_indices, True)[1]

            # print('Collected plus selected indices',
            #       oed.collected_design_indices,
            #       oed_copy.collected_design_indices, selected_indices)

            # For all outer-loop samples compute the posterior exactly and
            # compute intermediate values for testing. While OED considers
            # all possible candidate design indices, here we only test the
            # one chosen when the design was last updated
            exact_evidences = np.empty(nouter_loop_samples)
            exact_kl_divs = np.empty_like(exact_evidences)
            for jj in range(nouter_loop_samples):
                # Fill obs with those predicted by outer loop sample
                idx = oed.collected_design_indices
                obs_jj = oed_copy.outer_loop_obs[jj:jj + 1, idx]
                # Overwrite the previously simulated obs with the collected
                # obs. Do not overwrite the last value, which is the potential
                # data used to compute the expected utility
                obs_jj[:, :oed_copy.collected_obs.shape[1]] = \
                    oed_copy.collected_obs

                # Compute the posterior obtained by using predicted value
                # of outer loop sample
                noise_cov_inv_jj = np.eye(
                    selected_indices.shape[0]) / noise_std**2
                exact_post_mean_jj, exact_post_cov_jj = \
                    laplace_posterior_approximation_for_linear_models(
                        design_candidates[:, selected_indices].T,
                        exact_post_mean, np.linalg.inv(exact_post_cov),
                        noise_cov_inv_jj, obs_jj[:, -1].T)

                # Use post_pdf to measure the change from the current
                # posterior (acting as the prior) to the new posterior
                gauss_evidence_jj = laplace_evidence(
                    lambda x: np.exp(
                        gaussian_loglike_fun(obs_jj[:, -1:],
                                             obs_fun(x)[:, selected_indices],
                                             noise_std))[:, 0],
                    lambda y: np.atleast_2d(post_var.pdf(y.T)).T,
                    exact_post_cov_jj, exact_post_mean_jj)
                exact_evidences[jj] = gauss_evidence_jj

                # Check quadrature gets the same answer
                quad_loglike_vals = np.exp(
                    gaussian_loglike_fun(
                        obs_jj[:, -1:],
                        obs_fun(x_quad[None, :])[:, selected_indices],
                        noise_std))[:, 0]
                integrand_vals = (
                    quad_loglike_vals * post_var.pdf(x_quad[:, None])
                    / prior_variable.pdf(x_quad[None, :])[:, 0])
                quad_evidence = integrand_vals.dot(w_quad)
                # print(quad_evidence, gauss_evidence_jj)
                assert np.allclose(gauss_evidence_jj, quad_evidence), step

                # Check that the evidence of moving from the current posterior
                # to the new posterior (with the potential data from the
                # outer-loop sample) equals the evidence of moving from the
                # initial prior to the new posterior divided by the evidence
                # of moving from the initial prior to the current posterior
                gauss_evidence_jj_from_prior = laplace_evidence(
                    lambda x: np.exp(
                        gaussian_loglike_fun(obs_jj,
                                             obs_fun(x)[:, idx], noise_std)
                    )[:, 0], prior_variable.pdf, exact_post_cov_jj,
                    exact_post_mean_jj)
                # print(gauss_evidence_jj_from_prior/gauss_evidence_from_prior,
                #       gauss_evidence_jj)
                # print('gauss_evidence_from_prior', gauss_evidence_from_prior)
                assert np.allclose(
                    gauss_evidence_jj_from_prior / gauss_evidence_from_prior,
                    gauss_evidence_jj)

                gauss_kl_div = gaussian_kl_divergence(exact_post_mean_jj,
                                                      exact_post_cov_jj,
                                                      exact_post_mean,
                                                      exact_post_cov)
                # gauss_kl_div = gaussian_kl_divergence(
                #     exact_post_mean, exact_post_cov,
                #     exact_post_mean_jj, exact_post_cov_jj)
                exact_kl_divs[jj] = gauss_kl_div

            # print(evidences[:, 0], exact_evidences)
            assert np.allclose(evidences[:, 0], exact_evidences)

            # Outer-loop samples are drawn from the prior. Use importance
            # reweighting to effectively sample from the previous posterior.
            # This step is only relevant for open-loop design (used here),
            # where observed data informs the current estimate of the
            # parameters. Closed-loop design (not used here) never collects
            # data and so always samples from the prior.
            post_weights = post_var.pdf(
                oed.outer_loop_prior_samples.T) / post_var_prev.pdf(
                    oed.outer_loop_prior_samples.T) / oed.nouter_loop_samples
            laplace_utility = np.sum(exact_kl_divs * post_weights)
            # print('u', (utility-laplace_utility)/laplace_utility, step)
            assert np.allclose(utility,
                               laplace_utility,
                               rtol=step_tols[step - 1])

            exact_post_mean_prev = exact_post_mean
            exact_post_cov_prev = exact_post_cov
            post_var_prev = post_var
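The evidence identity asserted inside the inner loop (the incremental evidence equals the ratio of the two prior-to-posterior evidences) is just Bayes' rule applied twice. For this linear-Gaussian setup each evidence is also available in closed form as a Gaussian marginal likelihood; the helper below is a hypothetical sketch of that formula, not a pyapprox function:

import numpy as np
from scipy import stats


def gaussian_linear_evidence(Amat, prior_mean, prior_cov, obs, noise_std):
    # marginal likelihood of obs for obs = Amat @ x + noise, with
    # x ~ N(prior_mean, prior_cov) and noise ~ N(0, noise_std**2 * I):
    # p(obs) = N(obs; Amat @ prior_mean,
    #            Amat @ prior_cov @ Amat.T + noise_std**2 * I)
    pred_mean = (Amat @ prior_mean)[:, 0]
    pred_cov = (Amat @ prior_cov @ Amat.T
                + noise_std**2 * np.eye(Amat.shape[0]))
    return stats.multivariate_normal(mean=pred_mean, cov=pred_cov).pdf(
        obs[:, 0])

Multiplying the evidence of moving from the prior to the current posterior by the evidence of moving from the current posterior to the new posterior recovers the evidence of moving from the prior directly to the new posterior, which is the relation the test checks.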
Example #5
    def test_compute_expected_kl_utility_monte_carlo(self):
        nrandom_vars = 1
        noise_std = .3
        design = np.linspace(-1, 1, 2)[None, :]
        Amat = design.T

        def obs_fun(x):
            return (Amat.dot(x)).T

        def noise_fun(values):
            return np.random.normal(0, noise_std, (values.shape))

        # specify the first design point
        collected_design_indices = np.array([0])

        prior_variable = IndependentMultivariateRandomVariable(
            [stats.norm(0, 1)] * nrandom_vars)

        prior_mean = prior_variable.get_statistics('mean')
        prior_cov = np.diag(prior_variable.get_statistics('var')[:, 0])
        prior_cov_inv = np.linalg.inv(prior_cov)
        noise_cov_inv = np.eye(Amat.shape[0]) / noise_std**2

        def generate_random_prior_samples(n):
            return (generate_independent_random_samples(prior_variable,
                                                        n), np.ones(n) / n)

        def generate_inner_prior_samples_mc(n):
            # generate_random_prior_samples already returns (samples, weights)
            return generate_random_prior_samples(n)

        ninner_loop_samples = 300
        x, w = gauss_hermite_pts_wts_1D(ninner_loop_samples)

        def generate_inner_prior_samples_gauss(n):
            # use precomputed samples to avoid the cost of regenerating them
            assert n == x.shape[0]
            return x[None, :], w

        generate_inner_prior_samples = generate_inner_prior_samples_gauss

        nouter_loop_samples = 10000
        outer_loop_obs, outer_loop_pred_obs, inner_loop_pred_obs, \
            inner_loop_weights, __, __ = \
            precompute_compute_expected_kl_utility_data(
                generate_random_prior_samples, nouter_loop_samples, obs_fun,
                noise_fun, ninner_loop_samples,
                generate_inner_prior_samples=generate_inner_prior_samples)

        new_design_indices = np.array([1])

        outer_loop_weights = np.ones(
            (nouter_loop_samples, 1)) / nouter_loop_samples

        def log_likelihood_fun(obs, pred_obs, active_indices=None):
            return gaussian_loglike_fun(obs, pred_obs, noise_std,
                                        active_indices)

        utility = compute_expected_kl_utility_monte_carlo(
            log_likelihood_fun, outer_loop_obs, outer_loop_pred_obs,
            inner_loop_pred_obs, inner_loop_weights, outer_loop_weights,
            collected_design_indices, new_design_indices, False)

        kl_divs = []
        # overwrite a subset of the obs with previously collected data;
        # make a copy so that the outer-loop obs can be used again
        outer_loop_obs_copy = outer_loop_obs.copy()
        for ii in range(nouter_loop_samples):
            idx = np.hstack((collected_design_indices, new_design_indices))
            obs_ii = outer_loop_obs_copy[ii:ii + 1, idx]

            exact_post_mean, exact_post_cov = \
                laplace_posterior_approximation_for_linear_models(
                    Amat[idx, :], prior_mean, prior_cov_inv,
                    noise_cov_inv[np.ix_(idx, idx)], obs_ii.T)

            kl_div = gaussian_kl_divergence(exact_post_mean, exact_post_cov,
                                            prior_mean, prior_cov)
            kl_divs.append(kl_div)

        print(utility - np.mean(kl_divs), utility, np.mean(kl_divs))
        assert np.allclose(utility, np.mean(kl_divs), rtol=2e-2)
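The reference values rely on the closed-form KL divergence between two Gaussians; gaussian_kl_divergence(mean1, cov1, mean2, cov2) is assumed to compute KL(N1 || N2), for which a minimal numpy sketch is:

import numpy as np


def gaussian_kl(mean1, cov1, mean2, cov2):
    # KL(N(mean1, cov1) || N(mean2, cov2)) for column-vector means
    k = mean1.shape[0]
    cov2_inv = np.linalg.inv(cov2)
    diff = mean2 - mean1
    return 0.5 * (np.trace(cov2_inv @ cov1)
                  + float(diff.T @ cov2_inv @ diff)
                  - k + np.log(np.linalg.det(cov2) / np.linalg.det(cov1)))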