Example #1
from elfi.methods.bo.utils import stochastic_optimization


def test_stochastic_optimization():
    # Minimize a simple convex function over [-1, 1]; the optimum is at 0.
    fun = lambda x: x**2
    bounds = ((-1, 1),)
    its = int(1e3)
    polish = True
    loc, val = stochastic_optimization(fun, bounds, its, polish)
    assert abs(loc - 0.0) < 1e-5
    assert abs(val - 0.0) < 1e-5
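For orientation, a rough sketch of what a routine with this signature could do: sample candidate points uniformly inside the bounds, keep the best one, and optionally polish it with a bounded local optimizer. This is an illustrative stand-in under those assumptions, not ELFI's actual implementation, and the _sketch name is made up here.

import numpy as np
from scipy import optimize


def stochastic_optimization_sketch(fun, bounds, maxiter=1000, polish=True, seed=0):
    """Illustrative stand-in (not ELFI's code): uniform random search over
    a box, optionally refined with a bounded local optimizer."""
    rng = np.random.RandomState(seed)
    lows = np.array([b[0] for b in bounds])
    highs = np.array([b[1] for b in bounds])
    # Draw maxiter candidates uniformly inside the box and evaluate them.
    candidates = rng.uniform(lows, highs, size=(maxiter, len(bounds)))
    scalar_fun = lambda t: float(np.squeeze(fun(t)))
    vals = np.array([scalar_fun(c) for c in candidates])
    best = candidates[np.argmin(vals)]
    if polish:
        # Refine the best random point with L-BFGS-B inside the same bounds.
        res = optimize.minimize(scalar_fun, best, bounds=bounds, method='L-BFGS-B')
        return res.x, res.fun
    return best, vals.min()

With fun = lambda x: x**2 and bounds = ((-1, 1),), the polished minimizer lands well within the test's 1e-5 tolerance of the true optimum at 0.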
Example #2
    def extract_result(self):
        """Extract the optimization result from the target model."""
        # Locate the approximate minimizer of the surrogate's mean function.
        x_min, _ = stochastic_optimization(self.target_model.predict_mean,
                                           self.target_model.bounds,
                                           seed=self.seed)

        # Convert the minimizer and the acquired evidence to named batches.
        batch_min = arr2d_to_batch(x_min, self.parameter_names)
        outputs = arr2d_to_batch(self.target_model.X, self.parameter_names)
        outputs[self.target_name] = self.target_model.Y

        return OptimizationResult(x_min=batch_min,
                                  outputs=outputs,
                                  **self._extract_result_kwargs())
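arr2d_to_batch pairs the columns of a 2-D array with parameter names, producing the dict-of-arrays batch format returned above. A minimal sketch of the assumed behavior (the _sketch name and internals are assumptions, not ELFI's code):

import numpy as np


def arr2d_to_batch_sketch(x, names):
    # Map column i of a 2-D array to the i-th parameter name,
    # yielding a dict-of-arrays batch (assumed behavior of arr2d_to_batch).
    x = np.atleast_2d(x)
    return {name: x[:, i] for i, name in enumerate(names)}


# Usage: arr2d_to_batch_sketch(np.array([[0.5, -0.3]]), ['t1', 't2'])
# -> {'t1': array([ 0.5]), 't2': array([-0.3])}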
Example #3
import numpy as np

import elfi
from elfi.examples import ma2
from elfi.methods.bo.utils import minimize, stochastic_optimization
from elfi.model.elfi_model import NodeReference


# setup_ma2_with_informative_data and check_inference_with_informative_data
# are shared helpers defined elsewhere in the test suite.
def test_BOLFI():
    m, true_params = setup_ma2_with_informative_data()

    # Log discrepancy tends to work better
    log_d = NodeReference(m['d'],
                          state=dict(_operation=np.log),
                          model=m,
                          name='log_d')

    bolfi = elfi.BOLFI(log_d,
                       initial_evidence=20,
                       update_interval=10,
                       batch_size=5,
                       bounds={
                           't1': (-2, 2),
                           't2': (-1, 1)
                       },
                       acq_noise_var=.1)
    n = 300
    res = bolfi.infer(n)
    assert bolfi.target_model.n_evidence == n
    acq_x = bolfi.target_model._gp.X

    # check_inference_with_informative_data(res, 1, true_params, error_bound=.2)
    assert np.abs(res.x_min['t1'] - true_params['t1']) < 0.2
    assert np.abs(res.x_min['t2'] - true_params['t2']) < 0.2

    # Test that you can continue the inference where we left off
    res = bolfi.infer(n + 10)
    assert bolfi.target_model.n_evidence == n + 10
    assert np.array_equal(bolfi.target_model._gp.X[:n, :], acq_x)

    post = bolfi.extract_posterior()

    # TODO: make cleaner.
    post_ml = minimize(post._neg_unnormalized_loglikelihood,
                       post.model.bounds,
                       post._gradient_neg_unnormalized_loglikelihood,
                       post.prior,
                       post.n_inits,
                       post.max_opt_iters,
                       random_state=post.random_state)[0]
    # TODO: Here we cannot use the minimize method due to sharp edges in the posterior.
    #       If a MAP method is implemented, one must be able to set the optimizer and
    #       provide its options.
    post_map = stochastic_optimization(post._neg_unnormalized_logposterior,
                                       post.model.bounds)[0]
    vals_ml = dict(t1=np.array([post_ml[0]]), t2=np.array([post_ml[1]]))
    check_inference_with_informative_data(vals_ml,
                                          1,
                                          true_params,
                                          error_bound=.2)
    vals_map = dict(t1=np.array([post_map[0]]), t2=np.array([post_map[1]]))
    check_inference_with_informative_data(vals_map,
                                          1,
                                          true_params,
                                          error_bound=.2)

    n_samples = 400
    n_chains = 4
    res_sampling = bolfi.sample(n_samples, n_chains=n_chains)
    check_inference_with_informative_data(res_sampling.samples,
                                          n_samples // 2 * n_chains,
                                          true_params,
                                          error_bound=.2)

    # check the cached predictions for RBF
    x = np.random.random((1, len(true_params)))
    bolfi.target_model.is_sampling = True

    pred_mu, pred_var = bolfi.target_model._gp.predict(x)
    pred_cached_mu, pred_cached_var = bolfi.target_model.predict(x)
    assert np.allclose(pred_mu, pred_cached_mu)
    assert np.allclose(pred_var, pred_cached_var)

    grad_mu, grad_var = bolfi.target_model._gp.predictive_gradients(x)
    grad_cached_mu, grad_cached_var = bolfi.target_model.predictive_gradients(x)
    assert np.allclose(grad_mu[:, :, 0], grad_cached_mu)
    assert np.allclose(grad_var, grad_cached_var)

    # test calculation of prior logpdfs
    true_logpdf_prior = ma2.CustomPrior1.logpdf(x[0, 0], 2)
    true_logpdf_prior += ma2.CustomPrior2.logpdf(x[0, 1], x[0, 0], 1)

    assert np.isclose(true_logpdf_prior, post.prior.logpdf(x[0, :]))
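The cached-prediction asserts above check that predict and predictive_gradients agree with the raw GP once is_sampling is enabled. A minimal sketch of that memoization pattern, using hypothetical names rather than ELFI's GP wrapper:

import numpy as np


class CachedGPSketch:
    """Hypothetical sketch: during MCMC the same point is often queried
    repeatedly, so the last (x, prediction) pair is memoized and must
    agree exactly with a fresh prediction from the underlying GP."""

    def __init__(self, gp):
        self._gp = gp
        self._cache_x = None
        self._cache = None

    def predict(self, x):
        # Reuse the cached prediction when x is unchanged since the last call.
        if self._cache_x is not None and np.array_equal(x, self._cache_x):
            return self._cache
        self._cache_x = np.copy(x)
        self._cache = self._gp.predict(x)
        return self._cache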