Exemplo n.º 1
0
 def setUp(self):
     """Build the prior, model, statistics calculator and PenLogReg fixture."""
     # Uniform prior over (mu, sigma); Gaussian model seeded for determinism.
     self.prior = Uniform([-5.0, 5.0], [5.0, 10.0], seed=1)
     self.model = Gaussian(self.prior, mu=1.1, sigma=1.0, seed=1)
     # Identity statistics with quadratic terms and no cross terms.
     self.stat_calc = Identity(degree=2, cross=0)
     # Penalised logistic-regression approximate likelihood.
     self.likfun = PenLogReg(self.stat_calc, self.model, n_simulate=100, n_folds=10)
Exemplo n.º 2
0
 def setUp(self):
     """Create the hierarchical Normal model and the PenLogReg likelihood fixture."""
     # Uniform hyper-priors on the Normal model's mean and standard deviation.
     self.mu = Uniform([[-5.0], [5.0]], name='mu')
     self.sigma = Uniform([[5.0], [10.0]], name='sigma')
     self.model = Normal([self.mu, self.sigma])
     self.stat_calc = Identity(degree=2, cross=0)
     # Seeded so downstream likelihood values are reproducible.
     self.likfun = PenLogReg(self.stat_calc, [self.model], n_simulate=100,
                             n_folds=10, max_iter=100000, seed=1)
Exemplo n.º 3
0
class PenLogRegTests(unittest.TestCase):
    """Tests for the PenLogReg approximate log-likelihood on a univariate
    Normal model and a bivariate Uniform model."""

    def setUp(self):
        # Uniform hyper-priors for the Normal model's mean and std deviation.
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.model_bivariate = Uniform([[0, 0], [1, 1]], name="model")
        self.stat_calc = Identity(degree=2, cross=1)
        # Three fixtures: a correct one, one whose n_simulate will not match the
        # simulated-data size (to trigger a RuntimeError), and a bivariate one.
        self.likfun = PenLogReg(self.stat_calc, [self.model], n_simulate=100, n_folds=10, max_iter=100000, seed=1)
        self.likfun_wrong_n_sim = PenLogReg(self.stat_calc, [self.model], n_simulate=10, n_folds=10, max_iter=100000,
                                            seed=1)
        self.likfun_bivariate = PenLogReg(self.stat_calc, [self.model_bivariate], n_simulate=100, n_folds=10,
                                          max_iter=100000, seed=1)

        # Observed data: one and two observations per model, fixed RNG seed.
        self.y_obs = self.model.forward_simulate(self.model.get_input_values(), 1, rng=np.random.RandomState(1))
        self.y_obs_bivariate = self.model_bivariate.forward_simulate(self.model_bivariate.get_input_values(), 1,
                                                                     rng=np.random.RandomState(1))
        self.y_obs_double = self.model.forward_simulate(self.model.get_input_values(), 2, rng=np.random.RandomState(1))
        self.y_obs_bivariate_double = self.model_bivariate.forward_simulate(self.model_bivariate.get_input_values(), 2,
                                                                            rng=np.random.RandomState(1))
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))
        self.y_sim_bivariate = self.model_bivariate.forward_simulate(self.model_bivariate.get_input_values(), 100,
                                                                     rng=np.random.RandomState(1))

    def test_likelihood(self):
        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)

        # create observed data
        comp_likelihood = self.likfun.loglikelihood(self.y_obs, self.y_sim)
        expected_likelihood = 9.77317308598673e-08
        # This checks whether it computes a correct value and dimension is right. Not correct as it does not check the
        # absolute value:
        # self.assertLess(comp_likelihood - expected_likelihood, 10e-2)
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))

        # check if it returns the correct error when n_samples does not match:
        self.assertRaises(RuntimeError, self.likfun_wrong_n_sim.loglikelihood, self.y_obs, self.y_sim)

        # try now with the bivariate uniform model:
        comp_likelihood_biv = self.likfun_bivariate.loglikelihood(self.y_obs_bivariate, self.y_sim_bivariate)
        expected_likelihood_biv = 0.999999999999999
        self.assertAlmostEqual(comp_likelihood_biv, np.log(expected_likelihood_biv))

    def test_likelihood_multiple_observations(self):
        # NOTE(review): despite the name, this test reuses the single-observation
        # fixtures; the *_double fixtures built in setUp are unused here — confirm
        # whether y_obs_double / y_obs_bivariate_double were intended.
        comp_likelihood = self.likfun.loglikelihood(self.y_obs, self.y_sim)
        expected_likelihood = 9.77317308598673e-08
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))

        expected_likelihood_biv = 0.9999999999999979
        comp_likelihood_biv = self.likfun_bivariate.loglikelihood(self.y_obs_bivariate, self.y_sim_bivariate)
        self.assertAlmostEqual(comp_likelihood_biv, np.log(expected_likelihood_biv))
Exemplo n.º 4
0
class PenLogRegTests(unittest.TestCase):
    """Tests for the PenLogReg approximate likelihood on a univariate Normal model."""

    def setUp(self):
        # Uniform hyper-priors on the Normal model's mean and standard deviation.
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=0)
        # Seeded so the likelihood value checked below is reproducible.
        self.likfun = PenLogReg(self.stat_calc, [self.model],
                                n_simulate=100,
                                n_folds=10,
                                max_iter=100000,
                                seed=1)

    def test_likelihood(self):

        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.likelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.likelihood, [2, 4], 3.4)

        # create observed data
        y_obs = self.model.forward_simulate(
            self.model.get_input_values(), 1,
            rng=np.random.RandomState(1))[0].tolist()
        # create fake simulated data at pinned parameter values
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        y_sim = self.model.forward_simulate(self.model.get_input_values(),
                                            100,
                                            rng=np.random.RandomState(1))
        comp_likelihood = self.likfun.likelihood(y_obs, y_sim)
        expected_likelihood = 4.3996556327224594
        # Fix: compare the *absolute* deviation. The original signed comparison
        # (comp_likelihood - expected_likelihood < 10e-2) would also pass for any
        # value arbitrarily far below the expectation, masking regressions.
        self.assertLess(abs(comp_likelihood - expected_likelihood), 10e-2)
Exemplo n.º 5
0
class PenLogRegTests(unittest.TestCase):
    """Tests for the PenLogReg approximate likelihood (legacy Gaussian/prior API)."""

    def setUp(self):
        # Uniform prior over (mu, sigma); Gaussian model seeded for determinism.
        self.prior = Uniform([-5.0, 5.0], [5.0, 10.0], seed=1)
        self.model = Gaussian(self.prior, mu=1.1, sigma=1.0, seed=1)
        self.stat_calc = Identity(degree=2, cross=0)
        self.likfun = PenLogReg(self.stat_calc,
                                self.model,
                                n_simulate=100,
                                n_folds=10)

    def test_likelihood(self):

        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.likelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.likelihood, [2, 4], 3.4)

        # create observed data
        y_obs = self.model.simulate(1)
        # create fake simulated data at pinned parameter values
        self.model.set_parameters(np.array([1.1, 1.0]))
        y_sim = self.model.simulate(100)
        comp_likelihood = self.likfun.likelihood(y_obs, y_sim)
        expected_likelihood = 4.39965563272
        # Fix: compare the *absolute* deviation. The original signed comparison
        # (comp_likelihood - expected_likelihood < 10e-2) would also pass for any
        # value arbitrarily far below the expectation, masking regressions.
        self.assertLess(abs(comp_likelihood - expected_likelihood), 10e-2)
Exemplo n.º 6
0
    def setUp(self):
        """Create models, PenLogReg fixtures, and observed/simulated datasets."""
        # Hyper-priors for the Normal model, plus a separate bivariate Uniform model.
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.model_bivariate = Uniform([[0, 0], [1, 1]], name="model")
        self.stat_calc = Identity(degree=2, cross=1)
        # Likelihood fixtures: a correct one, one whose n_simulate will not match,
        # and one for the bivariate model.
        self.likfun = PenLogReg(self.stat_calc, [self.model], n_simulate=100,
                                n_folds=10, max_iter=100000, seed=1)
        self.likfun_wrong_n_sim = PenLogReg(self.stat_calc, [self.model],
                                            n_simulate=10, n_folds=10,
                                            max_iter=100000, seed=1)
        self.likfun_bivariate = PenLogReg(self.stat_calc, [self.model_bivariate],
                                          n_simulate=100, n_folds=10,
                                          max_iter=100000, seed=1)

        # Observed data: one and two observations from each model, fixed RNG seed.
        self.y_obs = self.model.forward_simulate(
            self.model.get_input_values(), 1, rng=np.random.RandomState(1))
        self.y_obs_bivariate = self.model_bivariate.forward_simulate(
            self.model_bivariate.get_input_values(), 1, rng=np.random.RandomState(1))
        self.y_obs_double = self.model.forward_simulate(
            self.model.get_input_values(), 2, rng=np.random.RandomState(1))
        self.y_obs_bivariate_double = self.model_bivariate.forward_simulate(
            self.model_bivariate.get_input_values(), 2, rng=np.random.RandomState(1))

        # Fake simulated data: pin the parameters, then draw 100 samples per model.
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(
            self.model.get_input_values(), 100, rng=np.random.RandomState(1))
        self.y_sim_bivariate = self.model_bivariate.forward_simulate(
            self.model_bivariate.get_input_values(), 100, rng=np.random.RandomState(1))
Exemplo n.º 7
0
    raise NotImplementedError
# Persist the run configuration next to the inference outputs for reproducibility.
save_dict_to_json(args.__dict__, inference_folder + 'config.json')

# now setup the Synthetic likelihood experiments or ratio estimation one:
if technique == "SL":
    approx_lhd = SynLikelihood(statistic)
elif technique == "RE":
    # for the RE approach: it is better to use pairwise combinations of the statistics in order to make comparison with
    # SL fair
    statistic = Identity(
        cross=True, previous_statistics=statistic,
        degree=1)  # this should automatically use the pairwise comb.
    # when instantiating this, it takes additional parameters; does it simulate from the model immediately?
    approx_lhd = PenLogReg(statistic, [ABC_model],
                           n_samples_per_param,
                           n_folds=10,
                           max_iter=100000,
                           seed=seed)
else:
    # Only "SL" and "RE" are supported techniques.
    raise NotImplementedError

# Population Monte Carlo sampler driven by the chosen approximate likelihood.
sampler = PMC([ABC_model], [approx_lhd], backend, seed=seed)

# Per-observation result accumulators (filled in during the inference loop).
wass_dist_array = np.zeros(n_observations)
RMSE_post_mean = np.zeros(n_observations)
running_time_inference = np.zeros(n_observations)

# Common suffix encoding the run's hyperparameters for output filenames.
namefile_postfix_no_index = "_steps_{}_n_samples_{}_n_samples_per_param_{}".format(
    steps, n_samples, n_samples_per_param)
if perform_postprocessing:
    theta_test = np.zeros(