class PenLogRegTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=0)
        self.likfun = PenLogReg(self.stat_calc, [self.model], n_simulate=100, n_folds=10, max_iter=100000, seed=1)

    def test_likelihood(self):
        # Checks whether wrong input type produces an error message
        self.assertRaises(TypeError, self.likfun.likelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.likelihood, [2, 4], 3.4)

        # create observed data
        y_obs = self.model.forward_simulate(self.model.get_input_values(), 1,
                                            rng=np.random.RandomState(1))[0].tolist()

        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))

        comp_likelihood = self.likfun.likelihood(y_obs, y_sim)
        expected_likelihood = 4.3996556327224594
        # This checks whether it computes a correct value (absolute difference, so the check is two-sided)
        self.assertLess(abs(comp_likelihood - expected_likelihood), 10e-2)
class ProductCombinationTests(unittest.TestCase):
    def setUp(self):
        self.stat_calc1 = Identity(degree=1, cross=0)
        self.stat_calc2 = Identity(degree=1, cross=0)
        self.likfun1 = SynLiklihood(self.stat_calc1)
        self.likfun2 = SynLiklihood(self.stat_calc2)

        ## Define Models
        # define a uniform prior distribution
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model1 = Normal([self.mu, self.sigma])
        self.model2 = Normal([self.mu, self.sigma])

        # Check whether a wrong number of likelihood functions raises an error
        self.assertRaises(ValueError, ProductCombination, [self.model1, self.model2], [self.likfun1])

        self.jointapprox_lhd = ProductCombination([self.model1, self.model2], [self.likfun1, self.likfun2])

    def test_likelihood(self):
        # dummy data for the dimensionality checks
        a = [[0, 0, 0], [0, 0, 0]]
        b = [[0, 0, 0], [0, 0, 0]]
        c = [[1, 1, 1], [1, 1, 1]]

        # Checks whether wrong input type produces an error message
        self.assertRaises(TypeError, self.jointapprox_lhd.likelihood, 3.4, [[2, 1]])
        self.assertRaises(TypeError, self.jointapprox_lhd.likelihood, [[2, 4]], 3.4)

        # test input has different dimensionality
        self.assertRaises(BaseException, self.jointapprox_lhd.likelihood, [a], [b, c])
        self.assertRaises(BaseException, self.jointapprox_lhd.likelihood, [b, c], [a])

        # test whether they compute correct values
        # create observed data
        y_obs = [[9.8], [9.8]]
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        y_sim_1 = self.model1.forward_simulate(self.model1.get_input_values(), 100, rng=np.random.RandomState(1))
        y_sim_2 = self.model2.forward_simulate(self.model2.get_input_values(), 100, rng=np.random.RandomState(1))
        # calculate the joint likelihood of the observed data
        comp_likelihood = self.jointapprox_lhd.likelihood(y_obs, [y_sim_1, y_sim_2])
        expected_likelihood = 8.612491843767518e-43
        # This checks whether it computes a correct value (absolute difference, so the check is two-sided)
        self.assertLess(abs(comp_likelihood - expected_likelihood), 10e-2)
class PenLogRegTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.model_bivariate = Uniform([[0, 0], [1, 1]], name="model")
        self.stat_calc = Identity(degree=2, cross=1)
        self.likfun = PenLogReg(self.stat_calc, [self.model], n_simulate=100, n_folds=10, max_iter=100000, seed=1)
        self.likfun_wrong_n_sim = PenLogReg(self.stat_calc, [self.model], n_simulate=10, n_folds=10,
                                            max_iter=100000, seed=1)
        self.likfun_bivariate = PenLogReg(self.stat_calc, [self.model_bivariate], n_simulate=100, n_folds=10,
                                          max_iter=100000, seed=1)

        self.y_obs = self.model.forward_simulate(self.model.get_input_values(), 1, rng=np.random.RandomState(1))
        self.y_obs_bivariate = self.model_bivariate.forward_simulate(self.model_bivariate.get_input_values(), 1,
                                                                     rng=np.random.RandomState(1))
        self.y_obs_double = self.model.forward_simulate(self.model.get_input_values(), 2,
                                                        rng=np.random.RandomState(1))
        self.y_obs_bivariate_double = self.model_bivariate.forward_simulate(self.model_bivariate.get_input_values(), 2,
                                                                            rng=np.random.RandomState(1))
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))
        self.y_sim_bivariate = self.model_bivariate.forward_simulate(self.model_bivariate.get_input_values(), 100,
                                                                     rng=np.random.RandomState(1))

    def test_likelihood(self):
        # Checks whether wrong input type produces an error message
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)

        # compute the loglikelihood of the observed data (created in setUp)
        comp_likelihood = self.likfun.loglikelihood(self.y_obs, self.y_sim)
        expected_likelihood = 9.77317308598673e-08
        # This checks whether it computes a correct value. A check of the form
        # self.assertLess(comp_likelihood - expected_likelihood, 10e-2)
        # would not be sufficient, as it does not bound the absolute value of the difference.
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))

        # check if it returns the correct error when n_samples does not match:
        self.assertRaises(RuntimeError, self.likfun_wrong_n_sim.loglikelihood, self.y_obs, self.y_sim)

        # try now with the bivariate uniform model:
        comp_likelihood_biv = self.likfun_bivariate.loglikelihood(self.y_obs_bivariate, self.y_sim_bivariate)
        expected_likelihood_biv = 0.999999999999999
        self.assertAlmostEqual(comp_likelihood_biv, np.log(expected_likelihood_biv))

    def test_likelihood_multiple_observations(self):
        comp_likelihood = self.likfun.loglikelihood(self.y_obs, self.y_sim)
        expected_likelihood = 9.77317308598673e-08
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))

        expected_likelihood_biv = 0.9999999999999979
        comp_likelihood_biv = self.likfun_bivariate.loglikelihood(self.y_obs_bivariate, self.y_sim_bivariate)
        self.assertAlmostEqual(comp_likelihood_biv, np.log(expected_likelihood_biv))
class SynLikelihoodTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=False)
        self.likfun = SynLikelihood(self.stat_calc)
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))

    def test_likelihood(self):
        # Checks whether wrong input type produces an error message
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)

        # create observed data
        y_obs = [1.8]
        # calculate the loglikelihood of the observed data
        comp_likelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_likelihood = 0.20963610211945238
        # This checks whether it computes a correct value and the dimension is right
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))

    def test_likelihood_multiple_observations(self):
        y_obs = [1.8, 0.9]
        comp_likelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_likelihood = 0.04457899184856649
        # This checks whether it computes a correct value and the dimension is right
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))
class SynLiklihoodTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=0)
        self.likfun = SynLiklihood(self.stat_calc)

    def test_likelihood(self):
        # Checks whether wrong input type produces an error message
        self.assertRaises(TypeError, self.likfun.likelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.likelihood, [2, 4], 3.4)

        # create observed data
        y_obs = [9.8]
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))
        # calculate the likelihood of the observed data
        comp_likelihood = self.likfun.likelihood(y_obs, y_sim)
        expected_likelihood = 0.00924953470649
        # This checks whether it computes a correct value (absolute difference, so the check is two-sided)
        self.assertLess(abs(comp_likelihood - expected_likelihood), 10e-2)
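# Illustrative sketch (not part of the original tests): how the synthetic likelihood
# exercised above can be evaluated directly, outside the unittest harness. It reuses
# only classes and calls that already appear in this file; the helper name and the
# numeric values are placeholders for illustration.
def _example_synthetic_likelihood():
    mu = Uniform([[-5.0], [5.0]], name='mu')
    sigma = Uniform([[5.0], [10.0]], name='sigma')
    model = Normal([mu, sigma])
    likfun = SynLiklihood(Identity(degree=2, cross=0))
    # fix pseudo-true parameter values and generate synthetic data from the model
    mu._fixed_values = [1.1]
    sigma._fixed_values = [1.0]
    y_obs = [9.8]
    y_sim = model.forward_simulate(model.get_input_values(), 100, rng=np.random.RandomState(1))
    # approximate likelihood of the observed data given the simulated statistics
    return likfun.likelihood(y_obs, y_sim)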
    def test_transformation(self):
        if has_torch:
            # Transform statistics extraction
            self.new_statistics_calculator = self.statisticslearning.get_statistics()
            self.new_statistics_calculator_with_scaler = self.statisticslearning_with_scaler.get_statistics()

            # Simulate observed data
            Obs = Normal([2, 4])
            y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()

            extracted_statistics = self.new_statistics_calculator.statistics(y_obs)
            self.assertEqual(np.shape(extracted_statistics), (1, 2))

            self.assertRaises(RuntimeError, self.new_statistics_calculator.statistics, [np.array([1, 2])])

            extracted_statistics = self.new_statistics_calculator_with_scaler.statistics(y_obs)
            self.assertEqual(np.shape(extracted_statistics), (1, 2))

            self.assertRaises(ValueError, self.new_statistics_calculator_with_scaler.statistics, [np.array([1, 2])])
    def test_transformation(self):
        if has_torch:
            self.new_statistics_calculator = self.statisticslearning_all_defaults.get_statistics()
            # with no scaler on data:
            self.new_statistics_calculator_no_scaler = self.statisticslearning_scale.get_statistics()
            # with no rescaling of the statistics:
            self.new_statistics_calculator_no_rescale = self.statisticslearning_all_defaults.get_statistics(
                rescale_statistics=False)

            # Simulate observed data
            Obs = Normal([2, 4])
            y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()

            extracted_statistics = self.new_statistics_calculator.statistics(y_obs)
            self.assertEqual(np.shape(extracted_statistics), (1, 2))

            extracted_statistics_no_rescale = self.new_statistics_calculator_no_rescale.statistics(y_obs)
            self.assertEqual(np.shape(extracted_statistics_no_rescale), (1, 2))
            self.assertFalse(np.allclose(extracted_statistics_no_rescale, extracted_statistics))

            self.assertRaises(RuntimeError, self.new_statistics_calculator.statistics, [np.array([1, 2])])
            self.assertRaises(RuntimeError, self.new_statistics_calculator_no_scaler.statistics, [np.array([1, 2])])
    def test_transformation(self):
        # Transform statistics extraction
        self.statistics_cal.statistics = lambda x, f2=self.summaryselection.transformation, \
            f1=self.statistics_cal.statistics: f2(f1(x))
        # Simulate observed data
        Obs = Normal([2, 4])
        y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()
        extracted_statistics = self.statistics_cal.statistics(y_obs)
        self.assertEqual(np.shape(extracted_statistics), (1, 2))
    def test_transformation(self):
        # Transform statistics extraction
        self.new_statistics_calculator = self.statisticslearning.get_statistics()
        # Simulate observed data
        Obs = Normal([2, 4])
        y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()
        extracted_statistics = self.new_statistics_calculator.statistics(y_obs)
        self.assertEqual(np.shape(extracted_statistics), (1, 2))
class SemiParametricSynLikelihoodTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc_1 = Identity(degree=1, cross=False)
        self.likfun_1 = SemiParametricSynLikelihood(self.stat_calc_1)
        self.stat_calc = Identity(degree=2, cross=False)
        self.likfun = SemiParametricSynLikelihood(self.stat_calc)
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))

    def test_likelihood(self):
        # Checks whether wrong input type produces an error message
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)

        # create observed data
        y_obs = [1.8]

        # check whether it raises the correct error with an input of wrong size
        self.assertRaises(RuntimeError, self.likfun_1.loglikelihood, y_obs, self.y_sim)

        # calculate the loglikelihood of the observed data
        comp_loglikelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_loglikelihood = -2.3069321875272815
        # This checks whether it computes a correct value and the dimension is right
        self.assertAlmostEqual(comp_loglikelihood, expected_loglikelihood)

    def test_likelihood_multiple_observations(self):
        y_obs = [1.8, 0.9]
        comp_loglikelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_loglikelihood = -3.7537571275591683
        # This checks whether it computes a correct value and the dimension is right
        self.assertAlmostEqual(comp_loglikelihood, expected_loglikelihood)

    def test_loglikelihood_additive(self):
        y_obs = [1.8, 0.9]
        comp_loglikelihood_a = self.likfun.loglikelihood([y_obs[0]], self.y_sim)
        comp_loglikelihood_b = self.likfun.loglikelihood([y_obs[1]], self.y_sim)
        comp_loglikelihood_two = self.likfun.loglikelihood(y_obs, self.y_sim)

        # the loglikelihood should be additive over independent observations
        self.assertAlmostEqual(comp_loglikelihood_two, comp_loglikelihood_a + comp_loglikelihood_b)
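# Standard unittest entry point so the module can be run directly. This is an
# assumption about how these tests are invoked; it is not shown in the original excerpt.
if __name__ == '__main__':
    unittest.main()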