import unittest

import numpy as np

from abcpy.approx_lhd import SynLikelihood
from abcpy.continuousmodels import Normal, Uniform
from abcpy.statistics import Identity


class SynLikelihoodTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=False)
        self.likfun = SynLikelihood(self.stat_calc)
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))

    def test_likelihood(self):
        # Check whether a wrong input type raises a TypeError
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)

        # create observed data
        y_obs = [1.8]
        # calculate the statistics of the observed data
        comp_likelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_likelihood = 0.20963610211945238
        # Check that the computed value (and its dimension) is correct
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))

    def test_likelihood_multiple_observations(self):
        y_obs = [1.8, 0.9]
        comp_likelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_likelihood = 0.04457899184856649
        # Check that the computed value (and its dimension) is correct
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))

def infer_parameters_pmc():
    # define observation for true parameter values mean = (170, 65)
    rng = np.random.RandomState(seed=1)
    y_obs = [np.array(rng.multivariate_normal([170, 65], np.eye(2), 1).reshape(2, ))]

    # define prior
    from abcpy.continuousmodels import Uniform
    mu0 = Uniform([[150], [200]], name="mu0")
    mu1 = Uniform([[25], [100]], name="mu1")

    # define the model (NestedBivariateGaussian is a user-defined model, set up elsewhere in this script)
    height_weight_model = NestedBivariateGaussian([mu0, mu1])

    # define statistics
    from abcpy.statistics import Identity
    statistics_calculator = Identity(degree=2, cross=False)

    from abcpy.approx_lhd import SynLikelihood
    approx_lhd = SynLikelihood(statistics_calculator)

    # define sampling scheme (`backend` is assumed to be defined at module level)
    from abcpy.inferences import PMC
    sampler = PMC([height_weight_model], [approx_lhd], backend, seed=2)

    # sample from scheme
    T, n_sample, n_samples_per_param = 2, 10, 10
    print('PMC Inferring')
    journal = sampler.sample([y_obs], T, n_sample, n_samples_per_param)

    return journal

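# Hedged usage sketch (not part of the snippet above): infer_parameters_pmc() relies on a
# module-level `backend` and on the user-defined NestedBivariateGaussian model, both assumed to
# be set up elsewhere in the script. With those in place, the returned journal can be inspected
# with the same Journal helpers exercised in the tests further below.
from abcpy.backends import BackendDummy

backend = BackendDummy()  # sequential backend; assumed sufficient for this toy example
journal = infer_parameters_pmc()
print(journal.posterior_mean())         # posterior means of mu0 and mu1
print(journal.get_parameters().keys())  # parameter names stored in the journal
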
class SynLikelihoodTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=0)
        self.likfun = SynLikelihood(self.stat_calc)

    def test_likelihood(self):
        # Check whether a wrong input type raises a TypeError
        self.assertRaises(TypeError, self.likfun.likelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.likelihood, [2, 4], 3.4)

        # create observed data
        y_obs = [9.8]
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))

        # calculate the statistics of the observed data
        comp_likelihood = self.likfun.likelihood(y_obs, y_sim)
        expected_likelihood = 0.00924953470649
        # Check that the computed value is close to the expected one (and the dimension is right)
        self.assertLess(abs(comp_likelihood - expected_likelihood), 10e-2)

def setUp(self):
    self.stat_calc1 = Identity(degree=1, cross=0)
    self.stat_calc2 = Identity(degree=1, cross=0)
    self.likfun1 = SynLikelihood(self.stat_calc1)
    self.likfun2 = SynLikelihood(self.stat_calc2)

    ## Define Models
    # define a uniform prior distribution
    self.mu = Uniform([[-5.0], [5.0]], name='mu')
    self.sigma = Uniform([[0.0], [10.0]], name='sigma')
    # define a Gaussian model
    self.model1 = Normal([self.mu, self.sigma])
    self.model2 = Normal([self.mu, self.sigma])

    # Check whether a wrongly sized list of likelihood functions raises an error
    self.assertRaises(ValueError, ProductCombination, [self.model1, self.model2], [self.likfun1])

    self.jointapprox_lhd = ProductCombination([self.model1, self.model2], [self.likfun1, self.likfun2])

class SynLikelihoodTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=False)
        self.likfun = SynLikelihood(self.stat_calc)
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))

    def test_likelihood(self):
        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)
        # create observed data
        y_obs = [1.8]
        # calculate the statistics of the observed data
        comp_loglikelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_loglikelihood = -0.6434435652263701
        # This checks whether it computes a correct value and dimension is right
        self.assertAlmostEqual(comp_loglikelihood, expected_loglikelihood)

    def test_likelihood_multiple_observations(self):
        y_obs = [1.8, 0.9]
        comp_loglikelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_loglikelihood = -1.2726154993040115
        # This checks whether it computes a correct value and dimension is right
        self.assertAlmostEqual(comp_loglikelihood, expected_loglikelihood)

    def test_loglikelihood_additive(self):
        y_obs = [1.8, 0.9]
        comp_loglikelihood_a = self.likfun.loglikelihood([y_obs[0]], self.y_sim)
        comp_loglikelihood_b = self.likfun.loglikelihood([y_obs[1]], self.y_sim)
        comp_loglikelihood_two = self.likfun.loglikelihood(y_obs, self.y_sim)
        self.assertAlmostEqual(comp_loglikelihood_two, comp_loglikelihood_a + comp_loglikelihood_b)

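# Hedged standalone sketch (mirrors the setUp above; not part of the test suite): compute the
# synthetic log-likelihood of a single observation directly, outside of unittest.
import numpy as np
from abcpy.approx_lhd import SynLikelihood
from abcpy.continuousmodels import Normal, Uniform
from abcpy.statistics import Identity

mu = Uniform([[-5.0], [5.0]], name='mu')
sigma = Uniform([[5.0], [10.0]], name='sigma')
model = Normal([mu, sigma])

# fix the parameters and simulate 100 fake data sets, exactly as in setUp
mu._fixed_values = [1.1]
sigma._fixed_values = [1.0]
y_sim = model.forward_simulate(model.get_input_values(), 100, rng=np.random.RandomState(1))

likfun = SynLikelihood(Identity(degree=2, cross=False))
print(likfun.loglikelihood([1.8], y_sim))  # expected to be close to -0.6434, as asserted above
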
# Fragment from a larger script: the block below is the body of an earlier model-selection branch
# (the Gaussian case); `sigma_bounds`, `mu_abc`, `IidNormal`, `GaussianStatistics`, `args`,
# `results_folder`, `n_samples_per_param`, `seed` and `technique` are defined above that branch.
    sigma_abc = Uniform([[sigma_bounds[0]], [sigma_bounds[1]]], name='sigma')
    ABC_model = IidNormal([mu_abc, sigma_abc], iid_size=10, name='gaussian')

    statistic = GaussianStatistics()

    if results_folder is None:
        results_folder = "results/gaussian/"

    observation_folder = results_folder + '/' + args.observation_folder + "/"
    inference_folder = results_folder + '/' + args.inference_folder + "/"

    extract_params_and_weights_from_journal = extract_params_and_weights_from_journal_gaussian
    extract_posterior_mean_from_journal = extract_posterior_mean_from_journal_gaussian
else:
    raise NotImplementedError

save_dict_to_json(args.__dict__, inference_folder + 'config.json')

# now setup the Synthetic likelihood experiment or the ratio estimation one:
if technique == "SL":
    approx_lhd = SynLikelihood(statistic)
elif technique == "RE":
    # for the RE approach it is better to use pairwise combinations of the statistics,
    # in order to make the comparison with SL fair
    statistic = Identity(cross=True, previous_statistics=statistic, degree=1)  # this should automatically use the pairwise comb.
    # when instantiating this, it takes additional parameters; does it simulate from the model immediately?
    approx_lhd = PenLogReg(statistic, [ABC_model], n_samples_per_param, n_folds=10, max_iter=100000, seed=seed)
else:
    raise NotImplementedError

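# Hedged continuation sketch (assumption, not taken from the fragment above): whichever
# approximate likelihood is selected (SL or RE), it would typically be handed to a sampler in
# the same way as in the other examples, e.g. PMC over the iid Gaussian model. `backend`,
# `x_obs`, `steps` and `n_samples` are placeholders assumed to be defined elsewhere in the script.
from abcpy.inferences import PMC

sampler = PMC([ABC_model], [approx_lhd], backend, seed=seed)
journal = sampler.sample([x_obs], steps, n_samples, n_samples_per_param)
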
def infer_parameters():
    # The data corresponding to model_1 defined below
    grades_obs = [3.872486707973337, 4.6735380808674405, 3.9703538990858376, 4.11021272048805, 4.211048655421368,
                  4.154817956586653, 4.0046893064392695, 4.01891381384729, 4.123804757702919, 4.014941267301294,
                  3.888174595940634, 4.185275142948246, 4.55148774469135, 3.8954427675259016, 4.229264035335705,
                  3.839949451328312, 4.039402553532825, 4.128077814241238, 4.361488645531874, 4.086279074446419,
                  4.370801602256129, 3.7431697332475466, 4.459454162392378, 3.8873973643008255, 4.302566721487124,
                  4.05556051626865, 4.128817316703757, 3.8673704442215984, 4.2174459453805015, 4.202280254493361,
                  4.072851400451234, 3.795173229398952, 4.310702877332585, 4.376886328810306, 4.183704734748868,
                  4.332192463368128, 3.9071312388426587, 4.311681374107893, 3.55187913252144, 3.318878360783221,
                  4.187850500877817, 4.207923106081567, 4.190462065625179, 4.2341474252986036, 4.110228694304768,
                  4.1589891480847765, 4.0345604687633045, 4.090635481715123, 3.1384654393449294, 4.20375641386518,
                  4.150452690356067, 4.015304457401275, 3.9635442007388195, 4.075915739179875, 3.5702080541929284,
                  4.722333310410388, 3.9087618197155227, 4.3990088006390735, 3.968501165774181, 4.047603645360087,
                  4.109184340976979, 4.132424805281853, 4.444358334346812, 4.097211737683927, 4.288553086265748,
                  3.8668863066511303, 3.8837108501541007]

    # The prior information changing the class size and social background, depending on school location
    from abcpy.continuousmodels import Uniform, Normal
    school_location = Uniform([[0.2], [0.3]], )

    # The average class size of a certain school
    class_size = Normal([[school_location], [0.1]], )

    # The social background of a student
    background = Normal([[school_location], [0.1]], )

    # The grade a student would receive without any bias
    grade_without_additional_effects = Normal([[4.5], [0.25]], )

    # The grade a student of a certain school receives
    final_grade = grade_without_additional_effects - class_size - background

    # The data corresponding to model_2 defined below
    scholarship_obs = [2.7179657436207805, 2.124647285937229, 3.07193407853297, 2.335024761813643, 2.871893855192,
                       3.4332002458233837, 3.649996835818173, 3.50292335102711, 2.815638168018455, 2.3581613289315992,
                       2.2794821846395568, 2.8725835459926503, 3.5588573782815685, 2.26053126526137, 1.8998143530749971,
                       2.101110815311782, 2.3482974964831573, 2.2707679029919206, 2.4624550491079225, 2.867017757972507,
                       3.204249152084959, 2.4489542437714213, 1.875415915801106, 2.5604889644872433, 3.891985093269989,
                       2.7233633223405205, 2.2861070389383533, 2.9758813233490082, 3.1183403287267755, 2.911814060853062,
                       2.60896794303205, 3.5717098647480316, 3.3355752461779824, 1.99172284546858, 2.339937680892163,
                       2.9835630207301636, 2.1684912355975774, 3.014847335983034, 2.7844122961916202, 2.752119871525148,
                       2.1567428931391635, 2.5803629307680644, 2.7326646074552103, 2.559237193255186, 3.13478196958166,
                       2.388760269933492, 3.2822443541491815, 2.0114405441787437, 3.0380056368041073, 2.4889680313769724,
                       2.821660164621084, 3.343985964873723, 3.1866861970287808, 4.4535037154856045, 3.0026333138006027,
                       2.0675706089352612, 2.3835301730913185, 2.584208398359566, 3.288077633446465, 2.6955853384148183,
                       2.918315169739928, 3.2464814419322985, 2.1601516779909433, 3.231003347780546, 1.0893224045062178,
                       0.8032302688764734, 2.868438615047827]

    # A quantity that determines whether a student will receive a scholarship
    scholarship_without_additional_effects = Normal([[2], [0.5]], )

    # A quantity determining whether a student receives a scholarship, including his social background
    final_scholarship = scholarship_without_additional_effects + 3 * background

    # Define a summary statistics for final grade and final scholarship
    from abcpy.statistics import Identity
    statistics_calculator_final_grade = Identity(degree=2, cross=False)
    statistics_calculator_final_scholarship = Identity(degree=3, cross=False)

    # Define an approximate likelihood for final grade and final scholarship
    from abcpy.approx_lhd import SynLikelihood
    approx_lhd_final_grade = SynLikelihood(statistics_calculator_final_grade)
    approx_lhd_final_scholarship = SynLikelihood(statistics_calculator_final_scholarship)

    # Define a backend
    from abcpy.backends import BackendDummy as Backend
    backend = Backend()

    # Define a perturbation kernel
    from abcpy.perturbationkernel import DefaultKernel
    kernel = DefaultKernel([school_location, class_size, grade_without_additional_effects,
                            background, scholarship_without_additional_effects])

    # Define sampling parameters
    T, n_sample, n_samples_per_param = 3, 250, 10

    # Define sampler
    from abcpy.inferences import PMC
    sampler = PMC([final_grade, final_scholarship],
                  [approx_lhd_final_grade, approx_lhd_final_scholarship], backend, kernel)

    # Sample
    journal = sampler.sample([grades_obs, scholarship_obs], T, n_sample, n_samples_per_param)

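# Hedged driver sketch (assumption: the original example presumably ends by returning the journal
# from infer_parameters(), which is cut off above). The resulting posterior can then be summarised
# with the Journal helpers also exercised in the PMC test below.
if __name__ == "__main__":
    journal = infer_parameters()  # assumes infer_parameters() returns the journal
    print(journal.posterior_mean())
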
def test_sample(self):
    # setup backend
    backend = BackendDummy()

    # define a uniform prior distribution
    mu = Uniform([[-5.0], [5.0]], name='mu')
    sigma = Uniform([[0.0], [10.0]], name='sigma')
    # define a Gaussian model
    self.model = Normal([mu, sigma])

    # define sufficient statistics for the model
    stat_calc = Identity(degree=2, cross=0)

    # create fake observed data
    # y_obs = self.model.forward_simulate(1, np.random.RandomState(1))[0].tolist()
    y_obs = [np.array(9.8)]

    # Define the likelihood function
    likfun = SynLikelihood(stat_calc)

    T, n_sample, n_samples_per_param = 1, 10, 100
    sampler = PMC([self.model], [likfun], backend, seed=1)
    journal = sampler.sample([y_obs], T, n_sample, n_samples_per_param,
                             covFactors=np.array([.1, .1]), iniPoints=None)
    mu_post_sample, sigma_post_sample, post_weights = np.array(journal.get_parameters()['mu']), \
        np.array(journal.get_parameters()['sigma']), np.array(journal.get_weights())

    # Compute posterior mean
    mu_post_mean, sigma_post_mean = journal.posterior_mean()['mu'], journal.posterior_mean()['sigma']

    # test shape of sample
    mu_sample_shape, sigma_sample_shape, weights_sample_shape = \
        (len(mu_post_sample), mu_post_sample[0].shape[1]), \
        (len(sigma_post_sample), sigma_post_sample[0].shape[1]), \
        post_weights.shape
    self.assertEqual(mu_sample_shape, (10, 1))
    self.assertEqual(sigma_sample_shape, (10, 1))
    self.assertEqual(weights_sample_shape, (10, 1))
    self.assertLess(abs(mu_post_mean - (-3.373004641385251)), 1e-3)
    self.assertLess(abs(sigma_post_mean - 6.519325027532673), 1e-3)
    self.assertFalse(journal.number_of_simulations == 0)

    # use the PMC scheme for T = 2
    T, n_sample, n_samples_per_param = 2, 10, 100
    sampler = PMC([self.model], [likfun], backend, seed=1)
    journal = sampler.sample([y_obs], T, n_sample, n_samples_per_param,
                             covFactors=np.array([.1, .1]), iniPoints=None)
    mu_post_sample, sigma_post_sample, post_weights = np.array(journal.get_parameters()['mu']), \
        np.array(journal.get_parameters()['sigma']), np.array(journal.get_weights())

    # Compute posterior mean
    mu_post_mean, sigma_post_mean = journal.posterior_mean()['mu'], journal.posterior_mean()['sigma']

    # test shape of sample
    mu_sample_shape, sigma_sample_shape, weights_sample_shape = \
        (len(mu_post_sample), mu_post_sample[0].shape[1]), \
        (len(sigma_post_sample), sigma_post_sample[0].shape[1]), \
        post_weights.shape
    self.assertEqual(mu_sample_shape, (10, 1))
    self.assertEqual(sigma_sample_shape, (10, 1))
    self.assertEqual(weights_sample_shape, (10, 1))
    self.assertLess(abs(mu_post_mean - (-3.2517600952705257)), 1e-3)
    self.assertLess(abs(sigma_post_mean - 6.9214661382633365), 1e-3)
    self.assertFalse(journal.number_of_simulations == 0)