def setUp(self):
    # define prior and model
    sigma = Uniform([[10], [20]])
    mu = Normal([0, 1])
    self.Y = Normal([mu, sigma])

    # define backend
    self.backend = Backend()

    # define statistics
    self.statistics_cal = Identity(degree=3, cross=False)

    if has_torch:
        # Initialize statistics learning
        self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend,
                                                  n_samples=100, n_samples_per_param=1, seed=1,
                                                  n_epochs=10, scale_samples=False)
        # with sample scaler:
        self.statisticslearning_with_scaler = SemiautomaticNN([self.Y], self.statistics_cal, self.backend,
                                                              n_samples=100, n_samples_per_param=1, seed=1,
                                                              n_epochs=10, scale_samples=True)
class SynLikelihoodTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=False)
        self.likfun = SynLikelihood(self.stat_calc)
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100,
                                                 rng=np.random.RandomState(1))

    def test_likelihood(self):
        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)

        # create observed data
        y_obs = [1.8]

        # calculate the statistics of the observed data
        comp_likelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_likelihood = 0.20963610211945238
        # This checks whether it computes a correct value and dimension is right
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))

    def test_likelihood_multiple_observations(self):
        y_obs = [1.8, 0.9]
        comp_likelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        print(comp_likelihood)
        expected_likelihood = 0.04457899184856649
        # This checks whether it computes a correct value and dimension is right
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))
class PenLogRegTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=0)
        self.likfun = PenLogReg(self.stat_calc, [self.model], n_simulate=100, n_folds=10,
                                max_iter=100000, seed=1)

    def test_likelihood(self):
        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.likelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.likelihood, [2, 4], 3.4)

        # create observed data
        y_obs = self.model.forward_simulate(self.model.get_input_values(), 1,
                                            rng=np.random.RandomState(1))[0].tolist()
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        y_sim = self.model.forward_simulate(self.model.get_input_values(), 100,
                                            rng=np.random.RandomState(1))

        comp_likelihood = self.likfun.likelihood(y_obs, y_sim)
        expected_likelihood = 4.3996556327224594
        # This checks whether it computes a correct value and dimension is right
        self.assertLess(comp_likelihood - expected_likelihood, 10e-2)
def test_transformation(self):
    if has_torch:
        self.new_statistics_calculator = self.statisticslearning_all_defaults.get_statistics()
        # with no scaler on data:
        self.new_statistics_calculator_no_scaler = self.statisticslearning_scale.get_statistics()
        # with no rescaling of the statistics:
        self.new_statistics_calculator_no_rescale = self.statisticslearning_all_defaults.get_statistics(
            rescale_statistics=False)

        # Simulate observed data
        Obs = Normal([2, 4])
        y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()

        extracted_statistics = self.new_statistics_calculator.statistics(y_obs)
        self.assertEqual(np.shape(extracted_statistics), (1, 2))
        extracted_statistics_no_rescale = self.new_statistics_calculator_no_rescale.statistics(y_obs)
        self.assertEqual(np.shape(extracted_statistics_no_rescale), (1, 2))
        self.assertFalse(np.allclose(extracted_statistics_no_rescale, extracted_statistics))

        self.assertRaises(RuntimeError, self.new_statistics_calculator.statistics, [np.array([1, 2])])
        self.assertRaises(RuntimeError, self.new_statistics_calculator_no_scaler.statistics,
                          [np.array([1, 2])])
def setUp(self):
    self.stat_calc1 = Identity(degree=1, cross=0)
    self.stat_calc2 = Identity(degree=1, cross=0)
    self.distancefunc1 = Euclidean(self.stat_calc1)
    self.distancefunc2 = Euclidean(self.stat_calc2)

    ## Define Models
    # define a uniform prior distribution
    mu = Uniform([[-5.0], [5.0]], name='mu')
    sigma = Uniform([[0.0], [10.0]], name='sigma')
    # define a Gaussian model
    self.model1 = Normal([mu, sigma])
    self.model2 = Normal([mu, sigma])

    # Check whether a wrongly sized list of distance functions gives an error
    self.assertRaises(ValueError, LinearCombination, [self.model1, self.model2],
                      [self.distancefunc1], [1.0, 1.0])
    # Check whether a wrongly sized list of weights gives an error
    self.assertRaises(ValueError, LinearCombination, [self.model1, self.model2],
                      [self.distancefunc1, self.distancefunc2], [1.0, 1.0, 1.0])

    self.jointdistancefunc = LinearCombination([self.model1, self.model2],
                                               [self.distancefunc1, self.distancefunc2], [1.0, 1.0])
def test_DefaultKernel(self):
    B1 = Binomial([10, 0.2])
    N1 = Normal([0.1, 0.01])
    N2 = Normal([0.3, N1])
    graph = Normal([B1, N2])

    Manager = AcceptedParametersManager([graph])
    backend = Backend()
    kernel = DefaultKernel([N1, N2, B1])

    Manager.update_broadcast(backend, [[2, 0.27, 0.097], [3, 0.32, 0.012]], np.array([1, 1]),
                             accepted_cov_mats=[[[0.01, 0], [0, 0.01]], []])

    kernel_parameters = []
    for krnl in kernel.kernels:
        kernel_parameters.append(Manager.get_accepted_parameters_bds_values(krnl.models))
    Manager.update_kernel_values(backend, kernel_parameters=kernel_parameters)

    rng = np.random.RandomState(1)
    perturbed_values_and_models = kernel.update(Manager, 1, rng)
    self.assertEqual(perturbed_values_and_models,
                     [(N1, [0.17443453636632419]), (N2, [0.25882435863499248]), (B1, [3])])
class SynLiklihoodTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=0)
        self.likfun = SynLiklihood(self.stat_calc)

    def test_likelihood(self):
        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.likelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.likelihood, [2, 4], 3.4)

        # create observed data
        y_obs = [9.8]
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        y_sim = self.model.forward_simulate(self.model.get_input_values(), 100,
                                            rng=np.random.RandomState(1))
        # calculate the statistics of the observed data
        comp_likelihood = self.likfun.likelihood(y_obs, y_sim)
        expected_likelihood = 0.00924953470649
        # This checks whether it computes a correct value and dimension is right
        self.assertLess(comp_likelihood - expected_likelihood, 10e-2)
def test_transformation(self):
    if has_torch:
        # Transform statistics extraction
        self.new_statistics_calculator = self.statisticslearning.get_statistics()
        self.new_statistics_calculator_with_scaler = self.statisticslearning_with_scaler.get_statistics()

        # Simulate observed data
        Obs = Normal([2, 4])
        y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()

        extracted_statistics = self.new_statistics_calculator.statistics(y_obs)
        self.assertEqual(np.shape(extracted_statistics), (1, 2))

        self.assertRaises(RuntimeError, self.new_statistics_calculator.statistics, [np.array([1, 2])])

        extracted_statistics = self.new_statistics_calculator_with_scaler.statistics(y_obs)
        self.assertEqual(np.shape(extracted_statistics), (1, 2))

        self.assertRaises(ValueError, self.new_statistics_calculator_with_scaler.statistics,
                          [np.array([1, 2])])
def test_doesnt_raise(self):
    N1 = Normal([0.1, 0.01])
    N2 = Normal([0.3, N1])
    kernel = MultivariateNormalKernel([N1, N2])
    try:
        JointPerturbationKernel([kernel])
    except ValueError:
        self.fail("JointPerturbationKernel raises an exception")
def test_transformation(self):
    # Transform statistics extraction
    self.statistics_cal.statistics = lambda x, f2=self.summaryselection.transformation, f1=self.statistics_cal.statistics: f2(f1(x))
    # Simulate observed data
    Obs = Normal([2, 4])
    y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()
    extracted_statistics = self.statistics_cal.statistics(y_obs)
    self.assertEqual(np.shape(extracted_statistics), (1, 2))
class ProductCombinationTests(unittest.TestCase):
    def setUp(self):
        self.stat_calc1 = Identity(degree=1, cross=0)
        self.stat_calc2 = Identity(degree=1, cross=0)
        self.likfun1 = SynLiklihood(self.stat_calc1)
        self.likfun2 = SynLiklihood(self.stat_calc2)

        ## Define Models
        # define a uniform prior distribution
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model1 = Normal([self.mu, self.sigma])
        self.model2 = Normal([self.mu, self.sigma])

        # Check whether a wrongly sized list of likelihood functions gives an error
        self.assertRaises(ValueError, ProductCombination, [self.model1, self.model2], [self.likfun1])

        self.jointapprox_lhd = ProductCombination([self.model1, self.model2],
                                                  [self.likfun1, self.likfun2])

    def test_likelihood(self):
        # simple test inputs
        a = [[0, 0, 0], [0, 0, 0]]
        b = [[0, 0, 0], [0, 0, 0]]
        c = [[1, 1, 1], [1, 1, 1]]

        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.jointapprox_lhd.likelihood, 3.4, [[2, 1]])
        self.assertRaises(TypeError, self.jointapprox_lhd.likelihood, [[2, 4]], 3.4)

        # test input has different dimensionality
        self.assertRaises(BaseException, self.jointapprox_lhd.likelihood, [a], [b, c])
        self.assertRaises(BaseException, self.jointapprox_lhd.likelihood, [b, c], [a])

        # test whether they compute correct values
        # create observed data
        y_obs = [[9.8], [9.8]]
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        y_sim_1 = self.model1.forward_simulate(self.model1.get_input_values(), 100,
                                               rng=np.random.RandomState(1))
        y_sim_2 = self.model2.forward_simulate(self.model2.get_input_values(), 100,
                                               rng=np.random.RandomState(1))
        # calculate the statistics of the observed data
        comp_likelihood = self.jointapprox_lhd.likelihood(y_obs, [y_sim_1, y_sim_2])
        expected_likelihood = 8.612491843767518e-43
        # This checks whether it computes a correct value and dimension is right
        self.assertLess(comp_likelihood - expected_likelihood, 10e-2)
def setUp(self):
    self.mu = Uniform([[-5.0], [5.0]], name='mu')
    self.sigma = Uniform([[5.0], [10.0]], name='sigma')
    self.model = Normal([self.mu, self.sigma])
    self.stat_calc = Identity(degree=2, cross=0)
    self.likfun = PenLogReg(self.stat_calc, [self.model], n_simulate=100, n_folds=10,
                            max_iter=100000, seed=1)
def test_transformation(self):
    # Transform statistics extraction
    self.new_statistics_calculator = self.statisticslearning.get_statistics()
    # Simulate observed data
    Obs = Normal([2, 4])
    y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()

    extracted_statistics = self.new_statistics_calculator.statistics(y_obs)
    self.assertEqual(np.shape(extracted_statistics), (1, 2))
def setUp(self):
    self.mu = Uniform([[-5.0], [5.0]], name='mu')
    self.sigma = Uniform([[5.0], [10.0]], name='sigma')
    self.model = Normal([self.mu, self.sigma])
    self.stat_calc = Identity(degree=2, cross=False)
    self.likfun = SynLikelihood(self.stat_calc)
    # create fake simulated data
    self.mu._fixed_values = [1.1]
    self.sigma._fixed_values = [1.0]
    self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100,
                                             rng=np.random.RandomState(1))
def test(self):
    B1 = Binomial([10, 0.2])
    N1 = Normal([0.1, 0.01])
    N2 = Normal([0.3, N1])
    graph = Normal([B1, N2])

    Manager = AcceptedParametersManager([graph])
    mapping, mapping_index = Manager.get_mapping([graph])
    self.assertEqual(mapping, [(B1, 0), (N2, 1), (N1, 2)])
class SemiParametricSynLikelihoodTests(unittest.TestCase):
    def setUp(self):
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc_1 = Identity(degree=1, cross=False)
        self.likfun_1 = SemiParametricSynLikelihood(self.stat_calc_1)
        self.stat_calc = Identity(degree=2, cross=False)
        self.likfun = SemiParametricSynLikelihood(self.stat_calc)
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100,
                                                 rng=np.random.RandomState(1))

    def test_likelihood(self):
        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)

        # create observed data
        y_obs = [1.8]

        # check whether it raises correct error with input of wrong size
        self.assertRaises(RuntimeError, self.likfun_1.loglikelihood, y_obs, self.y_sim)

        # calculate the statistics of the observed data
        comp_loglikelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_loglikelihood = -2.3069321875272815
        # This checks whether it computes a correct value and dimension is right
        self.assertAlmostEqual(comp_loglikelihood, expected_loglikelihood)

    def test_likelihood_multiple_observations(self):
        y_obs = [1.8, 0.9]
        comp_loglikelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_loglikelihood = -3.7537571275591683
        # This checks whether it computes a correct value and dimension is right
        self.assertAlmostEqual(comp_loglikelihood, expected_loglikelihood)

    def test_loglikelihood_additive(self):
        y_obs = [1.8, 0.9]
        comp_loglikelihood_a = self.likfun.loglikelihood([y_obs[0]], self.y_sim)
        comp_loglikelihood_b = self.likfun.loglikelihood([y_obs[1]], self.y_sim)
        comp_loglikelihood_two = self.likfun.loglikelihood(y_obs, self.y_sim)

        self.assertAlmostEqual(comp_loglikelihood_two, comp_loglikelihood_a + comp_loglikelihood_b)
def test(self):
    B1 = Binomial([10, 0.2])
    N1 = Normal([0.1, 0.01])
    N2 = Normal([0.3, N1])
    graph = Normal([B1, N2])

    Manager = AcceptedParametersManager([graph])
    backend = Backend()
    Manager.update_broadcast(backend, [[2, 3, 4], [0.27, 0.32, 0.28], [0.97, 0.12, 0.99]])

    values = Manager.get_accepted_parameters_bds_values([B1, N2, N1])
    values_expected = [np.array(x).reshape(-1, )
                       for x in [[2, 3, 4], [0.27, 0.32, 0.28], [0.97, 0.12, 0.99]]]

    self.assertTrue(all([all(a == b) for a, b in zip(values, values_expected)]))
def setUp(self):
    # define prior and model
    sigma = Uniform([[10], [20]])
    mu = Normal([0, 1])
    Y = Normal([mu, sigma])

    # define backend
    self.backend = Backend()

    # define statistics
    self.statistics_cal = Identity(degree=3, cross=False)

    # Initialize summaryselection
    self.summaryselection = Semiautomatic([Y], self.statistics_cal, self.backend,
                                          n_samples=1000, n_samples_per_param=1, seed=1)
def test_sample(self):
    # setup backend
    dummy = BackendDummy()

    # define a uniform prior distribution
    mu = Uniform([[-5.0], [5.0]], name='mu')
    sigma = Uniform([[0.0], [10.0]], name='sigma')
    # define a Gaussian model
    self.model = Normal([mu, sigma])

    # define sufficient statistics for the model
    stat_calc = Identity(degree=2, cross=0)

    # define a distance function
    dist_calc = Euclidean(stat_calc)

    # create fake observed data
    y_obs = [np.array(9.8)]

    # use the rejection sampling scheme
    sampler = RejectionABC([self.model], [dist_calc], dummy, seed=1)
    journal = sampler.sample([y_obs], 10, 1, 10)
    mu_sample = np.array(journal.get_parameters()['mu'])
    sigma_sample = np.array(journal.get_parameters()['sigma'])

    # test shape of samples
    self.assertEqual(np.shape(mu_sample), (10, 1))
    self.assertEqual(np.shape(sigma_sample), (10, 1))

    # Compute posterior mean
    # self.assertAlmostEqual(np.average(np.asarray(samples[:, 0])), 1.22301, 10e-2)
    self.assertLess(np.average(mu_sample) - 1.22301, 1e-2)
    self.assertLess(np.average(sigma_sample) - 6.992218, 10e-2)

    self.assertFalse(journal.number_of_simulations == 0)
def setUp(self):
    # setup backend
    dummy = BackendDummy()

    # define a uniform prior distribution
    mu = Uniform([[-5.0], [5.0]], name='mu')
    sigma = Uniform([[0.0], [10.0]], name='sigma')
    # define a Gaussian model
    self.model = Normal([mu, sigma])
    # define a second, simpler uniform model
    self.model2 = Uniform([[0], [10]])

    self.sampler = DrawFromPrior([self.model], dummy, seed=1)
    self.original_journal = self.sampler.sample(100)

    self.generate_from_journal = GenerateFromJournal([self.model], dummy, seed=2)
    self.generate_from_journal_2 = GenerateFromJournal([self.model2], dummy, seed=2)

    # expected mean values from bootstrapped samples:
    self.mu_mean = -0.2050921750330999
    self.sigma_mean = 5.178647189918053
    # expected mean values from subsampled samples:
    self.mu_mean_2 = -0.021275259024241676
    self.sigma_mean_2 = 5.672004487129107
def setUp(self):
    if has_torch:
        self.net = createDefaultNN(2, 3)()
        self.net_with_scaler = ScalerAndNet(self.net, None)
        self.net_with_discard_wrapper = DiscardLastOutputNet(self.net)
        self.stat_calc = NeuralEmbedding(self.net)
        self.stat_calc_with_scaler = NeuralEmbedding(self.net_with_scaler)
        self.stat_calc_with_discard_wrapper = NeuralEmbedding(self.net_with_discard_wrapper)
        # reference input and output
        torch.random.manual_seed(1)
        self.tensor = torch.randn(1, 2)
        self.out = self.net(self.tensor)
        self.out_discard = self.net_with_discard_wrapper(self.tensor)

        # try now the statistics rescaling option:
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model = Normal([mu, sigma])

        sampler = DrawFromPrior([self.model], BackendDummy(), seed=1)
        reference_parameters, reference_simulations = sampler.sample_par_sim_pairs(30, 1)
        reference_simulations = reference_simulations.reshape(reference_simulations.shape[0],
                                                              reference_simulations.shape[2])

        self.stat_calc_rescaling = NeuralEmbedding(self.net, reference_simulations=reference_simulations,
                                                   previous_statistics=Identity(degree=2))

    if not has_torch:
        self.assertRaises(ImportError, NeuralEmbedding, None)
def setUp(self):
    self.mu = Uniform([[-5.0], [5.0]], name='mu')
    self.sigma = Uniform([[5.0], [10.0]], name='sigma')
    self.model = Normal([self.mu, self.sigma])
    self.model_bivariate = Uniform([[0, 0], [1, 1]], name="model")
    self.stat_calc = Identity(degree=2, cross=1)
    self.likfun = PenLogReg(self.stat_calc, [self.model], n_simulate=100, n_folds=10,
                            max_iter=100000, seed=1)
    self.likfun_wrong_n_sim = PenLogReg(self.stat_calc, [self.model], n_simulate=10, n_folds=10,
                                        max_iter=100000, seed=1)
    self.likfun_bivariate = PenLogReg(self.stat_calc, [self.model_bivariate], n_simulate=100,
                                      n_folds=10, max_iter=100000, seed=1)

    self.y_obs = self.model.forward_simulate(self.model.get_input_values(), 1,
                                             rng=np.random.RandomState(1))
    self.y_obs_bivariate = self.model_bivariate.forward_simulate(
        self.model_bivariate.get_input_values(), 1, rng=np.random.RandomState(1))
    self.y_obs_double = self.model.forward_simulate(self.model.get_input_values(), 2,
                                                    rng=np.random.RandomState(1))
    self.y_obs_bivariate_double = self.model_bivariate.forward_simulate(
        self.model_bivariate.get_input_values(), 2, rng=np.random.RandomState(1))

    # create fake simulated data
    self.mu._fixed_values = [1.1]
    self.sigma._fixed_values = [1.0]
    self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100,
                                             rng=np.random.RandomState(1))
    self.y_sim_bivariate = self.model_bivariate.forward_simulate(
        self.model_bivariate.get_input_values(), 100, rng=np.random.RandomState(1))
def setUp(self):
    self.stat_calc1 = Identity(degree=1, cross=0)
    self.stat_calc2 = Identity(degree=1, cross=0)
    self.likfun1 = SynLikelihood(self.stat_calc1)
    self.likfun2 = SynLikelihood(self.stat_calc2)

    ## Define Models
    # define a uniform prior distribution
    self.mu = Uniform([[-5.0], [5.0]], name='mu')
    self.sigma = Uniform([[0.0], [10.0]], name='sigma')
    # define a Gaussian model
    self.model1 = Normal([self.mu, self.sigma])
    self.model2 = Normal([self.mu, self.sigma])

    # Check whether a wrongly sized list of likelihood functions gives an error
    self.assertRaises(ValueError, ProductCombination, [self.model1, self.model2], [self.likfun1])

    self.jointapprox_lhd = ProductCombination([self.model1, self.model2],
                                              [self.likfun1, self.likfun2])
def test_sample(self):
    # setup backend
    backend = BackendDummy()

    # define a uniform prior distribution
    mu = Uniform([[-5.0], [5.0]], name='mu')
    sigma = Uniform([[0.0], [10.0]], name='sigma')
    # define a Gaussian model
    self.model = Normal([mu, sigma])

    # define sufficient statistics for the model
    stat_calc = Identity(degree=2, cross=0)

    # create fake observed data
    # y_obs = self.model.forward_simulate(1, np.random.RandomState(1))[0].tolist()
    y_obs = [np.array(9.8)]

    # Define the likelihood function
    likfun = SynLiklihood(stat_calc)

    # use the PMC scheme for T = 1
    T, n_sample, n_samples_per_param = 1, 10, 100
    sampler = PMC([self.model], [likfun], backend, seed=1)
    journal = sampler.sample([y_obs], T, n_sample, n_samples_per_param,
                             covFactors=np.array([.1, .1]), iniPoints=None)
    mu_post_sample, sigma_post_sample, post_weights = (np.array(journal.get_parameters()['mu']),
                                                       np.array(journal.get_parameters()['sigma']),
                                                       np.array(journal.get_weights()))

    # Compute posterior mean
    mu_post_mean, sigma_post_mean = (np.average(mu_post_sample, weights=post_weights, axis=0),
                                     np.average(sigma_post_sample, weights=post_weights, axis=0))

    # test shape of sample
    mu_sample_shape, sigma_sample_shape, weights_sample_shape = (np.shape(mu_post_sample),
                                                                 np.shape(sigma_post_sample),
                                                                 np.shape(post_weights))
    self.assertEqual(mu_sample_shape, (10, 1))
    self.assertEqual(sigma_sample_shape, (10, 1))
    self.assertEqual(weights_sample_shape, (10, 1))
    self.assertLess(abs(mu_post_mean - (-3.402868)), 1e-3)
    self.assertLess(abs(sigma_post_mean - 6.212), 1e-3)

    self.assertFalse(journal.number_of_simulations == 0)

    # use the PMC scheme for T = 2
    T, n_sample, n_samples_per_param = 2, 10, 100
    sampler = PMC([self.model], [likfun], backend, seed=1)
    journal = sampler.sample([y_obs], T, n_sample, n_samples_per_param,
                             covFactors=np.array([.1, .1]), iniPoints=None)
    mu_post_sample, sigma_post_sample, post_weights = (np.array(journal.get_parameters()['mu']),
                                                       np.array(journal.get_parameters()['sigma']),
                                                       np.array(journal.get_weights()))

    # Compute posterior mean
    mu_post_mean, sigma_post_mean = (np.average(mu_post_sample, weights=post_weights, axis=0),
                                     np.average(sigma_post_sample, weights=post_weights, axis=0))

    # test shape of sample
    mu_sample_shape, sigma_sample_shape, weights_sample_shape = (np.shape(mu_post_sample),
                                                                 np.shape(sigma_post_sample),
                                                                 np.shape(post_weights))
    self.assertEqual(mu_sample_shape, (10, 1))
    self.assertEqual(sigma_sample_shape, (10, 1))
    self.assertEqual(weights_sample_shape, (10, 1))
    self.assertLess(abs(mu_post_mean - (-3.03325763)), 1e-3)
    self.assertLess(abs(sigma_post_mean - 6.92124735), 1e-3)

    self.assertFalse(journal.number_of_simulations == 0)
def infer_parameters(backend, steps=3, n_sample=250, n_samples_per_param=10, logging_level=logging.WARN):
    logging.basicConfig(level=logging_level)

    # define observation for true parameters mean=170, std=15
    height_obs = [160.82499176, 167.24266737, 185.71695756, 153.7045709, 163.40568812, 140.70658699,
                  169.59102084, 172.81041696, 187.38782738, 179.66358934, 176.63417241, 189.16082803,
                  181.98288443, 170.18565017, 183.78493886, 166.58387299, 161.9521899, 155.69213073,
                  156.17867343, 144.51580379, 170.29847515, 197.96767899, 153.36646527, 162.22710198,
                  158.70012047, 178.53470703, 170.77697743, 164.31392633, 165.88595994, 177.38083686,
                  146.67058471763457, 179.41946565658628, 238.02751620619537, 206.22458790620766,
                  220.89530574344568, 221.04082532837026, 142.25301427453394, 261.37656571434275,
                  171.63761180867033, 210.28121820385866, 237.29130237612236, 175.75558340169619,
                  224.54340549862235, 197.42448680731226, 165.88273684581381, 166.55094082844519,
                  229.54308602661584, 222.99844054358519, 185.30223966014586, 152.69149367593846,
                  206.94372818527413, 256.35498655339154, 165.43140916577741, 250.19273595481803,
                  148.87781549665536, 223.05547559193792, 230.03418198709608, 146.13611923127021,
                  138.24716809523139, 179.26755740864527, 141.21704876815426, 170.89587081800852,
                  222.96391329259626, 188.27229523693822, 202.67075179617672, 211.75963110985992,
                  217.45423324370509]

    # define prior
    from abcpy.continuousmodels import Uniform
    mu = Uniform([[150], [200]], name='mu')
    sigma = Uniform([[5], [25]], name='sigma')

    # define the model
    from abcpy.continuousmodels import Normal
    height = Normal([mu, sigma], name='height')

    # define statistics
    from abcpy.statistics import Identity
    statistics_calculator = Identity(degree=2, cross=False)

    # define distance
    from abcpy.distances import LogReg
    distance_calculator = LogReg(statistics_calculator, seed=42)

    # define sampling scheme
    from abcpy.inferences import PMCABC
    sampler = PMCABC([height], [distance_calculator], backend, seed=1)

    # sample from scheme
    eps_arr = np.array([.75])
    epsilon_percentile = 10
    journal = sampler.sample([height_obs], steps, eps_arr, n_sample, n_samples_per_param,
                             epsilon_percentile)

    return journal
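# A minimal usage sketch for the function above (an illustrative addition, not part of the original
# snippet): run it with the sequential dummy backend shipped with abcpy and inspect the resulting
# journal; the reduced step/sample counts are assumptions chosen only to keep the run short.
if __name__ == "__main__":
    from abcpy.backends import BackendDummy

    backend = BackendDummy()
    journal = infer_parameters(backend, steps=2, n_sample=50, n_samples_per_param=5)
    # accepted posterior samples for each parameter are stored in the journal by parameter name
    print(journal.get_parameters()['mu'])
    print(journal.get_parameters()['sigma'])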
def test_return_value_Student_T(self):
    N1 = Normal([0.1, 0.01])
    N2 = Normal([0.3, N1])
    graph = Normal([N1, N2])

    Manager = AcceptedParametersManager([graph])
    backend = Backend()
    kernel = JointPerturbationKernel([MultivariateStudentTKernel([N1, N2], df=2)])

    Manager.update_broadcast(backend, [[0.4, 0.09], [0.2, 0.008]], np.array([0.5, 0.2]))
    kernel_parameters = []
    for krnl in kernel.kernels:
        kernel_parameters.append(Manager.get_accepted_parameters_bds_values(krnl.models))
    Manager.update_kernel_values(backend, kernel_parameters)

    mapping, mapping_index = Manager.get_mapping(Manager.model)
    covs = [[[1, 0], [0, 1]], []]
    Manager.update_broadcast(backend, accepted_cov_mats=covs)

    pdf = kernel.pdf(mapping, Manager, Manager.accepted_parameters_bds.value()[1], [0.3, 0.1])
    self.assertTrue(isinstance(pdf, float))
def test_return_value(self):
    B1 = Binomial([10, 0.2])
    N1 = Normal([0.1, 0.01])
    N2 = Normal([0.3, N1])
    graph = Normal([B1, N2])

    Manager = AcceptedParametersManager([graph])
    backend = Backend()
    kernel = DefaultKernel([N1, N2, B1])

    Manager.update_broadcast(backend, [[2, 0.4, 0.09], [3, 0.2, 0.008]], np.array([0.5, 0.2]))
    kernel_parameters = []
    for krnl in kernel.kernels:
        kernel_parameters.append(Manager.get_accepted_parameters_bds_values(krnl.models))
    Manager.update_kernel_values(backend, kernel_parameters)

    mapping, mapping_index = Manager.get_mapping(Manager.model)
    covs = [[[1, 0], [0, 1]], []]
    Manager.update_broadcast(backend, accepted_cov_mats=covs)

    pdf = kernel.pdf(mapping, Manager, 1, [2, 0.3, 0.1])
    self.assertTrue(isinstance(pdf, float))
def test_Student_T(self):
    N1 = Normal([0.1, 0.01])
    N2 = Normal([0.3, N1])
    graph = Normal([N1, N2])

    Manager = AcceptedParametersManager([graph])
    backend = Backend()
    kernel = JointPerturbationKernel([MultivariateStudentTKernel([N1, N2], df=2)])

    Manager.update_broadcast(backend, [[0.27, 0.097], [0.32, 0.012]], np.array([1, 1]))
    kernel_parameters = []
    for krnl in kernel.kernels:
        kernel_parameters.append(Manager.get_accepted_parameters_bds_values(krnl.models))
    Manager.update_kernel_values(backend, kernel_parameters)

    covs = kernel.calculate_cov(Manager)
    print(covs)
    self.assertTrue(len(covs) == 1)
    self.assertTrue(len(covs[0]) == 2)
def test(self):
    B1 = Binomial([10, 0.2])
    N1 = Normal([0.1, 0.01])
    N2 = Normal([0.3, N1])
    graph = Normal([B1, N2])

    Manager = AcceptedParametersManager([graph])
    backend = Backend()
    kernel = DefaultKernel([N1, N2, B1])

    Manager.update_broadcast(backend, [[2, 0.27, 0.097], [3, 0.32, 0.012]], np.array([1, 1]))
    kernel_parameters = []
    for krnl in kernel.kernels:
        kernel_parameters.append(Manager.get_accepted_parameters_bds_values(krnl.models))
    Manager.update_kernel_values(backend, kernel_parameters)

    covs = kernel.calculate_cov(Manager)
    self.assertTrue(len(covs) == 2)
    self.assertTrue(len(covs[0]) == 2)
    self.assertTrue(not covs[1])
def setUp(self):
    # define prior and model
    sigma = Uniform([[10], [20]])
    mu = Normal([0, 1])
    self.Y = Normal([mu, sigma])

    # define backend
    self.backend = Backend()

    # define statistics
    self.statistics_cal = Identity(degree=3, cross=False)

    if has_torch:
        # Initialize statistics learning
        self.statisticslearning = TripletDistanceLearning([self.Y], self.statistics_cal, self.backend,
                                                          n_samples=100, n_samples_per_param=1, seed=1,
                                                          n_epochs=10)