def test_mg1_observations():
    # Simulate the M/G/1 queue model at the true parameters and inspect the
    # marginals of the resulting summary statistics.
    true_parameters = np.array([1.0, 5.0, 0.2])
    num_simulations = 1000
    parameters = np.tile(true_parameters, num_simulations).reshape(-1, 3)
    observations = Stats().calc(Model().sim(parameters))
    print(parameters.shape, observations.shape)
    utils.plot_hist_marginals(
        observations,
        ground_truth=get_ground_truth_observation(),
        show_xticks=True,
    )
    plt.show()
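def test_mg1_parameter_tiling():
    # Small sanity check for the tiling pattern used above: np.tile followed
    # by reshape should yield num_simulations identical copies of the true
    # parameter vector.
    true_parameters = np.array([1.0, 5.0, 0.2])
    num_simulations = 1000
    parameters = np.tile(true_parameters, num_simulations).reshape(-1, 3)
    assert parameters.shape == (num_simulations, 3)
    assert np.allclose(parameters, true_parameters)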
def test_lotka_volterra_prior():
    # Sample the oscillating Lotka-Volterra prior and inspect its marginals.
    d = LotkaVolterraOscillating()
    samples = d.sample((1000,))
    utils.plot_hist_marginals(utils.tensor2numpy(samples), lims=[-6, 3])
    plt.show()
def test_apt_nonlinear_gaussian():
    # Run APT on the nonlinear Gaussian task, then plot posterior samples
    # obtained both by direct sampling and by MCMC.
    task = "nonlinear-gaussian"
    simulator, prior = simulators.get_simulator_and_prior(task)
    parameter_dim, observation_dim = (
        simulator.parameter_dim,
        simulator.observation_dim,
    )
    true_observation = simulator.get_ground_truth_observation()

    neural_posterior = utils.get_neural_posterior(
        "maf", parameter_dim, observation_dim, simulator
    )
    apt = APT(
        simulator=simulator,
        true_observation=true_observation,
        prior=prior,
        neural_posterior=neural_posterior,
        num_atoms=-1,
        use_combined_loss=False,
        train_with_mcmc=False,
        mcmc_method="slice-np",
        summary_net=None,
        retrain_from_scratch_each_round=False,
        discard_prior_samples=False,
    )

    num_rounds, num_simulations_per_round = 20, 1000
    apt.run_inference(
        num_rounds=num_rounds, num_simulations_per_round=num_simulations_per_round
    )

    # Direct posterior samples.
    samples = utils.tensor2numpy(apt.sample_posterior(2500))
    figure = utils.plot_hist_marginals(
        data=samples,
        ground_truth=utils.tensor2numpy(
            simulator.get_ground_truth_parameters()
        ).reshape(-1),
        lims=simulator.parameter_plotting_limits,
    )
    figure.savefig(os.path.join(utils.get_output_root(), "corner-posterior-apt.pdf"))

    # MCMC posterior samples.
    samples = utils.tensor2numpy(apt.sample_posterior_mcmc(num_samples=1000))
    figure = utils.plot_hist_marginals(
        data=samples,
        ground_truth=utils.tensor2numpy(
            simulator.get_ground_truth_parameters()
        ).reshape(-1),
        lims=simulator.parameter_plotting_limits,
    )
    figure.savefig(
        os.path.join(utils.get_output_root(), "corner-posterior-apt-mcmc.pdf")
    )
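def _compare_apt_samplers(apt, num_samples=1000):
    # Optional consistency check (sketch): the direct and MCMC posteriors
    # above should roughly agree; utils.unbiased_mmd_squared (also used in
    # _summarize below) quantifies the discrepancy. Assumes both samplers
    # return tensors of shape (num_samples, parameter_dim), as in the test.
    direct_samples = apt.sample_posterior(num_samples)
    mcmc_samples = apt.sample_posterior_mcmc(num_samples=num_samples)
    return utils.unbiased_mmd_squared(direct_samples, mcmc_samples).item()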
def test_mcmc_lotka_volterra():
    # NUTS sampling of the oscillating Lotka-Volterra distribution under a
    # uniform prior, using Pyro's MCMC machinery.
    prior = distributions.Uniform(low=-5 * torch.ones(4), high=2 * torch.ones(4))

    from nsf import distributions as distributions_

    likelihood = distributions_.LotkaVolterraOscillating()
    potential_function = PotentialFunction(likelihood, prior)

    from pyro.infer.mcmc import HMC, NUTS

    # Alternative kernels:
    # kernel = Slice(potential_function=potential_function)
    # kernel = HMC(potential_fn=potential_function)
    kernel = NUTS(potential_fn=potential_function)

    num_chains = 3
    sampler = MCMC(
        kernel=kernel,
        num_samples=10000 // num_chains,
        warmup_steps=200,
        initial_params={"": torch.zeros(num_chains, 4)},
        num_chains=num_chains,
    )
    sampler.run()
    samples = next(iter(sampler.get_samples().values()))

    utils.plot_hist_marginals(utils.tensor2numpy(samples), lims=[-6, 3])
    plt.savefig(os.path.join(utils.get_output_root(), "mcmc.pdf"))
    plt.close()
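class _PotentialFunctionSketch:
    # Minimal sketch of the PotentialFunction wrapper used above. Pyro's
    # HMC/NUTS kernels call potential_fn with a dict mapping site names to
    # parameter tensors and expect the *negative* log density (potential
    # energy) back. The body is an assumption based on that convention, not
    # the actual implementation.
    def __init__(self, likelihood, prior):
        self._likelihood = likelihood
        self._prior = prior

    def __call__(self, inputs_dict):
        parameters = next(iter(inputs_dict.values()))
        return -(
            self._likelihood.log_prob(parameters)
            + self._prior.log_prob(parameters).sum()
        )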
def sample_true_posterior(simulator, true_observation, true_parameters):
    # Slice-sample the true posterior of the Gaussian task: the unnormalized
    # log posterior is the simulator log likelihood at the true observation
    # plus the uniform prior log density.
    prior = distributions.Uniform(low=-3 * torch.ones(5), high=3 * torch.ones(5))

    def potential_function(parameters):
        return (
            simulator.log_prob(observations=true_observation, parameters=parameters)
            + prior.log_prob(torch.Tensor(parameters)).sum().item()
        )

    sampler = SliceSampler(x=true_parameters, lp_f=potential_function, thin=10)
    sampler.gen(200)  # Burn-in.
    samples = sampler.gen(2500)

    figure = utils.plot_hist_marginals(
        samples, ground_truth=true_parameters, lims=[-4, 4]
    )
    np.save(
        os.path.join(utils.get_output_root(), "true-posterior-samples-gaussian.npy"),
        samples,
    )
    plt.show()
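# Example invocation of sample_true_posterior (sketch): the ground-truth
# quantities are assumed to come from the nonlinear-gaussian task, using the
# same accessors as the tests in this file.
#
#   simulator, _ = simulators.get_simulator_and_prior("nonlinear-gaussian")
#   sample_true_posterior(
#       simulator=simulator,
#       true_observation=simulator.get_ground_truth_observation(),
#       true_parameters=utils.tensor2numpy(
#           simulator.get_ground_truth_parameters()
#       ).reshape(-1),
#   )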
def main():
    # Run SNL on the M/G/1 task and plot the resulting posterior samples.
    task = "mg1"
    simulator, prior = simulators.get_simulator_and_prior(task)
    parameter_dim, observation_dim = (
        simulator.parameter_dim,
        simulator.observation_dim,
    )
    true_observation = simulator.get_ground_truth_observation()

    neural_likelihood = utils.get_neural_likelihood(
        "maf", parameter_dim, observation_dim
    )
    snl = SNL(
        simulator=simulator,
        true_observation=true_observation,
        prior=prior,
        neural_likelihood=neural_likelihood,
        mcmc_method="slice-np",
    )

    num_rounds, num_simulations_per_round = 10, 1000
    snl.run_inference(
        num_rounds=num_rounds, num_simulations_per_round=num_simulations_per_round
    )

    samples = utils.tensor2numpy(snl.sample_posterior(1000))
    figure = utils.plot_hist_marginals(
        data=samples,
        ground_truth=utils.tensor2numpy(
            simulator.get_ground_truth_parameters()
        ).reshape(-1),
        lims=simulator.parameter_plotting_limits,
    )
    figure.savefig(os.path.join(utils.get_output_root(), "corner-posterior-snl.pdf"))
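if __name__ == "__main__":
    # Allow the SNL run above to be launched directly as a script.
    main()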
def test_lotka_volterra_observations():
    # Simulate the Lotka-Volterra model at the true (log) parameters and
    # inspect the marginals of the resulting summary statistics.
    num_simulations = 250
    true_parameters = np.log([0.01, 0.5, 1.0, 0.01])
    parameters, observations = sim_data(
        gen_params=lambda n_samples, rng: np.tile(true_parameters, n_samples).reshape(
            -1, 4
        ),
        sim_model=lambda parameters, rng: Stats().calc(
            Model().sim(parameters, rng=rng)
        ),
        n_samples=num_simulations,
    )
    print(parameters.shape, observations.shape)
    # utils.plot_hist_marginals(parameters, ground_truth=true_parameters)
    utils.plot_hist_marginals(
        observations,
        show_xticks=True,
        ground_truth=get_ground_truth_observation(),
    )
    plt.show()
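def _sim_data_sketch(gen_params, sim_model, n_samples, rng=np.random):
    # Minimal sketch of the sim_data helper called above, inferred only from
    # its call signature here: draw a batch of parameters, push them through
    # the simulator, and return both. The real helper may batch differently
    # or parallelize.
    parameters = gen_params(n_samples, rng)
    observations = sim_model(parameters, rng)
    return parameters, observations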
def test_(): task = "lotka-volterra" simulator, prior = simulators.get_simulator_and_prior(task) parameter_dim, observation_dim = ( simulator.parameter_dim, simulator.observation_dim, ) true_observation = simulator.get_ground_truth_observation() classifier = utils.get_classifier("mlp", parameter_dim, observation_dim) ratio_estimator = SRE( simulator=simulator, true_observation=true_observation, classifier=classifier, prior=prior, num_atoms=-1, mcmc_method="slice-np", retrain_from_scratch_each_round=False, ) num_rounds, num_simulations_per_round = 10, 1000 ratio_estimator.run_inference( num_rounds=num_rounds, num_simulations_per_round=num_simulations_per_round) samples = ratio_estimator.sample_posterior(num_samples=2500) samples = utils.tensor2numpy(samples) figure = utils.plot_hist_marginals( data=samples, ground_truth=utils.tensor2numpy( simulator.get_ground_truth_parameters()).reshape(-1), lims=[-4, 4], ) figure.savefig( os.path.join(utils.get_output_root(), "corner-posterior-ratio.pdf")) mmds = ratio_estimator.summary["mmds"] if mmds: figure, axes = plt.subplots(1, 1) axes.plot( np.arange(0, num_rounds * num_simulations_per_round, num_simulations_per_round), np.array(mmds), "-o", linewidth=2, ) figure.savefig(os.path.join(utils.get_output_root(), "mmd-ratio.pdf"))
def _summarize(self, round_):
    # Update summaries.
    try:
        mmd = utils.unbiased_mmd_squared(
            self._parameter_bank[-1],
            self._simulator.get_ground_truth_posterior_samples(num_samples=1000),
        )
        self._summary["mmds"].append(mmd.item())
    except Exception:
        # Ground-truth posterior samples are not available for every simulator.
        pass

    # Median distance from simulated observations to the true observation.
    median_observation_distance = torch.median(
        torch.sqrt(
            torch.sum(
                (self._observation_bank[-1] - self._true_observation.reshape(1, -1))
                ** 2,
                dim=-1,
            )
        )
    )
    self._summary["median-observation-distances"].append(
        median_observation_distance.item()
    )

    # KDE-based negative log probability of the true parameters under the
    # most recent posterior samples.
    negative_log_prob_true_parameters = -utils.gaussian_kde_log_eval(
        samples=self._parameter_bank[-1],
        query=self._simulator.get_ground_truth_parameters().reshape(1, -1),
    )
    self._summary["negative-log-probs-true-parameters"].append(
        negative_log_prob_true_parameters.item()
    )

    # Plot most recently sampled parameters in TensorBoard.
    parameters = utils.tensor2numpy(self._parameter_bank[-1])
    figure = utils.plot_hist_marginals(
        data=parameters,
        ground_truth=utils.tensor2numpy(
            self._simulator.get_ground_truth_parameters()
        ).reshape(-1),
        lims=self._simulator.parameter_plotting_limits,
    )
    self._summary_writer.add_figure(
        tag="posterior-samples", figure=figure, global_step=round_ + 1
    )

    # Log scalar summaries.
    self._summary_writer.add_scalar(
        tag="epochs-trained",
        scalar_value=self._summary["epochs"][-1],
        global_step=round_ + 1,
    )
    self._summary_writer.add_scalar(
        tag="best-validation-log-prob",
        scalar_value=self._summary["best-validation-log-probs"][-1],
        global_step=round_ + 1,
    )
    self._summary_writer.add_scalar(
        tag="median-observation-distance",
        scalar_value=self._summary["median-observation-distances"][-1],
        global_step=round_ + 1,
    )
    self._summary_writer.add_scalar(
        tag="negative-log-prob-true-parameters",
        scalar_value=self._summary["negative-log-probs-true-parameters"][-1],
        global_step=round_ + 1,
    )
    if self._summary["mmds"]:
        self._summary_writer.add_scalar(
            tag="mmd",
            scalar_value=self._summary["mmds"][-1],
            global_step=round_ + 1,
        )
    self._summary_writer.flush()
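def _unbiased_mmd_squared_sketch(x, y, bandwidth=1.0):
    # Sketch of an unbiased MMD^2 estimate (Gretton et al.) with a Gaussian
    # kernel, in the spirit of utils.unbiased_mmd_squared used above; the
    # kernel choice and fixed bandwidth here are assumptions, and the real
    # utility may select the bandwidth adaptively.
    m, n = x.shape[0], y.shape[0]

    def kernel(a, b):
        return torch.exp(-torch.cdist(a, b) ** 2 / (2 * bandwidth ** 2))

    k_xx, k_yy, k_xy = kernel(x, x), kernel(y, y), kernel(x, y)
    term_xx = (k_xx.sum() - k_xx.diagonal().sum()) / (m * (m - 1))
    term_yy = (k_yy.sum() - k_yy.diagonal().sum()) / (n * (n - 1))
    return term_xx + term_yy - 2 * k_xy.sum() / (m * n)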