def _setup_sbi(self, prior: Distribution, rollouts_real: Optional[List[StepSequence]] = None):
    """
    Prepare simulator and prior for usage in sbi.

    :param prior: distribution used by sbi as a prior
    :param rollouts_real: list of rollouts recorded from the target domain, which are used to sync the
                          simulations' initial states
    """
    rollout_sampler = SimRolloutSamplerForSBI(
        self._env_sim_sbi,
        self._policy,
        self.dp_mapping,
        self._embedding,
        self.num_segments,
        self.len_segments,
        self.stop_on_done,
        rollouts_real,
        self.use_rec_act,
    )

    # Call sbi's preparation function
    self._sbi_simulator, self._sbi_prior = prepare_for_sbi(rollout_sampler, prior)

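# A minimal sketch (not part of Pyrado) of the same `prepare_for_sbi` call with a
# plain callable in place of `SimRolloutSamplerForSBI`. The toy rollout sampler and
# its dimensionality are illustrative assumptions.
import torch
from sbi.inference import prepare_for_sbi
from sbi.utils import BoxUniform


def _toy_rollout_sampler(dp: torch.Tensor) -> torch.Tensor:
    # Stand-in for a rollout sampler: maps domain parameters to a fake observation.
    return dp + 0.1 * torch.randn_like(dp)


toy_prior = BoxUniform(low=torch.zeros(3), high=torch.ones(3))
# prepare_for_sbi wraps the callable and the prior so they conform to sbi's interfaces.
toy_simulator, toy_prior = prepare_for_sbi(_toy_rollout_sampler, toy_prior)
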
def test_inference_with_user_sbi_problems(
    snpe_method: type, user_simulator: Callable, user_prior
):
    """Test inference with combinations of user-defined simulators, priors, and x_os."""
    simulator, prior = prepare_for_sbi(user_simulator, user_prior)
    inference = snpe_method(
        prior,
        density_estimator="mdn_snpe_a" if snpe_method == SNPE_A else "maf",
        show_progress_bars=False,
    )

    # Run inference.
    theta, x = simulate_for_sbi(simulator, prior, 100)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=2)

    # Build posterior.
    if snpe_method == SNPE_A:
        if not isinstance(prior, (MultivariateNormal, BoxUniform, DirectPosterior)):
            with pytest.raises(AssertionError):
                # SNPE-A only supports MultivariateNormal, BoxUniform, or
                # DirectPosterior priors.
                _ = inference.build_posterior()
        else:
            _ = inference.build_posterior()
    else:
        _ = inference.build_posterior()

def test_train_with_different_data_and_training_device(
    snpe_method: type, data_device, training_device
):
    assert torch.cuda.is_available(), "this test requires that cuda is available."

    num_dim = 2
    prior_ = MultivariateNormal(
        loc=torch.zeros(num_dim), covariance_matrix=torch.eye(num_dim)
    )
    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior_)
    inference = snpe_method(
        prior,
        density_estimator="mdn_snpe_a" if snpe_method == SNPE_A else "maf",
        show_progress_bars=False,
        device=training_device,
    )

    # Run inference.
    theta, x = simulate_for_sbi(simulator, prior, 100)
    theta, x = theta.to(data_device), x.to(data_device)

    inference = inference.append_simulations(theta, x)
    _ = inference.train(max_num_epochs=2)

    # Check that the network weights ended up on the requested training device.
    weights_device = next(inference._neural_net.parameters()).device
    assert torch.device(training_device) == weights_device

    _ = inference.build_posterior()

def test_nograd_after_inference_train(inference_method) -> None:
    num_dim = 2

    prior_ = BoxUniform(-torch.ones(num_dim), torch.ones(num_dim))
    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior_)
    inference = inference_method(
        prior,
        **(
            dict(classifier="resnet")
            if inference_method in [SNRE_A, SNRE_B]
            else dict(
                density_estimator=("mdn_snpe_a" if inference_method == SNPE_A else "maf")
            )
        ),
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator, prior, 32)
    inference = inference.append_simulations(theta, x)
    posterior_estimator = inference.train(max_num_epochs=2)

    def check_no_grad(model):
        for p in model.parameters():
            assert p.grad is None

    check_no_grad(posterior_estimator)
    check_no_grad(inference._neural_net)

def infer(
    simulator: Callable,
    prior: Distribution,
    method: str,
    num_simulations: int,
    num_workers: int = 1,
) -> NeuralPosterior:
    r"""Runs simulation-based inference and returns the posterior.

    This function provides a simple interface to run sbi. Inference is run for a
    single round and hence the returned posterior $p(\theta|x)$ can be sampled and
    evaluated for any $x$ (i.e. it is amortized).

    The scope of this function is limited to the most essential features of sbi. For
    more flexibility (e.g. multi-round inference, different density estimators)
    please use the flexible interface described here:
    https://www.mackelab.org/sbi/tutorial/02_flexible_interface/

    Args:
        simulator: A function that takes parameters $\theta$ and maps them to
            simulations, or observations, `x`, $\mathrm{sim}(\theta)\to x$. Any
            regular Python callable (i.e. function or class with `__call__` method)
            can be used.
        prior: A probability distribution that expresses prior knowledge about the
            parameters, e.g. which ranges are meaningful for them. Any object with
            `.log_prob()` and `.sample()` (for example, a PyTorch distribution) can
            be used.
        method: Which inference method to use. One of SNPE, SNLE, or SNRE.
        num_simulations: Number of simulation calls. More simulations means a longer
            runtime, but a better posterior estimate.
        num_workers: Number of parallel workers to use for simulations.

    Returns:
        Posterior over parameters conditional on observations (amortized).
    """

    try:
        method_fun: Callable = getattr(sbi.inference, method.upper())
    except AttributeError:
        raise NameError(
            "Method not available. `method` must be one of 'SNPE', 'SNLE', 'SNRE'."
        )

    simulator, prior = prepare_for_sbi(simulator, prior)

    inference = method_fun(prior=prior)
    theta, x = simulate_for_sbi(
        simulator=simulator,
        proposal=prior,
        num_simulations=num_simulations,
        num_workers=num_workers,
    )
    _ = inference.append_simulations(theta, x).train()
    posterior = inference.build_posterior()

    return posterior

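# A minimal, hedged usage example for `infer`. The noisy-identity toy simulator
# below is an assumption for illustration; any callable satisfying the contract in
# the docstring can be used.
import torch
from sbi.inference import infer
from sbi.utils import BoxUniform


def toy_simulator(theta: torch.Tensor) -> torch.Tensor:
    # Observations are noisy copies of the parameters.
    return theta + 0.1 * torch.randn_like(theta)


prior = BoxUniform(low=-2 * torch.ones(2), high=2 * torch.ones(2))
posterior = infer(toy_simulator, prior, method="SNPE", num_simulations=200)
# The posterior is amortized, so it can be conditioned on any observation x.
samples = posterior.sample((100,), x=torch.zeros(2))
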
def test_simulate_in_batches(
    num_sims,
    batch_size,
    simulator,
    prior=BoxUniform(zeros(5), ones(5)),
):
    """Test combinations of num_sims and simulation_batch_size."""
    simulator, prior = prepare_for_sbi(simulator, prior)
    theta = prior.sample((num_sims,))
    simulate_in_batches(simulator, theta, batch_size)

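# For reference, a self-contained sketch of the call pattern exercised by the test
# above. The toy simulator, sample count, and batch size are illustrative
# assumptions; the `simulate_in_batches` import path may differ between sbi versions.
import torch
from torch import ones, zeros
from sbi.inference import prepare_for_sbi
from sbi.simulators.simutils import simulate_in_batches
from sbi.utils import BoxUniform


def noisy_identity(theta: torch.Tensor) -> torch.Tensor:
    # Toy simulator: observations are noisy copies of the parameters.
    return theta + 0.1 * torch.randn_like(theta)


simulator, prior = prepare_for_sbi(noisy_identity, BoxUniform(zeros(5), ones(5)))
theta = prior.sample((100,))
# Runs the simulator on 100 parameter sets, 10 per batch.
x = simulate_in_batches(simulator, theta, sim_batch_size=10)
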
def test_inference_with_user_sbi_problems(user_simulator: Callable, user_prior):
    """Test inference with combinations of user-defined simulators, priors, and x_os."""
    simulator, prior = prepare_for_sbi(user_simulator, user_prior)
    inference = SNPE_C(prior, density_estimator="maf", show_progress_bars=False)

    # Run inference.
    theta, x = simulate_for_sbi(simulator, prior, 100)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=2)
    _ = inference.build_posterior()

def test_train_with_different_data_and_training_device(
    inference_method, data_device: str, training_device: str
) -> None:
    assert torch.cuda.is_available(), "this test requires that cuda is available."

    num_dim = 2
    prior_ = BoxUniform(
        -torch.ones(num_dim), torch.ones(num_dim), device=training_device
    )
    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior_)
    inference = inference_method(
        prior,
        **(
            dict(classifier="resnet")
            if inference_method in [SNRE_A, SNRE_B]
            else dict(
                density_estimator=("mdn_snpe_a" if inference_method == SNPE_A else "maf")
            )
        ),
        show_progress_bars=False,
        device=training_device,
    )

    theta, x = simulate_for_sbi(simulator, prior, 32)
    theta, x = theta.to(data_device), x.to(data_device)
    x_o = torch.zeros(x.shape[1])
    inference = inference.append_simulations(theta, x)

    posterior_estimator = inference.train(max_num_epochs=2)

    # Check that the network weights ended up on the requested training device.
    weights_device = next(inference._neural_net.parameters()).device
    assert torch.device(training_device) == weights_device

    _ = DirectPosterior(
        posterior_estimator=posterior_estimator, prior=prior
    ).set_default_x(x_o)

def test_pair_plot_scatter(
    env: SimEnv,
    policy: Policy,
    layout: str,
    labels: Optional[str],
    legend_labels: Optional[str],
    axis_limits: Optional[str],
    use_kde: bool,
    use_trafo: bool,
):
    def _simulator(dp: to.Tensor) -> to.Tensor:
        """The simplest interface of a simulation to sbi, using `env` and `policy` from the outer scope."""
        ro = rollout(
            env, policy, eval=True, reset_kwargs=dict(domain_param=dict(m=dp[0], k=dp[1], d=dp[2]))
        )
        observation_sim = to.from_numpy(ro.observations[-1]).to(dtype=to.float32)
        return to.atleast_2d(observation_sim)

    # Fix the init state
    env.init_space = SingularStateSpace(env.init_space.sample_uniform())
    env_real = deepcopy(env)
    env_real.domain_param = {"mass": 0.8, "stiffness": 15, "d": 0.7}

    # Optionally transform the domain parameters for inference
    if use_trafo:
        env = LogDomainParamTransform(env, mask=["stiffness"])

    # Domain parameter mapping and prior
    dp_mapping = {0: "mass", 1: "stiffness", 2: "d"}
    k_low = np.log(10) if use_trafo else 10
    k_up = np.log(20) if use_trafo else 20
    prior = sbiutils.BoxUniform(low=to.tensor([0.5, k_low, 0.2]), high=to.tensor([1.5, k_up, 0.8]))

    # Learn a likelihood from the simulator
    density_estimator = sbiutils.posterior_nn(model="maf", hidden_features=10, num_transforms=3)
    snpe = SNPE(prior, density_estimator)
    simulator, prior = prepare_for_sbi(_simulator, prior)
    domain_param, data_sim = simulate_for_sbi(
        simulator=simulator, proposal=prior, num_simulations=50, num_workers=1
    )
    snpe.append_simulations(domain_param, data_sim)
    density_estimator = snpe.train(max_num_epochs=5)
    posterior = snpe.build_posterior(density_estimator)

    # Create a fake (random) ground-truth domain parameter
    domain_param_gt = to.tensor(
        [env_real.domain_param[dp_mapping[key]] for key in sorted(dp_mapping.keys())]
    )
    domain_param_gt += domain_param_gt * to.randn(len(dp_mapping)) / 10
    domain_param_gt = domain_param_gt.unsqueeze(0)
    data_real = simulator(domain_param_gt)

    domain_params, log_probs = SBIBase.eval_posterior(
        posterior,
        data_real,
        num_samples=6,
        normalize_posterior=False,
        subrtn_sbi_sampling_hparam=dict(sample_with_mcmc=False),
    )
    dp_samples = [domain_params.reshape(1, -1, domain_params.shape[-1]).squeeze()]

    if layout == "inside":
        num_rows, num_cols = len(dp_mapping), len(dp_mapping)
    else:
        num_rows, num_cols = len(dp_mapping) + 1, len(dp_mapping) + 1

    _, axs = plt.subplots(num_rows, num_cols, figsize=(8, 8), tight_layout=True)
    fig = draw_posterior_pairwise_scatter(
        axs=axs,
        dp_samples=dp_samples,
        dp_mapping=dp_mapping,
        prior=prior if axis_limits == "use_prior" else None,
        env_sim=env,
        env_real=env_real,
        axis_limits=axis_limits,
        marginal_layout=layout,
        labels=labels,
        legend_labels=legend_labels,
        use_kde=use_kde,
    )
    assert fig is not None

def test_pair_plot(
    env: SimEnv,
    policy: Policy,
    layout: str,
    labels: Optional[str],
    prob_labels: Optional[str],
    use_prior: bool,
    use_trafo: bool,
):
    def _simulator(dp: to.Tensor) -> to.Tensor:
        """The simplest interface of a simulation to sbi, using `env` and `policy` from the outer scope."""
        ro = rollout(
            env, policy, eval=True, reset_kwargs=dict(domain_param=dict(m=dp[0], k=dp[1], d=dp[2]))
        )
        observation_sim = to.from_numpy(ro.observations[-1]).to(dtype=to.float32)
        return to.atleast_2d(observation_sim)

    # Fix the init state
    env.init_space = SingularStateSpace(env.init_space.sample_uniform())
    env_real = deepcopy(env)
    env_real.domain_param = {"mass": 0.8, "stiffness": 35, "d": 0.7}

    # Optionally transform the domain parameters for inference
    if use_trafo:
        env = SqrtDomainParamTransform(env, mask=["stiffness"])

    # Domain parameter mapping and prior
    dp_mapping = {0: "mass", 1: "stiffness", 2: "d"}
    prior = sbiutils.BoxUniform(low=to.tensor([0.5, 20, 0.2]), high=to.tensor([1.5, 40, 0.8]))

    # Learn a likelihood from the simulator
    density_estimator = sbiutils.posterior_nn(model="maf", hidden_features=10, num_transforms=3)
    snpe = SNPE(prior, density_estimator)
    simulator, prior = prepare_for_sbi(_simulator, prior)
    domain_param, data_sim = simulate_for_sbi(
        simulator=simulator, proposal=prior, num_simulations=50, num_workers=1
    )
    snpe.append_simulations(domain_param, data_sim)
    density_estimator = snpe.train(max_num_epochs=5)
    posterior = snpe.build_posterior(density_estimator)

    # Create a fake (random) ground-truth domain parameter
    domain_param_gt = to.tensor([env_real.domain_param[key] for _, key in dp_mapping.items()])
    domain_param_gt += domain_param_gt * to.randn(len(dp_mapping)) / 5
    domain_param_gt = domain_param_gt.unsqueeze(0)
    data_real = simulator(domain_param_gt)

    # Get a (random) condition
    condition = Embedding.pack(domain_param_gt.clone())

    if layout == "inside":
        num_rows, num_cols = len(dp_mapping), len(dp_mapping)
    else:
        num_rows, num_cols = len(dp_mapping) + 1, len(dp_mapping) + 1

    if use_prior:
        grid_bounds = None
    else:
        prior = None
        grid_bounds = to.cat(
            [to.zeros((len(dp_mapping), 1)), to.ones((len(dp_mapping), 1))], dim=1
        )

    _, axs = plt.subplots(num_rows, num_cols, figsize=(14, 14), tight_layout=True)
    fig = draw_posterior_pairwise_heatmap(
        axs,
        posterior,
        data_real,
        dp_mapping,
        condition,
        prior=prior,
        env_real=env_real,
        marginal_layout=layout,
        grid_bounds=grid_bounds,
        grid_res=100,
        normalize_posterior=False,
        rescale_posterior=True,
        labels=None if labels is None else [""] * len(dp_mapping),
        prob_labels=prob_labels,
    )
    assert fig is not None