def build_custom_post_net(batch_theta, batch_x):
    """Build and return the posterior flow network.

    ``func.set_up_networks`` returns a (likelihood, posterior) pair; the
    likelihood flow is discarded here and only the posterior flow is kept.

    Args:
        batch_theta: unused; present to satisfy the builder-callback signature.
        batch_x: unused; present to satisfy the builder-callback signature.

    Returns:
        The freshly constructed posterior normalizing flow.
    """
    # NOTE(review): this builder reads the prior bounds via
    # ``model.prior.base_dist`` while other setup code in the project uses
    # ``model.prior`` directly — confirm which prior wrapper is in use here.
    _, post_flow = func.set_up_networks(
        model.prior.base_dist.low,
        model.prior.base_dist.high,
        dim_post=model.nbr_params,
    )
    return post_flow
# w_sim_wrapper = lambda param: torch.as_tensor(func.whiten(simulator_wrapper(param), whiteness_params)) def simulator(params): N = params.shape[0] data = torch.zeros(params.shape[0], 19) for i in range(N): data[i, :] = torch.as_tensor( func.whiten(simulator_wrapper(params[i, :]), whiteness_params)) return data flow_lik, flow_post = func.set_up_networks(model.prior.low, model.prior.high, dim_post=model.nbr_params) # setting for not exteded: # decay_rate_post = 0.95 # prob_prior_decay_rate = 0.9 # 1000, 10000 # setting for exteded: # decay_rate_post = 0.9 # prob_prior_decay_rate = 0.9 # 2000, 10000 optimizer_lik = torch.optim.Adam(flow_lik.parameters()) optimizer_post = torch.optim.Adam(flow_post.parameters(), lr=0.001,
def build_custom_like_net(batch_theta, batch_x):
    """Build and return the likelihood flow network.

    ``func.set_up_networks`` returns a (likelihood, posterior) pair; the
    posterior flow is discarded here and only the likelihood flow is kept.

    Args:
        batch_theta: unused; present to satisfy the builder-callback signature.
        batch_x: unused; present to satisfy the builder-callback signature.

    Returns:
        The freshly constructed likelihood normalizing flow.
    """
    # ``seed`` comes from the enclosing module's scope.
    like_flow, _ = func.set_up_networks(seed)
    return like_flow
s_of_theta = model.model_sim(theta, True) return func.normalize_summary_stats(s_of_theta, m_s_of_prior, s_s_of_prior) hyper_params = [0.001, 0.001, 0.9, 0.9] # lr_like, lr_post, gamma_post, gamma if lambda_val > 0: hyper_params[-1] = lambda_val if hp_tuning >= 2: hyper_params = func.sample_hp("snpla", hp_tuning) print(hyper_params) flow_lik, flow_post = func.set_up_networks() optimizer_lik = torch.optim.Adam(flow_lik.parameters(), lr=hyper_params[0]) optimizer_post = torch.optim.Adam(flow_post.parameters(), lr=hyper_params[1], weight_decay=0.0) # used def value before decay_rate_post = hyper_params[2] # no adaptation of Adam's base rate nbr_rounds = 5 prob_prior_decay_rate = hyper_params[3] prob_prior = snpla.calc_prob_prior(nbr_rounds, prob_prior_decay_rate) print(prob_prior) nbr_lik = [1000, 1000, 1000, 1000, 1000] # , 2000, 2000] nbr_epochs_lik = [50, 50, 50, 50, 50] batch_size = 25 batch_size_post = 2000 nbr_post = [10000, 10000, 10000, 10000, 10000] # , 10000, 10000]