def testSignAdaptation(self):
  """Verifies sign_adaptation scales the control toward the set point.

  With output below the set point the control shrinks by the adaptation
  rate; with output above the set point it grows by the same factor.
  """
  # sign_adaptation relies on eager semantics here; skip under graph mode.
  if not tf.executing_eagerly():
    return
  # (set_point, expected_control) pairs for control=1., output=0.5, rate=0.1.
  for set_point, expected in ((1., 1. / 1.1), (0., 1. * 1.1)):
    adapted = fun_mcmc.sign_adaptation(
        control=1., output=0.5, set_point=set_point, adaptation_rate=0.1)
    self.assertAllClose(adapted, expected)
def testSignAdaptation(self):
  """Verifies sign_adaptation scales the control toward the set point."""
  # Output (0.5) below the set point (1.) -> control is divided by (1 + rate).
  shrunk = fun_mcmc.sign_adaptation(
      control=1., output=0.5, set_point=1., adaptation_rate=0.1)
  self.assertAllClose(shrunk, 1. / 1.1)
  # Output (0.5) above the set point (0.) -> control is multiplied by (1 + rate).
  grown = fun_mcmc.sign_adaptation(
      control=1., output=0.5, set_point=0., adaptation_rate=0.1)
  self.assertAllClose(grown, 1. * 1.1)
def kernel(hmc_state, step_size, step):
  """Runs one HMC transition and adapts the step size from acceptance.

  NOTE(review): reads `num_leapfrog_steps`, `target_log_prob_fn` and
  `num_adapt_steps` from the enclosing scope — confirm against the caller.

  Args:
    hmc_state: Current `fun_mcmc` HMC chain state.
    step_size: Current leapfrog step size.
    step: Integer step counter, used to decay the adaptation rate.

  Returns:
    A pair of ((new_state, new_step_size, step + 1), hmc_extra).
  """
  hmc_state, hmc_extra = fun_mcmc.hamiltonian_monte_carlo(
      hmc_state,
      step_size=step_size,
      num_integrator_steps=num_leapfrog_steps,
      target_log_prob_fn=target_log_prob_fn)

  # Mean acceptance probability; log_accept_ratio is clipped at 0 so
  # exp(...) never exceeds 1.
  p_accept = tf.reduce_mean(
      input_tensor=tf.exp(tf.minimum(0., hmc_extra.log_accept_ratio)))

  # Adaptation rate decays polynomially to zero over num_adapt_steps, so the
  # step size is eventually frozen.
  adapt_rate = tf.compat.v1.train.polynomial_decay(
      0.01,
      global_step=step,
      power=0.5,
      decay_steps=num_adapt_steps,
      end_learning_rate=0.)

  # Nudge the step size up while acceptance exceeds 0.9, down otherwise.
  new_step_size = fun_mcmc.sign_adaptation(
      step_size, output=p_accept, set_point=0.9, adaptation_rate=adapt_rate)
  return (hmc_state, new_step_size, step + 1), hmc_extra