def _test(distribution, bijector, n):
  x = TransformedDistribution(distribution=distribution,
                              bijector=bijector,
                              validate_args=True)
  val_est = get_dims(x.sample(n))
  val_true = n + get_dims(distribution.mean())
  assert val_est == val_true
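# Hypothetical usage of the shape-check helper above (the arguments are
# illustrative, not from the original): sampling [100] draws from a
# log-normal built as exp(Normal) should yield dims [100] + base dims.
_test(Normal(0.0, 1.0), tf.contrib.distributions.bijectors.Exp(), [100])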
def test_auto_transform_true(self):
  with self.test_session() as sess:
    # Match normal || softplus-inverse-normal distribution with
    # automated transformation on latter (assuming it is softplus).
    x = TransformedDistribution(
        distribution=Normal(0.0, 0.5),
        bijector=tf.contrib.distributions.bijectors.Softplus())
    x.support = 'nonnegative'
    qx = Normal(loc=tf.Variable(tf.random_normal([])),
                scale=tf.nn.softplus(tf.Variable(tf.random_normal([]))))

    inference = ed.KLqp({x: qx})
    inference.initialize(auto_transform=True, n_samples=5, n_iter=1000)
    tf.global_variables_initializer().run()
    for _ in range(inference.n_iter):
      info_dict = inference.update()

    # Check approximation on constrained space has same moments as
    # target distribution.
    n_samples = 10000
    x_mean, x_var = tf.nn.moments(x.sample(n_samples), 0)
    x_unconstrained = inference.transformations[x]
    qx_constrained = transform(qx, bijectors.Invert(x_unconstrained.bijector))
    qx_mean, qx_var = tf.nn.moments(qx_constrained.sample(n_samples), 0)
    stats = sess.run([x_mean, qx_mean, x_var, qx_var])
    self.assertAllClose(info_dict['loss'], 0.0, rtol=0.2, atol=0.2)
    self.assertAllClose(stats[0], stats[1], rtol=1e-1, atol=1e-1)
    self.assertAllClose(stats[2], stats[3], rtol=1e-1, atol=1e-1)
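# Note on the test above: with auto_transform=True, KLqp optimizes qx on
# the unconstrained real line, and inference.transformations[x] records
# the unconstrained counterpart of x; inverting its bijector (via
# bijectors.Invert) maps qx back onto x's nonnegative support before the
# moments are compared.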
def test_hmc_default(self):
  with self.test_session() as sess:
    x = TransformedDistribution(
        distribution=Normal(1.0, 1.0),
        bijector=tf.contrib.distributions.bijectors.Softplus())
    x.support = 'nonnegative'

    inference = ed.HMC([x])
    inference.initialize(auto_transform=True, step_size=0.8)
    tf.global_variables_initializer().run()
    for _ in range(inference.n_iter):
      info_dict = inference.update()
      inference.print_progress(info_dict)

    # Check approximation on constrained space has same moments as
    # target distribution.
    n_samples = 10000
    x_unconstrained = inference.transformations[x]
    qx = inference.latent_vars[x_unconstrained]
    qx_constrained = Empirical(x_unconstrained.bijector.inverse(qx.params))
    x_mean, x_var = tf.nn.moments(x.sample(n_samples), 0)
    qx_mean, qx_var = tf.nn.moments(qx_constrained.params[500:], 0)
    stats = sess.run([x_mean, qx_mean, x_var, qx_var])
    self.assertAllClose(stats[0], stats[1], rtol=1e-1, atol=1e-1)
    self.assertAllClose(stats[2], stats[3], rtol=1e-1, atol=1e-1)
def _test(base_dist_cls, transform, inverse, log_det_jacobian, n,
          **base_dist_args):
  x = TransformedDistribution(base_dist_cls=base_dist_cls,
                              transform=transform,
                              inverse=inverse,
                              log_det_jacobian=log_det_jacobian,
                              **base_dist_args)
  val_est = get_dims(x.sample(n))
  val_true = n + get_dims(base_dist_args['mu'])
  assert val_est == val_true
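# Note: this helper targets the legacy Edward TransformedDistribution
# signature, which took a base distribution class plus explicit
# transform/inverse/log_det_jacobian callables; the bijector-based helper
# at the top of this section is its tf.contrib.distributions counterpart.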
def test_hmc_default(self):
  with self.test_session() as sess:
    x = TransformedDistribution(
        distribution=Normal(1.0, 1.0),
        bijector=tf.contrib.distributions.bijectors.Softplus())
    x.support = 'nonnegative'

    inference = ed.HMC([x])
    inference.initialize(auto_transform=True, step_size=0.8)
    tf.global_variables_initializer().run()
    for _ in range(inference.n_iter):
      info_dict = inference.update()
      inference.print_progress(info_dict)

    # Check approximation on constrained space has same moments as
    # target distribution.
    n_samples = 1000
    qx_constrained = inference.latent_vars[x]
    x_mean, x_var = tf.nn.moments(x.sample(n_samples), 0)
    qx_mean, qx_var = tf.nn.moments(qx_constrained.params[500:], 0)
    stats = sess.run([x_mean, qx_mean, x_var, qx_var])
    self.assertAllClose(stats[0], stats[1], rtol=1e-1, atol=1e-1)
    self.assertAllClose(stats[2], stats[3], rtol=1e-1, atol=1e-1)
def test_hmc_custom(self):
  with self.test_session() as sess:
    x = TransformedDistribution(
        distribution=Normal(1.0, 1.0),
        bijector=tf.contrib.distributions.bijectors.Softplus())
    x.support = 'nonnegative'
    qx = Empirical(tf.Variable(tf.random_normal([1000])))

    inference = ed.HMC({x: qx})
    inference.initialize(auto_transform=True, step_size=0.8)
    tf.global_variables_initializer().run()
    for _ in range(inference.n_iter):
      info_dict = inference.update()

    # Check approximation on constrained space has same moments as
    # target distribution.
    n_samples = 10000
    x_unconstrained = inference.transformations[x]
    qx_constrained_params = x_unconstrained.bijector.inverse(qx.params)
    x_mean, x_var = tf.nn.moments(x.sample(n_samples), 0)
    qx_mean, qx_var = tf.nn.moments(qx_constrained_params[500:], 0)
    stats = sess.run([x_mean, qx_mean, x_var, qx_var])
    self.assertAllClose(stats[0], stats[1], rtol=1e-1, atol=1e-1)
    self.assertAllClose(stats[2], stats[3], rtol=1e-1, atol=1e-1)
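# Note on the HMC tests above: the chain runs on the unconstrained image
# of x (here softplus-inverse of x, so samples live on all of R), and the
# stored bijector's inverse maps the Empirical params back onto the
# nonnegative support; the first 500 draws are discarded as burn-in.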
n_rep = 100  # number of replicated datasets we generate
holdout_gen = np.zeros((n_rep, x_train.shape[0], x_train.shape[1]))

for i in range(n_rep):
  x_generated = x_post.sample().eval()
  # look only at the heldout entries
  holdout_gen[i] = np.multiply(x_generated, holdout_mask)

n_eval = 10  # we draw samples from the inferred U and V
obs_ll = []
rep_ll = []
pbar = Progbar(n_eval)
for j in range(n_eval):
  U_sample = U_post.sample().eval()
  V_sample = V_post.sample().eval()
  holdoutmean_sample = np.multiply(U_sample.dot(V_sample.T), holdout_mask)
  obs_ll.append(
      np.mean(np.ma.masked_invalid(
          stats.poisson.logpmf(np.array(x_vad, dtype=int),
                               holdoutmean_sample)), axis=1))
  rep_ll.append(
      np.mean(np.ma.masked_invalid(
          stats.poisson.logpmf(holdout_gen, holdoutmean_sample)), axis=2))
  pbar.update(j)
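# A minimal sketch (not part of the original) of how obs_ll and rep_ll
# might be turned into per-row posterior predictive p-values: average the
# log-likelihoods over the n_eval posterior draws, then count how often a
# replicated dataset scores higher than the observed heldout data.
obs_ll_per_row = np.mean(np.array(obs_ll), axis=0)        # (n_rows,)
rep_ll_per_row = np.mean(np.array(rep_ll), axis=0)        # (n_rep, n_rows)
pvals = np.mean(rep_ll_per_row > obs_ll_per_row, axis=0)  # (n_rows,)
print("average heldout p-value: {:.3f}".format(np.mean(pvals)))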