def testVerifyTrainableVariables(self):
    num_levels = 3
    flow = GlowFlow(
        level=num_levels, level_depth=2, validate_args=False)
    event_dims = (64, 64, 8)
    x = tf.random_uniform(
        (self.batch_size, ) + event_dims, dtype=tf.float32)
    flow.forward(x)
    trainable_variables = tf.trainable_variables()

    raise NotImplementedError(
        "Should test that the trainable variables match expectation")

    with self.test_session():
        self.assertMatchSnapshot(trainable_variables)
def testSingleLevelShapes(self):
    num_levels = 1
    flow = GlowFlow(
        level=num_levels, level_depth=2, validate_args=False)
    event_dims = (64, 64, 8)
    x = tf.random_uniform(
        (self.batch_size, ) + event_dims, dtype=tf.float32)
    z = flow.forward(x)

    self.assertEqual(x.shape, z.shape)
def testBijective(self):
    flow = GlowFlow(level=3, level_depth=3, validate_args=False)
    x = tf.random_uniform(
        (self.batch_size, ) + self.event_dims, dtype=tf.float32)

    # Round-trip through the flow: inverse followed by forward should
    # recover the input up to numerical precision.
    z = flow.inverse(x)
    x_ = flow.forward(tf.identity(z))

    self.assertEqual(z.shape, x.shape)

    with self.test_session() as sess:
        x_val, x_recovered = sess.run([x, x_])
    self.assertAllClose(x_val, x_recovered)
def testParallelPassthrough(self):
    num_levels = 3
    flow = GlowFlow(
        level=num_levels, level_depth=2, validate_args=False)
    event_dims = (64, 64, 8)
    x = tf.random_uniform(
        (self.batch_size, ) + event_dims, dtype=tf.float32)
    z = flow.forward(x)

    # Apply only the first two and the last flow steps. The channels that
    # are split off and passed through early should agree with the full
    # flow, while the remaining channels should differ.
    z2 = tfb.Chain(list(reversed(
        flow.flow_steps[:2] + flow.flow_steps[-1:]
    ))).forward(x)

    with self.test_session() as sess:
        z_val, z2_val = sess.run([z, z2])

    passthrough_channels = event_dims[-1] // 2
    self.assertTrue((z_val != z2_val).any())
    self.assertAllEqual(
        z_val[..., :passthrough_channels],
        z2_val[..., :passthrough_channels])

    # TODO: Test that the passthrough values match expectation at every
    # level, not just after the first split.
def testForward(self):
    flow = GlowFlow(level=3, level_depth=3, validate_args=False)
    x = tf.random_uniform(
        (self.batch_size, ) + self.event_dims, dtype=tf.float32)

    # Smoke test: building the forward pass should not raise.
    z = flow.forward(x)
def model_fn(features, labels, mode, params, config):
    """Build the glow flow model function for use in an estimator.

    Arguments:
        features: The input features for the estimator.
        labels: The labels, unused here.
        mode: Signifies whether this is train, eval, or predict mode.
        params: A dictionary of hyperparameters.
        config: The RunConfig, unused here.

    Returns:
        EstimatorSpec: A tf.estimator.EstimatorSpec instance.
    """
    base_distribution = tfd.MultivariateNormalDiag(
        loc=tf.zeros(features.shape[-3:]),
        scale_diag=tf.ones(features.shape[-3:]))

    glow_flow = GlowFlow(
        level=params['num_levels'],
        level_depth=params['level_depth'])

    transformed_glow_flow = tfd.TransformedDistribution(
        distribution=base_distribution,
        bijector=glow_flow,
        name="transformed_glow_flow")

    image_tile_summary("input", tf.to_float(features), rows=1, cols=16)

    z = glow_flow.forward(features)

    prior_log_probs = base_distribution.log_prob(z)
    prior_log_likelihood = -tf.reduce_mean(prior_log_probs)

    log_det_jacobians = glow_flow.log_det_jacobians(features)
    # Change of variables: the data log probability is the prior log
    # probability of the latent plus the log determinant of the Jacobian.
    log_probs = prior_log_probs + log_det_jacobians

    # Sanity check, remove when tested. A plain Python `assert` on a tensor
    # is always truthy in graph mode, so use a graph-level assertion instead.
    consistent_log_probs = tf.assert_near(
        log_probs, transformed_glow_flow.log_prob(features))
    with tf.control_dependencies([consistent_log_probs]):
        log_probs = tf.identity(log_probs)

    negative_log_likelihood = -tf.reduce_mean(log_probs)
    bpd = bits_per_dim(negative_log_likelihood, features.shape[-3:])

    loss = negative_log_likelihood

    tf.summary.scalar("negative_log_likelihood",
                      tf.reshape(negative_log_likelihood, []))
    tf.summary.scalar("bits_per_dim", tf.reshape(bpd, []))

    # TODO: prior likelihood and log det jacobians?
    # tf.summary.scalar("prior_ll", tf.reshape(tf.reduce_mean(prior_ll), []))

    # Per-example l2 norm of the latent codes.
    z_l2 = tf.norm(tf.layers.flatten(z), axis=1)
    z_l2_mean, z_l2_var = tf.nn.moments(z_l2, axes=[0])
    log_det_jacobians_mean, log_det_jacobians_var = tf.nn.moments(
        log_det_jacobians, axes=[0])
    prior_log_likelihood_mean, prior_log_likelihood_var = tf.nn.moments(
        prior_log_probs, axes=[0])

    tf.summary.scalar("log_det_jacobians_mean",
                      tf.reshape(log_det_jacobians_mean, []))
    tf.summary.scalar("log_det_jacobians_var",
                      tf.reshape(log_det_jacobians_var, []))
    tf.summary.scalar("prior_log_likelihood_mean",
                      tf.reshape(prior_log_likelihood_mean, []))
    tf.summary.scalar("prior_log_likelihood_var",
                      tf.reshape(prior_log_likelihood_var, []))
    tf.summary.scalar("z_l2_mean", tf.reshape(z_l2_mean, []))
    tf.summary.scalar("z_l2_var", tf.reshape(z_l2_var, []))

    # Generate samples for visualization.
    random_image = transformed_glow_flow.sample(16)
    image_tile_summary(
        "random/sample", tf.to_float(random_image), rows=4, cols=4)

    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.train.cosine_decay(
        params['learning_rate'], global_step, params['max_steps'])
    tf.summary.scalar("learning_rate", learning_rate)

    optimizer = tf.train.AdamOptimizer(learning_rate)

    gradients, variables = zip(*optimizer.compute_gradients(loss))
    capped_gradients, gradient_norm = tf.clip_by_global_norm(
        gradients, clip_norm=params['clip_gradient'])
    capped_gradients_and_variables = list(zip(capped_gradients, variables))
    train_op = optimizer.apply_gradients(
        capped_gradients_and_variables, global_step=global_step)

    gradient_norm = tf.check_numerics(
        gradient_norm, "Gradient norm contains NaNs or Infs.")
    tf.summary.scalar("gradient_norm", tf.reshape(gradient_norm, []))

    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops={
            "log_probs": tf.metrics.mean(log_probs),
        })
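# `bits_per_dim` is called in `model_fn` above but not defined in this file.
# The sketch below is an assumption about what it computes, based on the
# standard conversion from a mean negative log-likelihood in nats to bits per
# dimension; the actual helper may differ.
def bits_per_dim(negative_log_likelihood, event_shape):
    """Convert a mean NLL (in nats) to bits per dimension (assumed helper)."""
    num_dims = 1.0
    for dim in event_shape:
        num_dims *= int(dim)
    # Dividing by log(2) converts nats to bits.
    return negative_log_likelihood / (num_dims * tf.log(2.0))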
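# `image_tile_summary` is also referenced above without a definition here. A
# minimal sketch, assuming it tiles the first `rows * cols` images of a
# [batch, height, width, channels] tensor into one grid and writes an image
# summary; the real helper may be implemented differently.
def image_tile_summary(name, tensor, rows=8, cols=8):
    """Tile a batch of images into a rows x cols grid summary (assumed)."""
    images = tensor[:rows * cols]
    height, width, channels = images.shape.as_list()[1:]
    # [rows * cols, h, w, c] -> [rows, cols, h, w, c] -> [rows, h, cols, w, c]
    grid = tf.reshape(images, [rows, cols, height, width, channels])
    grid = tf.transpose(grid, [0, 2, 1, 3, 4])
    # Collapse the grid into a single [1, rows * h, cols * w, c] image.
    grid = tf.reshape(grid, [1, rows * height, cols * width, channels])
    return tf.summary.image(name, grid, max_outputs=1)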
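# A sketch of how `model_fn` might be wired into a `tf.estimator.Estimator`.
# The `params` keys are the ones `model_fn` reads; the particular values,
# `train_input_fn`, and `model_dir` are placeholders, not taken from the
# original code.
def train_glow_estimator(train_input_fn, model_dir):
    params = {
        'num_levels': 3,
        'level_depth': 2,
        'learning_rate': 1e-3,
        'max_steps': 100000,
        'clip_gradient': 5.0,
    }
    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=model_dir,
        params=params)
    estimator.train(input_fn=train_input_fn, max_steps=params['max_steps'])
    return estimator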