Code Example #1
    def testBijective(self):
        # forward(inverse(x)) should reproduce x up to floating-point error.
        flow = GlowFlow(level=3, level_depth=3, validate_args=False)
        x = tf.random_uniform(
            (self.batch_size,) + self.event_dims, dtype=tf.float32)
        z = flow.inverse(x)
        x_ = flow.forward(tf.identity(z))

        self.assertEqual(z.shape, x.shape)

        with self.test_session():
            # Exact float equality is too strict for a round-trip through
            # the flow; compare within tolerance instead.
            self.assertAllClose(x, x_)
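For context, both tests assume a setUp that defines self.batch_size and self.event_dims. A minimal sketch of such a fixture, with import path and values assumed for illustration only:

import tensorflow as tf
from glow_flow import GlowFlow  # assumed import path

class GlowFlowTest(tf.test.TestCase):

    def setUp(self):
        super(GlowFlowTest, self).setUp()
        self.batch_size = 2
        # Glow operates on image-shaped events: (height, width, channels).
        self.event_dims = (32, 32, 3)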
Code Example #2
    def testInverse(self):
        # Exercise the data -> latent (inverse) pass in isolation.
        flow = GlowFlow(level=3, level_depth=3, validate_args=False)
        x = tf.random_uniform(
            (self.batch_size,) + self.event_dims, dtype=tf.float32)
        z = flow.inverse(x)
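A reminder of the TFP bijector convention these tests rely on: inverse maps data x to the latent z, while forward maps z back to x. A tiny illustration with a stock bijector (not from the project):

import tensorflow as tf
import tensorflow_probability as tfp

tf.enable_eager_execution()

bijector = tfp.bijectors.Exp()
x = tf.constant([1.0, 2.0])
z = bijector.inverse(x)             # log(x): data -> latent
x_recovered = bijector.forward(z)   # exp(z): latent -> data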
Code Example #3
File: train.py  Project: olegmyrk/glow-flow
def model_fn(features, labels, mode, params, config):
    """Build the glow flow model function for use in an estimator.

    Arguments:
        features: The input features for the estimator.
        labels: The labels, unused here.
        mode: Signifies whether it is train or test or predict.
        params: Some hyperparameters as a dictionary.
        config: The RunConfig, unused here.
        Returns:
        EstimatorSpec: A tf.estimator.EstimatorSpec instance.
    """
    # Standard-normal base distribution over the flattened event.
    base_distribution = tfd.MultivariateNormalDiag(
        loc=tf.zeros(np.prod(features.shape[-3:])),
        scale_diag=tf.ones(np.prod(features.shape[-3:])))

    glow_flow = GlowFlow(
        num_levels=params['num_levels'],
        level_depth=params['level_depth'])

    #glow_flow = GlowStep(
    #                # Infer at the time of first forward
    #                input_shape=None,
    #                depth=params['level_depth'],
    #                name="glow_step")

    # Reshape flat base samples to image shape, then apply the Glow flow.
    transformed_glow_flow = tfd.TransformedDistribution(
        distribution=tfd.TransformedDistribution(
            distribution=base_distribution,
            bijector=tfp.bijectors.Reshape(
                event_shape_out=features.shape[-3:],
                event_shape_in=[np.prod(features.shape[-3:])])),
        bijector=glow_flow,
        name="transformed_glow_flow")

    image_tile_summary("input", tf.to_float(features), rows=1, cols=16)

    # Change of variables: log p(x) = log p(z) + log|det dz/dx|.
    z = tf.reshape(
        glow_flow.inverse(features), [-1, np.prod(features.shape[-3:])])
    prior_log_probs = base_distribution.log_prob(z)
    prior_log_likelihood = -tf.reduce_mean(prior_log_probs)
    log_det_jacobians = glow_flow.inverse_log_det_jacobian(
        features, event_ndims=3)
    log_probs = prior_log_probs + log_det_jacobians

    # Sanity check, remove when tested: a bare tf.equal inside
    # control_dependencies is never evaluated as an assertion, so use an
    # explicit assert op instead.
    assert_log_probs = tf.assert_near(
        log_probs, transformed_glow_flow.log_prob(features),
        message="log_probs disagree with transformed_glow_flow.log_prob")
    with tf.control_dependencies([assert_log_probs]):
        negative_log_likelihood = -tf.reduce_mean(log_probs)
        bpd = bits_per_dim(negative_log_likelihood, features.shape[-3:])

    loss = negative_log_likelihood

    tf.summary.scalar(
        "negative_log_likelihood",
        tf.reshape(negative_log_likelihood, []))
    tf.summary.scalar("bit_per_dim", tf.reshape(bpd, []))

    z_l2 = tf.norm(z, axis=1)
    z_l2_mean, z_l2_var = tf.nn.moments(z_l2, axes=0)
    log_det_jacobians_mean, log_det_jacobians_var = tf.nn.moments(
        log_det_jacobians, axes=0)
    prior_log_probs_mean, prior_log_probs_var = tf.nn.moments(
        prior_log_probs, axes=0)

    tf.summary.scalar("log_det_jacobians_mean",
                      tf.reshape(log_det_jacobians_mean, []))
    tf.summary.scalar("log_det_jacobians_var",
                      tf.reshape(log_det_jacobians_var, []))

    tf.summary.scalar("prior_log_probs_mean",
                      tf.reshape(prior_log_probs_mean, []))
    tf.summary.scalar("prior_log_probs_var",
                      tf.reshape(prior_log_probs_var, []))

    tf.summary.scalar("l2_z_mean", tf.reshape(z_l2_mean, []))
    tf.summary.scalar("z_l2_var", tf.reshape(z_l2_var, []))

    # Generate samples for visualization.
    random_image = transformed_glow_flow.sample(16)
    image_tile_summary(
        "random/sample", tf.to_float(random_image), rows=4, cols=4)

    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.train.cosine_decay(
        params['learning_rate'], global_step, params['max_steps'])
    tf.summary.scalar("learning_rate", learning_rate)

    optimizer = tf.train.AdamOptimizer(learning_rate)
    gradients, variables = zip(*optimizer.compute_gradients(loss))
    capped_gradients, gradient_norm = tf.clip_by_global_norm(
        gradients, clip_norm=params['clip_gradient'])
    capped_gradients_and_variables = zip(capped_gradients, variables)
    train_op = optimizer.apply_gradients(
        capped_gradients_and_variables, global_step=global_step)

    gradient_norm = tf.check_numerics(
        gradient_norm, "Gradient norm contains NaNs or Infs.")
    tf.summary.scalar("gradient_norm", tf.reshape(gradient_norm, []))

    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops={
            "log_probs": tf.metrics.mean(log_probs),
        })
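The helpers bits_per_dim and image_tile_summary are defined elsewhere in the project. Minimal sketches of the standard computations they plausibly perform (assumptions for illustration, not the project's actual code):

import numpy as np
import tensorflow as tf

def bits_per_dim(negative_log_likelihood, event_shape):
    """Convert a mean NLL in nats per image to bits per dimension.

    Standard conversion: bits/dim = nll / (log(2) * num_dimensions).
    """
    num_dims = np.prod([int(dim) for dim in event_shape])
    return negative_log_likelihood / (np.log(2.0) * num_dims)

def image_tile_summary(name, tensor, rows=8, cols=8):
    """Tile the first rows*cols images of `tensor` into one summary image."""
    images = tensor[:rows * cols]
    height, width, channels = [int(dim) for dim in images.shape[1:4]]
    images = tf.reshape(images, [rows, cols, height, width, channels])
    # Interleave grid rows with image rows so the reshape below tiles cleanly.
    images = tf.transpose(images, [0, 2, 1, 3, 4])
    tiled = tf.reshape(images, [1, rows * height, cols * width, channels])
    tf.summary.image(name, tiled, max_outputs=1)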