def top_prior(self):
    """Build the prior distribution over the top-level latent z.

    Returns:
      dist: instance of tf.distributions.Normal, prior distribution.
    """
    latent_shape = self.z_top_shape
    prior_kind = self.hparams.top_prior
    return glow_ops.top_prior("top_prior", latent_shape, learn_prior=prior_kind)
def top_prior(self):
    """Build the (optionally temperature-scaled) prior over the top latent z.

    Returns:
      dist: instance of tfp.distributions.Normal, prior distribution.
    """
    latent_shape = self.z_top_shape
    return glow_ops.top_prior(
        "top_prior",
        latent_shape,
        learn_prior=self.hparams.top_prior,
        temperature=self.temperature)
def top_prior(self, z):
    """Score the latent z under the (possibly learned) prior.

    Args:
      z: 4-D Tensor, (batch_size, height, width, num_channels).
    Returns:
      objective: float, log-likelihood of z under the prior.
      dist: instance of tf.distributions.Normal, prior distribution.
    """
    prior_kind = self.hparams.top_prior
    return glow_ops.top_prior("top_prior", z, learn_prior=prior_kind)
def body(self, features):
    """Computes the Glow negative log-likelihood training objective.

    Args:
      features: dict of tensors; reads "inputs" (images) and "targets".
    Returns:
      Tuple of (dummy logits shaped like "targets",
      dict with key "training" holding the bits-per-pixel objective).
    """
    inputs = features["inputs"]
    # Scale pixels so they lie in [-0.5, 0.5].
    x = self.preprocess(inputs)
    num_bins = 2**self.hparams.n_bits_x
    batch_size, height, width, channels = common_layers.shape_list(x)
    pixel_count = float(height * width * channels)
    # Dequantize: add uniform noise so discrete pixel values become
    # continuous, and account for it in the objective.
    dequant_noise = tf.random_uniform(
        shape=(batch_size, height, width, channels),
        minval=0.0,
        maxval=1.0 / num_bins)
    x = x + dequant_noise
    objective = -np.log(num_bins) * pixel_count * tf.ones(batch_size)
    # Data-dependent init: the arg_scope call ensures actnorm parameters
    # are set so per-channel output activations have zero mean and unit
    # variance ONLY on the first step; afterwards they are learned
    # through optimisation.
    global_step = tf.train.get_or_create_global_step()
    init_op = tf.logical_and(tf.equal(global_step, 0), self.is_training)
    ddi_ops = [glow_ops.get_variable_ddi, glow_ops.actnorm]
    with arg_scope(ddi_ops, init=init_op):
        self.z, encoder_objective, self.eps = glow_ops.encoder_decoder(
            "codec", x, self.hparams, eps=None, reverse=False)
    objective += encoder_objective
    prior_objective, prior_dist = glow_ops.top_prior(
        "top_prior", self.z, learn_prior=self.hparams.learn_prior)
    self.z_sample = prior_dist.sample()
    objective += prior_objective
    # Convert log-likelihood in nats to bits per pixel.
    objective = -objective / (np.log(2) * pixel_count)
    return tf.zeros_like(features["targets"]), {"training": objective}
def uncond_top_dist(self):
    """Get an unconditional prior distribution on the top latent.

    Returns:
      Tuple of (loc, scale) tensors of the prior distribution.
    """
    dist = glow_ops.top_prior(
        "unconditional", self.z_top_shape, learn_prior="single_conv")
    return dist.loc, dist.scale