Example #1
    def model_fn(self, features, labels, mode, params):
        """TPUEstimator compatible model function."""
        del labels
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        data_shape = features.get_shape().as_list()[1:]
        z_mean, z_logvar = self.gaussian_encoder(features,
                                                 is_training=is_training)
        z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
        reconstructions = self.decode(z_sampled, data_shape, is_training)
        per_sample_loss = losses.make_reconstruction_loss(
            features, reconstructions)
        reconstruction_loss = tf.reduce_mean(per_sample_loss)
        kl_loss = compute_gaussian_kl(z_mean, z_logvar)
        regularizer = self.regularizer(kl_loss, z_mean, z_logvar, z_sampled)
        loss = tf.add(reconstruction_loss, regularizer, name="loss")
        elbo = tf.add(reconstruction_loss, kl_loss, name="elbo")
        additional_logging = self.get_additional_logging()

        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = optimizers.make_vae_optimizer()
            self._optimizer = optimizer
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            train_op = optimizer.minimize(
                loss=loss, global_step=tf.train.get_global_step())
            train_op = tf.group(
                [train_op, update_ops, *self.get_additional_ops()])

            tf.summary.scalar("reconstruction_loss", reconstruction_loss)
            tf.summary.scalar("elbo", -elbo)

            for log_name, log_val in additional_logging.items():
                tf.summary.scalar(log_name, log_val)

            logging_hook = tf.train.LoggingTensorHook(
                {
                    "loss": loss,
                    "reconstruction_loss": reconstruction_loss,
                    "elbo": -elbo,
                    **additional_logging,
                },
                every_n_iter=100)
            return tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                train_op=train_op,
                training_hooks=[logging_hook])
        elif mode == tf.estimator.ModeKeys.EVAL:
            return tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                eval_metrics=(make_metric_fn("reconstruction_loss", "elbo",
                                             "regularizer", "kl_loss",
                                             *additional_logging.keys()), [
                                                 reconstruction_loss, -elbo,
                                                 regularizer, kl_loss,
                                                 *additional_logging.values()
                                             ]))
        else:
            raise NotImplementedError("Only TRAIN and EVAL modes are supported.")
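These model functions lean on a few shared helpers that are not shown on this page. As a reference for Example #1, here is a minimal sketch of `sample_from_latent_distribution` (the reparameterization trick) and `compute_gaussian_kl` (the analytic KL between a diagonal Gaussian and the standard normal); the names match the calls above, but the reductions and op names are assumptions, not the library's definitive code:

import tensorflow as tf  # TF 1.x API, as in the examples above

def sample_from_latent_distribution(z_mean, z_logvar):
    """Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I)."""
    eps = tf.random_normal(tf.shape(z_mean), 0, 1)
    return tf.add(z_mean, tf.exp(z_logvar / 2) * eps,
                  name="sampled_latent_variable")

def compute_gaussian_kl(z_mean, z_logvar):
    """Batch mean of KL(q(z|x) || N(0, I)), summed over latent dimensions."""
    kl_per_dim = tf.square(z_mean) + tf.exp(z_logvar) - z_logvar - 1
    return tf.reduce_mean(0.5 * tf.reduce_sum(kl_per_dim, [1]), name="kl_loss")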
Example #2
    def model_fn(self, features, labels, mode, params):
        """TPUEstimator compatible model function."""
        del labels
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        data_shape = features.get_shape().as_list()[1:]
        batch_size = tf.shape(features)[0]
        z_mean, z_logvar = self.gaussian_encoder(features,
                                                 is_training=is_training)
        z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)

        # z_sampled_sum = z_sampled[:batch_size // 2] + \
        # z_sampled[batch_size // 2:]
        # z_sampled_all = tf.concat([z_sampled, z_sampled_sum], axis=0)
        z_sampled_all = z_sampled
        reconstructions, group_feats_G, lie_alg_basis = self.decode_with_gfeats(
            z_sampled_all, data_shape, is_training)

        per_sample_loss = losses.make_reconstruction_loss(
            features, reconstructions[:batch_size])
        reconstruction_loss = tf.reduce_mean(per_sample_loss)
        kl_loss = compute_gaussian_kl(z_mean, z_logvar)
        regularizer = self.regularizer(kl_loss, z_mean, z_logvar, z_sampled,
                                       group_feats_G, lie_alg_basis,
                                       batch_size)
        loss = tf.add(reconstruction_loss, regularizer, name="loss")
        elbo = tf.add(reconstruction_loss, kl_loss, name="elbo")
        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = optimizers.make_vae_optimizer()
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            train_op = optimizer.minimize(
                loss=loss, global_step=tf.train.get_global_step())
            train_op = tf.group([train_op, update_ops])
            tf.summary.scalar("reconstruction_loss", reconstruction_loss)
            tf.summary.scalar("elbo", -elbo)

            logging_hook = tf.train.LoggingTensorHook(
                {
                    "loss": loss,
                    "reconstruction_loss": reconstruction_loss,
                    "elbo": -elbo
                },
                every_n_iter=100)
            return contrib_tpu.TPUEstimatorSpec(mode=mode,
                                                loss=loss,
                                                train_op=train_op,
                                                training_hooks=[logging_hook])
        elif mode == tf.estimator.ModeKeys.EVAL:
            return contrib_tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                eval_metrics=(make_metric_fn("reconstruction_loss", "elbo",
                                             "regularizer", "kl_loss"), [
                                                 reconstruction_loss, -elbo,
                                                 regularizer, kl_loss
                                             ]))
        else:
            raise NotImplementedError("Only TRAIN and EVAL modes are supported.")
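Every EVAL branch above builds its metrics through `make_metric_fn`. A plausible sketch, assuming it simply wraps each scalar in a streaming mean; the `(metric_fn, tensors)` tuple is the form TPUEstimatorSpec expects for `eval_metrics`:

def make_metric_fn(*names):
    """Returns a metric_fn mapping each name to a streaming mean of a tensor."""
    def metric_fn(*args):
        return {name: tf.metrics.mean(vec) for name, vec in zip(names, args)}
    return metric_fn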
Example #3
    def model_fn(self, features, labels, mode, params):
        """TPUEstimator compatible model function."""
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        # Labels are the true images in this case.
        output_shape = labels.get_shape().as_list()[1:]
        reconstructions = self.forward_pass(features,
                                            output_shape,
                                            is_training=is_training)
        per_sample_loss = losses.make_reconstruction_loss(
            labels, reconstructions)
        reconstruction_loss = tf.reduce_mean(per_sample_loss)

        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = optimizers.make_decoder_optimizer()
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            train_op = optimizer.minimize(
                loss=reconstruction_loss,
                global_step=tf.train.get_global_step())
            train_op = tf.group([train_op, update_ops])
            tf.summary.scalar("reconstruction_loss", reconstruction_loss)

            logging_hook = tf.train.LoggingTensorHook(
                {
                    "reconstruction_loss": reconstruction_loss,
                },
                every_n_iter=100)
            return tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=reconstruction_loss,
                train_op=train_op,
                training_hooks=[logging_hook])
        elif mode == tf.estimator.ModeKeys.EVAL:
            return tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=reconstruction_loss,
                eval_metrics=(make_metric_fn("reconstruction_loss"),
                              [reconstruction_loss]))
        else:
            raise NotImplementedError("Only TRAIN and EVAL modes are supported.")
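For context, this is roughly how such a `model_fn` gets wired into an estimator. A hedged usage sketch: `model`, `train_input_fn`, `eval_input_fn`, the batch sizes, and the model_dir are illustrative placeholders, not values from the source:

run_config = tf.contrib.tpu.RunConfig(
    model_dir="/tmp/model",
    tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=500))
estimator = tf.contrib.tpu.TPUEstimator(
    model_fn=model.model_fn,
    use_tpu=False,  # the same model_fn also runs on CPU/GPU
    config=run_config,
    train_batch_size=64,
    eval_batch_size=64)
estimator.train(input_fn=train_input_fn, steps=10000)
estimator.evaluate(input_fn=eval_input_fn, steps=100)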
Example #4
 def model_fn(self, features, labels, mode, params):
     """TPUEstimator compatible model function."""
     del labels
     is_training = (mode == tf.estimator.ModeKeys.TRAIN)
     data_shape = features.get_shape().as_list()[1:]
     z_mean, z_logvar = self.gaussian_encoder(features,
                                              is_training=is_training)
     z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
     z_shuffle = shuffle_codes(z_sampled)
     with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
         logits_z, probs_z = architectures.make_discriminator(
             z_sampled, is_training=is_training)
         _, probs_z_shuffle = architectures.make_discriminator(
             z_shuffle, is_training=is_training)
     reconstructions = self.decode(z_sampled, data_shape, is_training)
     per_sample_loss = losses.make_reconstruction_loss(
         features, reconstructions)
     reconstruction_loss = tf.reduce_mean(per_sample_loss)
     kl_loss = compute_gaussian_kl(z_mean, z_logvar)
     standard_vae_loss = tf.add(reconstruction_loss,
                                kl_loss,
                                name="VAE_loss")
     # tc = E[log(p_real)-log(p_fake)] = E[logit_real - logit_fake]
     tc_loss_per_sample = logits_z[:, 0] - logits_z[:, 1]
     tc_loss = tf.reduce_mean(tc_loss_per_sample, axis=0)
     regularizer = kl_loss + self.gamma * tc_loss
     factor_vae_loss = tf.add(standard_vae_loss,
                              self.gamma * tc_loss,
                              name="factor_VAE_loss")
     discr_loss = tf.add(0.5 * tf.reduce_mean(tf.log(probs_z[:, 0])),
                         0.5 *
                         tf.reduce_mean(tf.log(probs_z_shuffle[:, 1])),
                         name="discriminator_loss")
     if mode == tf.estimator.ModeKeys.TRAIN:
         optimizer_vae = optimizers.make_vae_optimizer()
         optimizer_discriminator = optimizers.make_discriminator_optimizer()
         all_variables = tf.trainable_variables()
         encoder_vars = [
             var for var in all_variables if "encoder" in var.name
         ]
         decoder_vars = [
             var for var in all_variables if "decoder" in var.name
         ]
         discriminator_vars = [
             var for var in all_variables if "discriminator" in var.name
         ]
         update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
         train_op_vae = optimizer_vae.minimize(
             loss=factor_vae_loss,
             global_step=tf.train.get_global_step(),
             var_list=encoder_vars + decoder_vars)
         train_op_discr = optimizer_discriminator.minimize(
             loss=-discr_loss,
             global_step=tf.train.get_global_step(),
             var_list=discriminator_vars)
         train_op = tf.group(train_op_vae, train_op_discr, update_ops)
         tf.summary.scalar("reconstruction_loss", reconstruction_loss)
         logging_hook = tf.train.LoggingTensorHook(
             {
                 "loss": factor_vae_loss,
                 "reconstruction_loss": reconstruction_loss
             },
             every_n_iter=50)
         return tf.contrib.tpu.TPUEstimatorSpec(
             mode=mode,
             loss=factor_vae_loss,
             train_op=train_op,
             training_hooks=[logging_hook])
     elif mode == tf.estimator.ModeKeys.EVAL:
         return tf.contrib.tpu.TPUEstimatorSpec(
             mode=mode,
             loss=factor_vae_loss,
             eval_metrics=(make_metric_fn("reconstruction_loss",
                                          "regularizer", "kl_loss"),
                           [reconstruction_loss, regularizer, kl_loss]))
     else:
          raise NotImplementedError("Only TRAIN and EVAL modes are supported.")
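Example #4 (a FactorVAE-style objective) relies on `shuffle_codes` to create the "fake" samples for the total-correlation discriminator. A minimal sketch, assuming the standard trick of permuting each latent dimension across the batch independently:

def shuffle_codes(z):
    """Shuffles each latent dimension of z across the batch independently."""
    z_shuffle = []
    for i in range(z.get_shape().as_list()[1]):
        z_shuffle.append(tf.random_shuffle(z[:, i]))
    return tf.stack(z_shuffle, 1, name="latent_shuffled")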
Example #5
    def model_fn(self, features, labels, mode, params):
        """TPUEstimator compatible model function."""
        del labels
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        data_shape = features.get_shape().as_list()[1:]
        output = self.gaussian_encoder(features, is_training=is_training)
        if len(output) == 2:
            z_mean, z_logvar = output
            # No L0 head in this encoder: default the extras so the code
            # below stays well-defined.
            L0_reg, mask = None, None
        else:
            z_mean, z_logvar, L0_reg, mask = output
        z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
        reconstructions = self.decode(z_sampled, data_shape, is_training)
        per_sample_loss = losses.make_reconstruction_loss(
            features, reconstructions)
        reconstruction_loss = tf.reduce_mean(per_sample_loss)
        # mask is None when the encoder has no L0 head; compute_gaussian_kl is
        # assumed to handle that case (see the sketch after this example).
        kl_loss = compute_gaussian_kl(z_mean, z_logvar, mask)
        # regularizer = self.regularizer(kl_loss, z_mean, z_logvar, z_sampled, mask, L0_reg)
        regularizer = self.regularizer(kl_loss, z_mean, z_logvar, z_sampled)
        if len(output) == 2:
            loss = tf.add(reconstruction_loss, regularizer, name="loss")
        else:
            # The L0 penalty is scaled down heavily so it does not dominate.
            loss = tf.add(reconstruction_loss,
                          regularizer + L0_reg / 500000.,
                          name="loss")
        elbo = tf.add(reconstruction_loss, kl_loss, name="elbo")
        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = optimizers.make_vae_optimizer()
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            train_op = optimizer.minimize(
                loss=loss, global_step=tf.train.get_global_step())
            train_op = tf.group([train_op, update_ops])
            # tf.summary.scalar("L0_reg", L0_reg)
            # tf.summary.scalar("mask_sum",tf.reduce_sum(mask))
            tf.summary.scalar("reconstruction_loss", reconstruction_loss)
            tf.summary.scalar("elbo", elbo)

            logging_hook = tf.train.LoggingTensorHook(
                {
                    "loss": loss,
                    "reconstruction_loss": reconstruction_loss,
                    "elbo": -elbo
                },
                every_n_iter=100)
            return tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                train_op=train_op,
                training_hooks=[logging_hook])
        elif mode == tf.estimator.ModeKeys.EVAL:
            return tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=loss,
                eval_metrics=(make_metric_fn("reconstruction_loss", "elbo",
                                             "regularizer", "kl_loss"), [
                                                 reconstruction_loss, -elbo,
                                                 regularizer, kl_loss
                                             ]))
        else:
            raise NotImplementedError("Only TRAIN and EVAL modes are supported.")
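This variant passes an extra `mask` into `compute_gaussian_kl`, whose definition is not shown. A plausible masked variant, under the assumption that `mask` is a per-dimension gate produced by the L0 head and that `None` means no gating:

def compute_gaussian_kl(z_mean, z_logvar, mask=None):
    """KL to the standard normal, optionally gated per latent dimension."""
    kl_per_dim = 0.5 * (tf.square(z_mean) + tf.exp(z_logvar) - z_logvar - 1)
    if mask is not None:
        kl_per_dim *= mask  # keep only the dimensions the L0 gate left active
    return tf.reduce_mean(tf.reduce_sum(kl_per_dim, [1]), name="kl_loss")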
Example #6
  def model_fn(self, features, labels, mode, params):
    """TPUEstimator compatible model function."""
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    data_shape = features.get_shape().as_list()[1:]
    data_shape[0] = int(data_shape[0] / 2)
    features_1 = features[:, :data_shape[0], :, :]
    features_2 = features[:, data_shape[0]:, :, :]
    with tf.variable_scope(
        tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
      z_mean, z_logvar = self.gaussian_encoder(features_1,
                                               is_training=is_training)
      z_mean_2, z_logvar_2 = self.gaussian_encoder(features_2,
                                                   is_training=is_training)
    labels = tf.squeeze(tf.one_hot(labels, z_mean.get_shape().as_list()[1]))
    kl_per_point = compute_kl(z_mean, z_mean_2, z_logvar, z_logvar_2)

    new_mean = 0.5 * z_mean + 0.5 * z_mean_2
    var_1 = tf.exp(z_logvar)
    var_2 = tf.exp(z_logvar_2)
    new_log_var = tf.math.log(0.5*var_1 + 0.5*var_2)

    mean_sample_1, log_var_sample_1 = self.aggregate(
        z_mean, z_logvar, new_mean, new_log_var, labels, kl_per_point)
    mean_sample_2, log_var_sample_2 = self.aggregate(
        z_mean_2, z_logvar_2, new_mean, new_log_var, labels, kl_per_point)
    z_sampled_1 = self.sample_from_latent_distribution(
        mean_sample_1, log_var_sample_1)
    z_sampled_2 = self.sample_from_latent_distribution(
        mean_sample_2, log_var_sample_2)
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
      reconstructions_1 = self.decode(z_sampled_1, data_shape, is_training)
      reconstructions_2 = self.decode(z_sampled_2, data_shape, is_training)
    per_sample_loss_1 = losses.make_reconstruction_loss(
        features_1, reconstructions_1)
    per_sample_loss_2 = losses.make_reconstruction_loss(
        features_2, reconstructions_2)
    reconstruction_loss_1 = tf.reduce_mean(per_sample_loss_1)
    reconstruction_loss_2 = tf.reduce_mean(per_sample_loss_2)
    reconstruction_loss = (0.5 * reconstruction_loss_1 +
                           0.5 * reconstruction_loss_2)
    kl_loss_1 = vae.compute_gaussian_kl(mean_sample_1, log_var_sample_1)
    kl_loss_2 = vae.compute_gaussian_kl(mean_sample_2, log_var_sample_2)
    kl_loss = 0.5 * kl_loss_1 + 0.5 * kl_loss_2
    regularizer = self.regularizer(
        kl_loss, None, None, None)

    loss = tf.add(reconstruction_loss,
                  regularizer,
                  name="loss")
    elbo = tf.add(reconstruction_loss, kl_loss, name="elbo")
    if mode == tf.estimator.ModeKeys.TRAIN:
      optimizer = optimizers.make_vae_optimizer()
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      train_op = optimizer.minimize(
          loss=loss, global_step=tf.train.get_global_step())
      train_op = tf.group([train_op, update_ops])
      tf.summary.scalar("reconstruction_loss", reconstruction_loss)
      tf.summary.scalar("elbo", -elbo)
      logging_hook = tf.train.LoggingTensorHook(
          {
              "loss": loss,
              "reconstruction_loss": reconstruction_loss,
              "elbo": -elbo,
          },
          every_n_iter=100)
      return TPUEstimatorSpec(
          mode=mode,
          loss=loss,
          train_op=train_op,
          training_hooks=[logging_hook])
    elif mode == tf.estimator.ModeKeys.EVAL:
      return TPUEstimatorSpec(
          mode=mode,
          loss=loss,
          eval_metrics=(make_metric_fn("reconstruction_loss", "elbo",
                                       "regularizer", "kl_loss"),
                        [reconstruction_loss, -elbo, regularizer, kl_loss]))
    else:
      raise NotImplementedError("Only TRAIN and EVAL modes are supported.")
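`compute_kl` scores, per latent dimension, how much the two views' posteriors disagree before aggregation. A sketch using the closed-form KL between two univariate Gaussians, the standard choice for this step; treat it as an assumption about the helper:

def compute_kl(z_1, z_2, logvar_1, logvar_2):
    """Per-dimension KL(N(z_1, var_1) || N(z_2, var_2)), diagonal Gaussians."""
    var_1 = tf.exp(logvar_1)
    var_2 = tf.exp(logvar_2)
    return 0.5 * (var_1 / var_2 + tf.square(z_2 - z_1) / var_2 - 1
                  + logvar_2 - logvar_1)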
Example #7
    def model_fn(self, features, labels, mode, params):
        """TPUEstimator compatible model function."""
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        labelled_features = labels[0]
        labels = tf.to_float(labels[1])
        data_shape = features.get_shape().as_list()[1:]
        with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
            z_mean, z_logvar = self.gaussian_encoder(features,
                                                     is_training=is_training)
            z_mean_labelled, _ = self.gaussian_encoder(labelled_features,
                                                       is_training=is_training)

        supervised_loss = []
        mine_ops = []

        for l in range(labels.get_shape().as_list()[1]):
            for r in range(z_mean.get_shape().as_list()[1]):
                label_for_mi = tf.layers.flatten(labels[:, l])
                representation_for_mi = tf.layers.flatten(
                    z_mean_labelled[:, r])
                mi_lr, op_lr = mine(representation_for_mi, label_for_mi,
                                    "estimator_network_%d_%d" % (l, r))
                if l != r:
                    supervised_loss = supervised_loss + [tf.math.square(mi_lr)]
                mine_ops = mine_ops + [op_lr]
        supervised_loss = tf.reshape(tf.add_n(supervised_loss), [])
        z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
        reconstructions = self.decode(z_sampled, data_shape, is_training)
        per_sample_loss = losses.make_reconstruction_loss(
            features, reconstructions)
        reconstruction_loss = tf.reduce_mean(per_sample_loss)
        kl_loss = compute_gaussian_kl(z_mean, z_logvar)
        standard_vae_loss = tf.add(reconstruction_loss,
                                   self.beta * kl_loss,
                                   name="VAE_loss")
        gamma_annealed = make_annealer(self.gamma_sup,
                                       tf.train.get_global_step())
        s2_mine_vae_loss = tf.add(standard_vae_loss,
                                  gamma_annealed * supervised_loss,
                                  name="s2_factor_VAE_loss")
        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer_vae = optimizers.make_vae_optimizer()
            all_variables = tf.trainable_variables()
            encoder_vars = [
                var for var in all_variables if "encoder" in var.name
            ]
            decoder_vars = [
                var for var in all_variables if "decoder" in var.name
            ]

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            train_op_vae = optimizer_vae.minimize(
                loss=s2_mine_vae_loss,
                global_step=tf.train.get_global_step(),
                var_list=encoder_vars + decoder_vars)
            train_op = tf.group(train_op_vae, mine_ops, update_ops)
            tf.summary.scalar("reconstruction_loss", reconstruction_loss)
            logging_hook = tf.train.LoggingTensorHook(
                {
                    "loss": s2_mine_vae_loss,
                    "reconstruction_loss": reconstruction_loss,
                    "supervised_loss": supervised_loss,
                },
                every_n_iter=50)
            return TPUEstimatorSpec(mode=mode,
                                    loss=s2_mine_vae_loss,
                                    train_op=train_op,
                                    training_hooks=[logging_hook])
        elif mode == tf.estimator.ModeKeys.EVAL:
            return TPUEstimatorSpec(
                mode=mode,
                loss=s2_mine_vae_loss,
                eval_metrics=(make_metric_fn("reconstruction_loss",
                                             "supervised_loss", "kl_loss"),
                              [reconstruction_loss, supervised_loss, kl_loss]))
        else:
            raise NotImplementedError("Only TRAIN and EVAL modes are supported.")
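`mine` returns a mutual-information estimate together with an op that trains its statistics network. A compact sketch of the Donsker-Varadhan bound it presumably implements; the network architecture, learning rate, and scope handling are assumptions, not this fork's actual code:

def mine(x, y, name_scope):
    """MINE lower bound on I(x; y): E[T(x, y)] - log E[exp(T(x, y'))]."""
    with tf.variable_scope(name_scope, reuse=tf.AUTO_REUSE):
        def stat_net(a, b):
            h = tf.layers.dense(tf.concat([a, b], axis=1), 64,
                                activation=tf.nn.elu, name="h1")
            return tf.layers.dense(h, 1, name="out")
        t_joint = stat_net(x, y)                        # samples of p(x, y)
        t_marginal = stat_net(x, tf.random_shuffle(y))  # samples of p(x)p(y)
        mi = (tf.reduce_mean(t_joint)
              - tf.log(tf.reduce_mean(tf.exp(t_marginal)) + 1e-8))
    net_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope=name_scope)
    train_op = tf.train.AdamOptimizer(1e-4).minimize(-mi, var_list=net_vars)
    return mi, train_op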
Example #8
    def model_fn(self, features, labels, mode, params):
        """TPUEstimator compatible model function.

    Args:
      features: Batch of images [batch_size, 64, 64, 3].
      labels: Tuple with batch of features [batch_size, 64, 64, 3] and the
        labels [batch_size, labels_size].
      mode: Mode for the TPUEstimator.
      params: Dict with parameters.

    Returns:
      TPU estimator.
    """

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        labelled_features = labels[0]
        labels = tf.to_float(labels[1])
        data_shape = features.get_shape().as_list()[1:]
        with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
            z_mean, z_logvar = self.gaussian_encoder(features,
                                                     is_training=is_training)
            z_mean_labelled, _ = self.gaussian_encoder(labelled_features,
                                                       is_training=is_training)
        z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
        reconstructions = self.decode(z_sampled, data_shape, is_training)
        per_sample_loss = losses.make_reconstruction_loss(
            features, reconstructions)
        reconstruction_loss = tf.reduce_mean(per_sample_loss)
        kl_loss = compute_gaussian_kl(z_mean, z_logvar)
        gamma_annealed = make_annealer(self.gamma_sup,
                                       tf.train.get_global_step())
        supervised_loss = make_supervised_loss(z_mean_labelled, labels,
                                               self.factor_sizes)
        regularizer = self.unsupervised_regularizer(
            kl_loss, z_mean, z_logvar,
            z_sampled) + gamma_annealed * supervised_loss
        loss = tf.add(reconstruction_loss, regularizer, name="loss")
        elbo = tf.add(reconstruction_loss, kl_loss, name="elbo")
        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = optimizers.make_vae_optimizer()
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            train_op = optimizer.minimize(
                loss=loss, global_step=tf.train.get_global_step())
            train_op = tf.group([train_op, update_ops])
            tf.summary.scalar("reconstruction_loss", reconstruction_loss)
            tf.summary.scalar("elbo", -elbo)

            logging_hook = tf.train.LoggingTensorHook(
                {
                    "loss": loss,
                    "reconstruction_loss": reconstruction_loss,
                    "elbo": -elbo,
                    "supervised_loss": supervised_loss
                },
                every_n_iter=100)
            return TPUEstimatorSpec(mode=mode,
                                    loss=loss,
                                    train_op=train_op,
                                    training_hooks=[logging_hook])
        elif mode == tf.estimator.ModeKeys.EVAL:
            return TPUEstimatorSpec(mode=mode,
                                    loss=loss,
                                    eval_metrics=(make_metric_fn(
                                        "reconstruction_loss", "elbo",
                                        "regularizer", "kl_loss",
                                        "supervised_loss"), [
                                            reconstruction_loss, -elbo,
                                            regularizer, kl_loss,
                                            supervised_loss
                                        ]))
        else:
            raise NotImplementedError("Only TRAIN and EVAL modes are supported.")
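`make_annealer` scales the supervision weight over the course of training. Its actual schedule is configured elsewhere in this codebase, so the following is only an illustrative linear warm-up; `anneal_steps` and the shape of the schedule are assumptions:

def make_annealer(gamma_sup, step, anneal_steps=100000):
    """Linearly warms the weight from 0 to gamma_sup over anneal_steps."""
    frac = tf.minimum(tf.cast(step, tf.float32) / float(anneal_steps), 1.0)
    return gamma_sup * frac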