Example No. 1
 def _train_generator(self, features, labels, step, optimizer, params):
     # Set the random offset tensor for operations in tpu_random.py.
     tpu_random.set_random_offset_from_features(features)
     # create_loss will set self.g_loss.
     self.create_loss(features, labels, params=params)
     update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
     with tf.control_dependencies(update_ops):
         train_op = optimizer.minimize(
             self.g_loss,
             var_list=self.generator.trainable_variables,
             global_step=step)
         if self._g_use_ema:
             g_vars = self.generator.trainable_variables
             with tf.name_scope("generator_ema"):
                 logging.info("Creating moving averages of weights: %s",
                              g_vars)
                 # The decay value is set to 0 if we're before the moving-average start
                 # point, so that the EMA vars will be the normal vars.
                 decay = self._ema_decay * tf.cast(
                     tf.greater_equal(step, self._ema_start_step),
                     tf.float32)
                 ema = tf.train.ExponentialMovingAverage(decay=decay)
                 with tf.control_dependencies([train_op]):
                     train_op = ema.apply(g_vars)
         with tf.control_dependencies([train_op]):
             return tf.identity(self.g_loss)
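A note on the EMA gating above: multiplying the decay by the result of tf.greater_equal means the shadow variables simply mirror the raw weights until the start step is reached, and only then start averaging. A minimal sketch of the same trick in plain TensorFlow 1.x (ema_start_step, base_decay and the weights variable are hypothetical placeholders, not taken from the example above):

    import tensorflow as tf  # TensorFlow 1.x assumed

    step = tf.train.get_or_create_global_step()
    ema_start_step = 100    # hypothetical step at which averaging begins
    base_decay = 0.9999     # hypothetical EMA decay

    # Before ema_start_step the comparison casts to 0.0, so decay == 0 and each
    # ema.apply() just copies the current weights into the shadow variables;
    # from ema_start_step onwards the usual exponential average takes over.
    decay = base_decay * tf.cast(tf.greater_equal(step, ema_start_step), tf.float32)
    ema = tf.train.ExponentialMovingAverage(decay=decay)

    weights = tf.get_variable("w", shape=[10], initializer=tf.zeros_initializer())
    update_ema_op = ema.apply([weights])     # run after every training step
    averaged_weights = ema.average(weights)  # shadow variable, e.g. for evaluation

Because decay == 0 makes the shadow variables equal to the normal variables, no separate code path is needed for the warm-up phase.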
Example No. 2
 def _train_discriminator(self, features, labels, step, optimizer, params):
   features = features.copy()
   features["generated"] = tf.stop_gradient(features["generated"])
   # Set the random offset tensor for operations in tpu_random.py.
   tpu_random.set_random_offset_from_features(features)
   # create_loss will set self.d_loss.
   self.create_loss(features, labels, params=params)
   update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
   with tf.control_dependencies(update_ops):
     train_op = optimizer.minimize(
         self.d_loss,
         var_list=self.discriminator.trainable_variables,
         global_step=step)
     with tf.control_dependencies([train_op]):
       return tf.identity(self.d_loss)
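The tf.stop_gradient call at the top of _train_discriminator cuts the backward path into the generator, so the discriminator step can reuse the generated images without ever updating generator variables. A minimal sketch (TensorFlow 1.x assumed; g_var and the toy loss are invented for illustration) showing that no gradient flows through a stopped tensor:

    import tensorflow as tf  # TensorFlow 1.x assumed

    g_var = tf.Variable(1.0, name="g_var")     # stand-in for a generator weight
    generated = g_var * 3.0                    # stand-in for generator output
    detached = tf.stop_gradient(generated)     # what the discriminator step sees

    d_loss = tf.reduce_sum(detached * detached)
    grads = tf.gradients(d_loss, [g_var])
    print(grads)  # [None] -- the loss has no gradient w.r.t. the generator variable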
Example No. 3
 def model_fn(features, labels, mode, params):
     # Set the random offset tensor for operations in tpu_random.py.
     tpu_random.set_random_offset_from_features(features)
    test_op = create_op_fn()  # create_op_fn is assumed to be defined in the enclosing scope
     predictions = tf.layers.dense(features["x"], 1)
     loss = tf.losses.mean_squared_error(labels, predictions)
     optimizer = tf.train.GradientDescentOptimizer(0.01)
     if params["use_tpu"]:
         optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
     with tf.control_dependencies([test_op]):
         train_op = optimizer.minimize(
             loss, global_step=tf.train.get_or_create_global_step())
     return tf.contrib.tpu.TPUEstimatorSpec(mode=mode,
                                            loss=loss,
                                            train_op=train_op)
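In this model_fn the control dependency guarantees that test_op runs as a prerequisite of every weight update. A small runnable sketch of that ordering guarantee (TensorFlow 1.x assumed; the counter variable is a hypothetical stand-in for whatever create_op_fn() builds):

    import tensorflow as tf  # TensorFlow 1.x assumed

    counter = tf.Variable(0, name="counter")
    test_op = tf.assign_add(counter, 1)   # hypothetical stand-in for create_op_fn()

    x = tf.Variable(5.0, name="x")
    loss = x * x
    optimizer = tf.train.GradientDescentOptimizer(0.01)

    # Every op created inside this block depends on test_op, so the counter is
    # incremented as part of each training step.
    with tf.control_dependencies([test_op]):
        train_op = optimizer.minimize(
            loss, global_step=tf.train.get_or_create_global_step())

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_op)
        print(sess.run(counter))  # 1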
Example No. 4
        def _create_sub_step_loss(sub_step_idx=0, reuse=True):
            """Creates the loss for a slice of the current batch.

      Args:
        sub_step_idx: Index of the slice of the batch to use to construct the
            loss. If self.unroll_disc_iters is True this must be 0 and the whole
            batch will be used.
        reuse: Bool, whether to reuse existing variables for the models.
            Should be False for the first call and True on all other calls.

      Returns:
        Fake images created by the generator.
      """
            logging.info("sub_step_idx: %s, params: %s", sub_step_idx, params)
            # Set the random offset tensor for operations in tpu_random.py.
            tpu_random.set_random_offset_from_features(fs[sub_step_idx])
            self.create_loss(fs[sub_step_idx],
                             ls[sub_step_idx],
                             params,
                             is_training=is_training,
                             reuse=reuse)
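fs and ls are not defined in this excerpt; from the indexing they appear to be lists holding one slice of the features dict and of the labels per sub-step. One hypothetical way such lists could be built, splitting every tensor along the batch dimension (a sketch only, not necessarily how the surrounding code constructs them):

    import tensorflow as tf  # TensorFlow 1.x assumed

    def split_batch(features, labels, num_sub_steps):
        """Splits a features dict and a labels tensor into per-sub-step slices."""
        fs = [{} for _ in range(num_sub_steps)]
        for name, tensor in features.items():
            for i, part in enumerate(tf.split(tensor, num_sub_steps, axis=0)):
                fs[i][name] = part
        ls = tf.split(labels, num_sub_steps, axis=0)
        return fs, ls

    # Example: a batch of 8 examples split into 4 sub-steps of 2 examples each.
    features = {"images": tf.zeros([8, 32, 32, 3]), "z": tf.zeros([8, 128])}
    labels = tf.zeros([8], dtype=tf.int32)
    fs, ls = split_batch(features, labels, num_sub_steps=4)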