import tempfile
import time

import tensorflow as tf

# The companion eager GAN example module (provides Generator, Discriminator,
# and train_one_epoch); assumed to be importable as `mnist`.
import mnist


# NOTE: this method belongs to a `tf.test.Benchmark` subclass; the module-level
# helpers and constants it uses (device, data_format, SUMMARY_FLUSH_MS,
# SUMMARY_INTERVAL, NOISE_DIM) are sketched after the snippet.
def benchmark_train(self):
        for batch_size in [64, 128, 256]:
            # Random input data: a few burn-in batches to warm up, and a
            # larger set of batches to time.
            burn_batches, measure_batches = (3, 100)
            burn_images = [
                tf.random_normal([batch_size, 784])
                for _ in range(burn_batches)
            ]
            burn_dataset = tf.data.Dataset.from_tensor_slices(burn_images)
            measure_images = [
                tf.random_normal([batch_size, 784])
                for _ in range(measure_batches)
            ]
            measure_dataset = tf.data.Dataset.from_tensor_slices(
                measure_images)

            step_counter = tf.train.get_or_create_global_step()
            with tf.device(device()):
                # Create the models and optimizers
                generator = mnist.Generator(data_format())
                discriminator = mnist.Discriminator(data_format())
                # One Adam optimizer per network, each created under its own
                # variable scope.
                with tf.variable_scope('generator'):
                    generator_optimizer = tf.train.AdamOptimizer(0.001)
                with tf.variable_scope('discriminator'):
                    discriminator_optimizer = tf.train.AdamOptimizer(0.001)

                # Summaries are written to a throwaway temporary directory.
                with tf.contrib.summary.create_file_writer(
                        tempfile.mkdtemp(),
                        flush_millis=SUMMARY_FLUSH_MS).as_default():

                    # Warm up: run the burn-in batches so one-time setup costs
                    # stay out of the timed run.
                    mnist.train_one_epoch(generator,
                                          discriminator,
                                          generator_optimizer,
                                          discriminator_optimizer,
                                          burn_dataset,
                                          step_counter,
                                          log_interval=SUMMARY_INTERVAL,
                                          noise_dim=NOISE_DIM)
                    # Measure: time one epoch over the measurement batches.
                    start = time.time()
                    mnist.train_one_epoch(generator,
                                          discriminator,
                                          generator_optimizer,
                                          discriminator_optimizer,
                                          measure_dataset,
                                          step_counter,
                                          log_interval=SUMMARY_INTERVAL,
                                          noise_dim=NOISE_DIM)
                    self._report('train', start, measure_batches, batch_size)
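

# ---------------------------------------------------------------------------
# Minimal sketch of the surrounding context this benchmark assumes. None of
# the definitions below come from the original file; the names match what the
# snippet references, but the values and bodies are illustrative stand-ins.
# ---------------------------------------------------------------------------

NOISE_DIM = 100          # assumed size of the generator's noise vector
SUMMARY_INTERVAL = 100   # assumed number of steps between summary writes
SUMMARY_FLUSH_MS = 100   # assumed summary flush interval, in milliseconds


def device():
    # Run on the GPU when TensorFlow can see one, otherwise on the CPU.
    return '/gpu:0' if tf.test.is_gpu_available() else '/cpu:0'


def data_format():
    # Channels-first is typically faster on GPU, channels-last on CPU.
    return 'channels_first' if tf.test.is_gpu_available() else 'channels_last'


class MnistEagerGanBenchmark(tf.test.Benchmark):
    # benchmark_train (above) would be defined on this class; `_report` below
    # turns the measured wall time into an examples/sec benchmark entry.

    def _report(self, label, start, num_batches, batch_size):
        avg_time = (time.time() - start) / num_batches
        name = 'eager_gan_%s_batch_size_%d' % (label, batch_size)
        extras = {'examples_per_sec': batch_size / avg_time}
        self.report_benchmark(
            iters=num_batches, wall_time=avg_time, name=name, extras=extras)


if __name__ == '__main__':
    tf.enable_eager_execution()
    tf.test.main()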