Example #1
    def _run_and_report_benchmark(self, **kwargs):
        # Time the full distributed training run.
        start_time_sec = time.time()
        train_loss, test_loss = distributed_train.main(**kwargs)
        wall_time_sec = time.time() - start_time_sec

        # Report the final losses as extra metrics alongside wall time.
        extras = {'train_loss': train_loss, 'test_loss': test_loss}

        self.report_benchmark(wall_time=wall_time_sec, extras=extras)
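
This helper follows the standard tf.test.Benchmark pattern: time a run, then pass the wall time and any extra metrics to report_benchmark. A minimal self-contained sketch of that pattern is below; fake_training_fn is a hypothetical stand-in for distributed_train.main.

    import time

    import tensorflow as tf


    def fake_training_fn():
        # Hypothetical stand-in for distributed_train.main: trivial
        # "training" that returns a train loss and a test loss.
        return 0.5, 0.6


    class MinimalBenchmark(tf.test.Benchmark):

        def benchmark_training(self):
            start_time_sec = time.time()
            train_loss, test_loss = fake_training_fn()
            wall_time_sec = time.time() - start_time_sec

            # report_benchmark accepts wall_time plus a free-form
            # extras dict of additional metrics.
            self.report_benchmark(
                wall_time=wall_time_sec,
                extras={'train_loss': train_loss, 'test_loss': test_loss})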
Example #2
  def _run_and_report_benchmark(self, **kwargs):
    # Time the full distributed training run.
    start_time_sec = time.time()
    train_loss, test_loss = distributed_train.main(**kwargs)
    wall_time_sec = time.time() - start_time_sec

    # Report the final losses as extra metrics alongside wall time.
    extras = {'train_loss': train_loss,
              'test_loss': test_loss}

    self.report_benchmark(
        wall_time=wall_time_sec, extras=extras)

  def test_one_epoch_multi_device(self):
    if tf.test.is_gpu_available():
      print('Using 2 virtual GPUs.')
      # Split the first physical GPU into two 8 GB virtual devices so the
      # multi-device path can be exercised on a single-GPU machine.
      device = tf.config.experimental.list_physical_devices('GPU')[0]
      tf.config.experimental.set_virtual_device_configuration(
          device, [
              tf.config.experimental.VirtualDeviceConfiguration(
                  memory_limit=8192),
              tf.config.experimental.VirtualDeviceConfiguration(
                  memory_limit=8192)
          ])

    # Run a single epoch on a deliberately tiny model and dataset to keep
    # the test fast.
    kwargs = utils.get_common_kwargs()
    kwargs.update({
        'epochs': 1,
        'batch_size': 16,
        'num_examples': 10,
        'embedding_dim': 4,
        'enc_units': 4,
        'dec_units': 4
    })

    distributed_train.main(**kwargs)
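
The virtual-GPU setup in Example #2 uses the older tf.config.experimental names. Current TF 2.x exposes stable equivalents for the same split; a minimal sketch, assuming at least one physical GPU is present and not yet initialized:

    import tensorflow as tf

    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
      # Split the first physical GPU into two logical devices of 8 GB
      # each. This must run before the GPU has been initialized.
      tf.config.set_logical_device_configuration(
          gpus[0],
          [tf.config.LogicalDeviceConfiguration(memory_limit=8192),
           tf.config.LogicalDeviceConfiguration(memory_limit=8192)])
      print('Logical GPUs:', len(tf.config.list_logical_devices('GPU')))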