Example 1
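These snippets are excerpts from PerfKitBenchmarker-style benchmark modules, so they assume module-level context that the excerpts do not show. A minimal sketch of that context, with assumed import paths (the real module layout may differ, and older PerfKitBenchmarker versions import flags from the package itself rather than from absl):

from absl import flags  # assumed; some PKB versions ship their own flags module
from perfkitbenchmarker.linux_packages import cloud_tpu_models  # assumed path

FLAGS = flags.FLAGS  # populated by each benchmark's flag definitions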
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
    """Update the benchmark_spec with supplied command line flags.

    Args:
      benchmark_spec: benchmark specification to update
    """
    benchmark_spec.learning_rate = FLAGS.inception3_learning_rate
    benchmark_spec.use_data = FLAGS.inception3_use_data
    benchmark_spec.mode = FLAGS.inception3_mode
    benchmark_spec.save_checkpoints_secs = FLAGS.inception3_save_checkpoints_secs
    benchmark_spec.train_batch_size = FLAGS.inception3_train_batch_size
    benchmark_spec.eval_batch_size = FLAGS.inception3_eval_batch_size
    benchmark_spec.commit = cloud_tpu_models.GetCommit(benchmark_spec.vms[0])
    benchmark_spec.data_dir = FLAGS.imagenet_data_dir
    benchmark_spec.num_train_images = FLAGS.imagenet_num_train_images
    benchmark_spec.num_eval_images = FLAGS.imagenet_num_eval_images
    benchmark_spec.num_examples_per_epoch = (
        float(benchmark_spec.num_train_images) /
        benchmark_spec.train_batch_size)
    benchmark_spec.train_epochs = FLAGS.inception3_train_epochs
    benchmark_spec.train_steps = int(benchmark_spec.train_epochs *
                                     benchmark_spec.num_examples_per_epoch)
    benchmark_spec.epochs_per_eval = FLAGS.inception3_epochs_per_eval
    benchmark_spec.steps_per_eval = int(benchmark_spec.epochs_per_eval *
                                        benchmark_spec.num_examples_per_epoch)
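For concreteness, the step arithmetic above works out as follows under one set of hypothetical flag values (the image count is the standard ImageNet training-set size; the batch size and epoch count are illustrative, not necessarily the flags' defaults):

# Hypothetical values for illustration only.
num_train_images = 1281167
train_batch_size = 1024
train_epochs = 200

num_examples_per_epoch = float(num_train_images) / train_batch_size
train_steps = int(train_epochs * num_examples_per_epoch)
print(num_examples_per_epoch)  # 1251.1396484375
print(train_steps)             # 250227

Note that despite its name, num_examples_per_epoch is really the number of steps per epoch (examples divided by batch size), which is why multiplying it by an epoch count yields a step count.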
Example 2
def CreateMetadataDict(benchmark_spec):
    """Create metadata dict to be used in run results.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

    Returns:
      metadata dict
    """
    return {
        'data_dir': benchmark_spec.data_dir,
        'use_tpu': benchmark_spec.use_tpu,
        'model_dir': benchmark_spec.model_dir,
        'train_steps': benchmark_spec.train_steps,
        'eval_steps': benchmark_spec.eval_steps,
        'tpu': benchmark_spec.tpu,
        'tpu_train': benchmark_spec.tpu_train,
        'tpu_eval': benchmark_spec.tpu_eval,
        'commit': cloud_tpu_models.GetCommit(benchmark_spec.vms[0]),
        'iterations': benchmark_spec.iterations,
        'num_shards': benchmark_spec.num_shards,
        'num_shards_train': benchmark_spec.num_shards_train,
        'num_shards_eval': benchmark_spec.num_shards_eval,
        'num_train_images': benchmark_spec.num_train_images,
        'num_eval_images': benchmark_spec.num_eval_images,
        'train_epochs': benchmark_spec.train_epochs,
        'eval_epochs': benchmark_spec.eval_epochs,
        'num_examples_per_epoch': benchmark_spec.num_examples_per_epoch,
        'train_batch_size': benchmark_spec.train_batch_size,
        'eval_batch_size': benchmark_spec.eval_batch_size
    }
Example 3
def CreateMetadataDict(benchmark_spec):
    """Create metadata dict to be used in run results.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

    Returns:
      metadata dict
    """
    metadata = {
        'use_tpu': bool(benchmark_spec.tpus),
        'data_dir': benchmark_spec.data_dir,
        'model_dir': benchmark_spec.model_dir,
        'train_steps': benchmark_spec.train_steps,
        'eval_steps': benchmark_spec.eval_steps,
        'commit': cloud_tpu_models.GetCommit(benchmark_spec.vms[0]),
        'iterations': benchmark_spec.iterations,
        'num_train_images': benchmark_spec.num_train_images,
        'num_eval_images': benchmark_spec.num_eval_images,
        'train_epochs': benchmark_spec.train_epochs,
        'eval_epochs': benchmark_spec.eval_epochs,
        'num_examples_per_epoch': benchmark_spec.num_examples_per_epoch,
        'train_batch_size': benchmark_spec.train_batch_size,
        'eval_batch_size': benchmark_spec.eval_batch_size
    }
    if benchmark_spec.tpus:
        metadata.update({
            'train_tpu_num_shards':
                benchmark_spec.tpu_groups['train'].GetNumShards(),
            'train_tpu_accelerator_type':
                benchmark_spec.tpu_groups['train'].GetAcceleratorType()
        })
    return metadata
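A hedged sketch of how a metadata dict like this is typically consumed: PerfKitBenchmarker attaches metadata to each reported sample via the sample module. The metric name and value below are illustrative, not taken from these excerpts:

from perfkitbenchmarker import sample

metadata = CreateMetadataDict(benchmark_spec)
# Sample(metric, value, unit, metadata); the throughput value here is made up.
results = [sample.Sample('Training throughput', 1250.0, 'images/sec', metadata)]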
Example 4
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
    """Update the benchmark_spec with supplied command line flags.

    Args:
      benchmark_spec: benchmark specification to update
    """
    benchmark_spec.depth = FLAGS.resnet_depth
    benchmark_spec.mode = FLAGS.resnet_mode
    benchmark_spec.train_batch_size = FLAGS.resnet_train_batch_size
    benchmark_spec.eval_batch_size = FLAGS.resnet_eval_batch_size
    benchmark_spec.data_format = FLAGS.resnet_data_format
    benchmark_spec.commit = cloud_tpu_models.GetCommit(benchmark_spec.vms[0])
    benchmark_spec.skip_host_call = FLAGS.resnet_skip_host_call
    benchmark_spec.data_dir = FLAGS.imagenet_data_dir
    benchmark_spec.num_train_images = FLAGS.imagenet_num_train_images
    benchmark_spec.num_eval_images = FLAGS.imagenet_num_eval_images
    benchmark_spec.num_examples_per_epoch = (
        float(benchmark_spec.num_train_images) /
        benchmark_spec.train_batch_size)
    benchmark_spec.train_epochs = FLAGS.resnet_train_epochs
    benchmark_spec.train_steps = int(benchmark_spec.train_epochs *
                                     benchmark_spec.num_examples_per_epoch)
    benchmark_spec.epochs_per_eval = FLAGS.resnet_epochs_per_eval
    benchmark_spec.steps_per_eval = int(benchmark_spec.epochs_per_eval *
                                        benchmark_spec.num_examples_per_epoch)
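The steps_per_eval value computed here typically drives an alternating train/evaluate loop elsewhere in the benchmark. A sketch of that pattern, under the assumption that the runner checkpoints and evaluates every steps_per_eval steps (the loop itself is not part of these excerpts):

current_step = 0
while current_step < benchmark_spec.train_steps:
  # Train up to the next evaluation boundary, then evaluate. This is a
  # sketch of the assumed pattern, not the benchmark's actual runner code.
  next_stop = min(current_step + benchmark_spec.steps_per_eval,
                  benchmark_spec.train_steps)
  current_step = next_stop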
Example 5
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
    """Update the benchmark_spec with supplied command line flags.

    Args:
      benchmark_spec: benchmark specification to update
    """
    benchmark_spec.depth = FLAGS.resnet_depth
    benchmark_spec.mode = FLAGS.resnet_mode
    benchmark_spec.train_steps = FLAGS.resnet_train_steps
    benchmark_spec.train_batch_size = FLAGS.resnet_train_batch_size
    benchmark_spec.eval_batch_size = FLAGS.resnet_eval_batch_size
    benchmark_spec.num_cores = FLAGS.resnet_num_cores
    benchmark_spec.data_format = FLAGS.resnet_data_format
    benchmark_spec.precision = FLAGS.resnet_precision
    benchmark_spec.commit = cloud_tpu_models.GetCommit(benchmark_spec.vms[0])
Example 6
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
    """Update the benchmark_spec with supplied command line flags.

    Args:
      benchmark_spec: benchmark specification to update
    """
    benchmark_spec.learning_rate = FLAGS.inception3_learning_rate
    benchmark_spec.train_steps = FLAGS.inception3_train_steps
    benchmark_spec.use_data = FLAGS.inception3_use_data
    benchmark_spec.mode = FLAGS.inception3_mode
    benchmark_spec.train_steps_per_eval = FLAGS.inception3_train_steps_per_eval
    benchmark_spec.save_checkpoints_secs = FLAGS.inception3_save_checkpoints_secs
    benchmark_spec.train_batch_size = FLAGS.inception3_train_batch_size
    benchmark_spec.eval_batch_size = FLAGS.inception3_eval_batch_size
    benchmark_spec.commit = cloud_tpu_models.GetCommit(benchmark_spec.vms[0])
Example 7
def _CreateMetadataDict(benchmark_spec):
    """Create metadata dict to be used in run results.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

    Returns:
      metadata dict
    """
    metadata = dict()
    metadata['train_file'] = benchmark_spec.train_file
    metadata['use_tpu'] = benchmark_spec.use_tpu
    metadata['model_dir'] = benchmark_spec.model_dir
    metadata['train_steps'] = benchmark_spec.train_steps
    metadata['master'] = benchmark_spec.master
    vm = benchmark_spec.vms[0]
    metadata['commit'] = cloud_tpu_models.GetCommit(vm)
    return metadata
Example 8
def _CreateMetadataDict(benchmark_spec):
    """Create metadata dict to be used in run results.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

    Returns:
      metadata dict
    """
    return {
        'train_file': benchmark_spec.train_file,
        'use_tpu': benchmark_spec.use_tpu,
        'model_dir': benchmark_spec.model_dir,
        'train_steps': benchmark_spec.train_steps,
        'master': benchmark_spec.master,
        'commit': cloud_tpu_models.GetCommit(benchmark_spec.vms[0]),
        'iterations': benchmark_spec.iterations
    }
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
  """Update the benchmark_spec with supplied command line flags.

  Args:
    benchmark_spec: benchmark specification to update
  """
  benchmark_spec.learning_rate = FLAGS.inception3_learning_rate
  benchmark_spec.train_steps = FLAGS.inception3_train_steps
  benchmark_spec.iterations = FLAGS.inception3_iterations
  benchmark_spec.use_tpu = benchmark_spec.cloud_tpu is not None
  benchmark_spec.use_data = FLAGS.inception3_use_data
  benchmark_spec.mode = FLAGS.inception3_mode
  benchmark_spec.train_steps_per_eval = FLAGS.inception3_train_steps_per_eval
  benchmark_spec.data_dir = FLAGS.inception3_data_dir
  benchmark_spec.save_checkpoints_secs = FLAGS.inception3_save_checkpoints_secs
  benchmark_spec.train_batch_size = FLAGS.inception3_train_batch_size
  benchmark_spec.eval_batch_size = FLAGS.inception3_eval_batch_size
  benchmark_spec.master = 'grpc://{ip}:{port}'.format(
      ip=benchmark_spec.cloud_tpu.GetCloudTpuIp(),
      port=benchmark_spec.cloud_tpu.GetCloudTpuPort()
  ) if benchmark_spec.use_tpu else ''
  benchmark_spec.commit = cloud_tpu_models.GetCommit(benchmark_spec.vms[0])
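For example, if the TPU endpoint resolved to 10.240.1.2 on port 8470 (8470 is the conventional Cloud TPU gRPC port; the IP is illustrative), the resulting master string would be:

# Illustrative values only.
master = 'grpc://{ip}:{port}'.format(ip='10.240.1.2', port=8470)
# -> 'grpc://10.240.1.2:8470'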