def _CreateMetadataDict(benchmark_spec):
  """Build the metadata dict reported with run results.

  This benchmark adds no fields of its own; the shared MNIST helper
  supplies the full metadata.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    metadata dict
  """
  metadata = mnist_benchmark.CreateMetadataDict(benchmark_spec)
  return metadata
def _CreateMetadataDict(benchmark_spec):
  """Build the metadata dict reported with run results.

  Starts from the shared MNIST metadata and layers on this benchmark's
  model/problem configuration fields.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    metadata dict
  """
  metadata = mnist_benchmark.CreateMetadataDict(benchmark_spec)
  # Each metadata key mirrors the benchmark_spec attribute of the same name.
  for field in ('model', 'problem', 'hparams_set', 'data_dir', 'model_dir',
                'train_steps', 'eval_steps'):
    metadata[field] = getattr(benchmark_spec, field)
  return metadata
def _CreateMetadataDict(benchmark_spec):
  """Build the metadata dict reported with run results.

  Extends the shared MNIST metadata with this benchmark's network-depth,
  precision, and evaluation-cadence settings.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    metadata dict
  """
  metadata = mnist_benchmark.CreateMetadataDict(benchmark_spec)
  # Each metadata key mirrors the benchmark_spec attribute of the same name.
  for attr in ('depth', 'mode', 'data_format', 'precision', 'skip_host_call',
               'epochs_per_eval', 'steps_per_eval'):
    metadata[attr] = getattr(benchmark_spec, attr)
  return metadata
def _CreateMetadataDict(benchmark_spec):
  """Build the metadata dict reported with run results.

  Extends the shared MNIST metadata with this benchmark's training-rate,
  checkpointing, and evaluation-cadence settings.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    metadata dict
  """
  # Benchmark-specific fields; each key mirrors the attribute of the
  # same name on benchmark_spec.
  extra = {
      name: getattr(benchmark_spec, name)
      for name in ('learning_rate', 'use_data', 'mode',
                   'save_checkpoints_secs', 'epochs_per_eval',
                   'steps_per_eval', 'precision')
  }
  metadata = mnist_benchmark.CreateMetadataDict(benchmark_spec)
  metadata.update(extra)
  return metadata