def define_imagenet_flags():
    resnet_run_loop.define_resnet_flags(
        resnet_size_choices=['18', '34', '50', '101', '152', '200'],
        dynamic_loss_scale=True,
        fp16_implementation=True)
    flags.adopt_module_key_flags(resnet_run_loop)
    flags_core.set_defaults()
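These define_*_flags helpers only register flags and defaults; nothing is parsed until the program entry point runs. A minimal sketch of how such a function is typically wired into an absl app (the main body here is hypothetical; it assumes define_resnet_flags registers a resnet_size flag, as it does in the official models):

from absl import app as absl_app
from absl import flags


def main(_):
    # By the time main runs, absl_app.run has parsed argv, so the
    # flags and defaults registered above are visible on flags.FLAGS.
    print('resnet_size:', flags.FLAGS.resnet_size)


if __name__ == '__main__':
    define_imagenet_flags()
    absl_app.run(main)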
Example #2
def define_mnist_flags():
    flags.DEFINE_integer('eval_secs', 10,
                         'How frequently to run evaluation step')
    flags.DEFINE_integer('ckpt_steps', 100,
                         'How frequently to save a model checkpoint')
    flags.DEFINE_integer('max_ckpts', 2,
                         'Maximum number of checkpoints to keep')
    flags.DEFINE_integer('max_steps', int(os.environ.get('MAX_STEPS', 100)),
                         'Maximum number of training steps')
    flags.DEFINE_integer('save_summary_steps', 10,
                         'How frequently to save TensorBoard summaries')
    flags.DEFINE_integer('log_step_count_steps', 10,
                         'How frequently to log loss & global steps/s')
    flags_core.define_base()
    flags_core.define_performance(num_parallel_calls=False)
    flags_core.define_image()
    data_dir = os.path.abspath(
        os.environ.get('PS_JOBSPACE', os.getcwd()) + '/data')
    model_dir = os.path.abspath(
        os.environ.get('PS_MODEL_PATH',
                       os.getcwd() + '/models') + '/mnist')
    export_dir = os.path.abspath(
        os.environ.get('PS_MODEL_PATH',
                       os.getcwd() + '/models'))
    flags.adopt_module_key_flags(flags_core)
    flags_core.set_defaults(
        data_dir=data_dir,
        model_dir=model_dir,
        export_dir=export_dir,
        train_epochs=int(os.environ.get('TRAIN_EPOCHS', 3)),
        epochs_between_evals=int(os.environ.get('EPOCHS_EVAL', 5)),
        batch_size=int(os.environ.get('BATCH_SIZE', 100)),
    )
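Because these defaults are read from the environment at definition time, a deployment can steer them without touching the command line. A quick sketch of that behavior, assuming absl's flags module is imported as above (env var names as used above):

import os

os.environ['BATCH_SIZE'] = '256'   # must be set before define_mnist_flags() runs
define_mnist_flags()
flags.FLAGS(['prog'])              # parse with no CLI overrides
assert flags.FLAGS.batch_size == 256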
Example #3
def define_cifar_flags():
    resnet_common.define_keras_flags(dynamic_loss_scale=False)

    flags_core.set_defaults(data_dir='/tmp/cifar10_data/cifar-10-batches-bin',
                            model_dir='/tmp/cifar10_model',
                            epochs_between_evals=10,
                            batch_size=128)
Example #4
def define_kline_flags():
    run_loop.define_resnet_flags(
        resnet_size_choices=['18', '34', '50', '101', '152', '200'])
    flags.adopt_module_key_flags(run_loop)
    flags_core.set_defaults(data_dir=os.environ.get('TF_DATA_DIR', '/tmp/kline_data'),
                            model_dir=os.environ.get('TF_MODEL_DIR', '/tmp/kline_model'),
                            train_epochs=100)
Example #5
def define_mnist_flags():
    flags_core.define_base()
    flags_core.define_image()
    flags.adopt_module_key_flags(flags_core)
    flags_core.set_defaults(data_dir='/tmp/mnist_data',
                            model_dir='/tmp/mnist_model',
                            batch_size=100,
                            train_epochs=40)
Example #6
def define_cifar_flags():
    run_loop.define_resnet_flags()
    flags.adopt_module_key_flags(run_loop)
    flags_core.set_defaults(data_dir=os.environ.get('TF_DATA_DIR', '/tmp/cifar10_data'),
                            model_dir=os.environ.get('TF_MODEL_DIR', '/tmp/cifar10_model'),
                            resnet_size='32',
                            train_epochs=250,
                            epochs_between_evals=10,
                            batch_size=128)
Example #7
def define_mnist_flags():
    flags_core.define_base()
    flags_core.define_performance(num_parallel_calls=False)
    flags_core.define_image()
    flags.adopt_module_key_flags(flags_core)
    flags_core.set_defaults(data_dir='/tmp/mnist_data',
                            model_dir='/tmp/mnist_model',
                            batch_size=100,
                            train_epochs=40)
Example #8
def define_census_flags():
    wide_deep_run_loop.define_wide_deep_flags()
    flags.adopt_module_key_flags(wide_deep_run_loop)
    flags_core.set_defaults(data_dir='census_data',
                            model_dir='census_model',
                            train_epochs=50,
                            epochs_between_evals=50,
                            inter_op_parallelism_threads=0,
                            intra_op_parallelism_threads=0,
                            batch_size=40,
                            infer=False)
Example #9
def define_census_flags():
    wide_deep_run_loop.define_wide_deep_flags()
    flags.adopt_module_key_flags(wide_deep_run_loop)
    flags_core.set_defaults(data_dir='../data',
                            model_dir='./census_model',
                            train_epochs=100,
                            model_type='wide_deep',
                            epochs_between_evals=2,
                            inter_op_parallelism_threads=0,
                            intra_op_parallelism_threads=0,
                            batch_size=128)
Example #10
def define_mnist_flags():
    flags_core.define_base()
    flags_core.define_performance(num_parallel_calls=False)
    flags_core.define_image()
    flags.adopt_module_key_flags(flags_core)
    flags_core.set_defaults(
        data_dir="/workspace/zigangzhao/TensoFlowBDD/models/official/mnist/mnist_data",
        model_dir="/workspace/zigangzhao/TensoFlowBDD/models/official/mnist/mnist_model/",
        batch_size=100,
        train_epochs=40)
Example #11
    def test_benchmark_setting(self):
        defaults = dict(
            hooks=["LoggingMetricHook"],
            benchmark_log_dir="/tmp/12345",
            gcp_project="project_abc",
        )

        flags_core.set_defaults(**defaults)
        flags_core.parse_flags()

        for key, value in defaults.items():
            assert flags.FLAGS.get_flag_value(name=key, default=None) == value
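The assertion above depends on set_defaults changing each flag's default value rather than assigning to the flag, so flags not passed explicitly still report the new default after parsing. A plausible sketch of such a helper using only the public absl API (an illustration, not the official implementation):

from absl import flags


def set_defaults(**kwargs):
    for name, value in kwargs.items():
        # FlagValues.set_default updates the default and, when the flag
        # was not set on the command line, its current value as well.
        flags.FLAGS.set_default(name, value)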
Example #12
def define_mnist_flags():
    flags_core.define_base()
    flags_core.define_performance(num_parallel_calls=False)
    flags_core.define_image()
    data_dir = os.path.abspath(os.environ.get('PS_JOBSPACE', os.getcwd()) + '/data')
    model_dir = os.path.abspath(os.environ.get('PS_MODEL_PATH', os.getcwd() + '/models') + '/mnist')
    flags.adopt_module_key_flags(flags_core)
    flags_core.set_defaults(data_dir=data_dir,
                            model_dir=model_dir,
                            export_dir=os.environ.get('PS_MODEL_PATH', os.getcwd() + '/models'),
                            batch_size=int(os.environ.get('batch_size', 100)),
                            epochs_between_evals=20,
                            train_epochs=int(os.environ.get('train_epochs', 40)))
Example #13
    def _setup(self):
        """Sets up and resets flags before each test."""
        assert tf.version.VERSION.startswith('2.')
        tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
        if NCFKerasBenchmarkBase.local_flags is None:
            ncf_common.define_ncf_flags()
            # Loads flags to get defaults to then override. List cannot be
            # empty.
            flags.FLAGS(['foo'])
            core.set_defaults(**self.default_flags)
            saved_flag_values = flagsaver.save_flag_values()
            NCFKerasBenchmarkBase.local_flags = saved_flag_values
        else:
            flagsaver.restore_flag_values(NCFKerasBenchmarkBase.local_flags)
Example #14
def define_wide_deep_flags():
  """Add supervised learning flags, as well as wide-deep model type."""
  flags_core.define_base()
  flags_core.define_benchmark()

  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name="model_type", short_name="mt", default="wide_deep",
      enum_values=['wide', 'deep', 'wide_deep'],
      help="Select model topology.")

  flags_core.set_defaults(data_dir='./data/stock_data',
                          model_dir='./data/stock_model',
                          train_epochs=40,
                          epochs_between_evals=2,
                          batch_size=40)
Example #15
    def test_get_nondefault_flags_as_str(self):
        defaults = dict(clean=True,
                        data_dir="abc",
                        hooks=["LoggingTensorHook"],
                        stop_threshold=1.5,
                        use_synthetic_data=False)
        flags_core.set_defaults(**defaults)
        flags_core.parse_flags()

        expected_flags = ""
        self.assertEqual(flags_core.get_nondefault_flags_as_str(),
                         expected_flags)

        flags.FLAGS.clean = False
        expected_flags += "--noclean"
        self.assertEqual(flags_core.get_nondefault_flags_as_str(),
                         expected_flags)

        flags.FLAGS.data_dir = "xyz"
        expected_flags += " --data_dir=xyz"
        self.assertEqual(flags_core.get_nondefault_flags_as_str(),
                         expected_flags)

        flags.FLAGS.hooks = ["aaa", "bbb", "ccc"]
        expected_flags += " --hooks=aaa,bbb,ccc"
        self.assertEqual(flags_core.get_nondefault_flags_as_str(),
                         expected_flags)

        flags.FLAGS.stop_threshold = 3.
        expected_flags += " --stop_threshold=3.0"
        self.assertEqual(flags_core.get_nondefault_flags_as_str(),
                         expected_flags)

        flags.FLAGS.use_synthetic_data = True
        expected_flags += " --use_synthetic_data"
        self.assertEqual(flags_core.get_nondefault_flags_as_str(),
                         expected_flags)

        # Assert that explicitly setting a flag to its default value does not
        # cause it to appear in the string.
        flags.FLAGS.use_synthetic_data = False
        expected_flags = expected_flags[:-len(" --use_synthetic_data")]
        self.assertEqual(flags_core.get_nondefault_flags_as_str(),
                         expected_flags)
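The string assembled by get_nondefault_flags_as_str is meant to reproduce the current configuration on another command line. A hedged usage sketch (the script name here is hypothetical):

# Rebuild a command that reproduces the current non-default flags.
flag_str = flags_core.get_nondefault_flags_as_str()
command = 'python mnist_main.py ' + flag_str
print(command)  # e.g. python mnist_main.py --noclean --data_dir=xyz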
Example #16
    def test_default_setting(self):
        """Test to ensure fields exist and defaults can be set.
    """

        defaults = dict(data_dir="dfgasf",
                        model_dir="dfsdkjgbs",
                        train_epochs=534,
                        epochs_between_evals=15,
                        batch_size=256,
                        hooks=["LoggingTensorHook"],
                        num_parallel_calls=18,
                        inter_op_parallelism_threads=5,
                        intra_op_parallelism_threads=10,
                        data_format="channels_first")

        flags_core.set_defaults(**defaults)
        flags_core.parse_flags()

        for key, value in defaults.items():
            assert flags.FLAGS.get_flag_value(name=key, default=None) == value
Example #17
def define_meal_flags():
    """Define flags for meal dataset training."""
    wide_deep_run_loop.define_wide_deep_flags()
    flags.DEFINE_enum(
        name="dataset", default=meals.FOOD,
        enum_values=meals.DATASETS, case_sensitive=False,
        help=flags_core.help_wrap("Dataset to be trained and evaluated."))
    flags.adopt_module_key_flags(wide_deep_run_loop)
    flags_core.set_defaults(data_dir="../../dataset/csv_file/",
                            model_dir='../model_saved_ckp/',
                            train_epochs=150,
                            epochs_between_evals=5,
                            inter_op_parallelism_threads=0,
                            intra_op_parallelism_threads=0,
                            batch_size=40)

    @flags.validator("stop_threshold",
                     message="stop_threshold not supported for meals model")
    def _no_stop(stop_threshold):
        return stop_threshold is None
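A single-flag validator like _no_stop runs whenever the flag is assigned, so an unsupported value fails at parse time rather than deep inside training. A self-contained sketch of the same pattern with plain absl:

from absl import flags

flags.DEFINE_float('stop_threshold', None, 'Not supported by this model.')


@flags.validator('stop_threshold',
                 message='stop_threshold not supported for this model')
def _no_stop(stop_threshold):
    return stop_threshold is None

# flags.FLAGS(['prog', '--stop_threshold=0.9']) would now raise
# flags.IllegalFlagValueError with the message above.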
Example #18
def define_mnist_eager_flags():
    """Defined flags and defaults for MNIST in eager mode."""
    flags_core.define_base_eager()
    flags_core.define_image()
    flags.adopt_module_key_flags(flags_core)

    flags.DEFINE_integer(
        name='log_interval',
        short_name='li',
        default=10,
        help=flags_core.help_wrap('Batches between logging training status.'))

    flags.DEFINE_string(
        name='output_dir',
        short_name='od',
        default='/tmp/tensorflow/mnist/',
        help=flags_core.help_wrap('Directory to write TensorBoard summaries'))

    flags.DEFINE_float(name='learning_rate',
                       short_name='lr',
                       default=0.01,
                       help=flags_core.help_wrap('Learning rate.'))

    flags.DEFINE_float(name='momentum',
                       short_name='m',
                       default=0.5,
                       help=flags_core.help_wrap('SGD momentum.'))

    flags.DEFINE_bool(name='no_gpu',
                      short_name='nogpu',
                      default=False,
                      help=flags_core.help_wrap(
                          'Disables GPU usage even if a GPU is available.'))

    flags_core.set_defaults(
        data_dir='/tmp/tensorflow/mnist/input_data',
        model_dir='/tmp/tensorflow/mnist/checkpoints/',
        batch_size=100,
        train_epochs=10,
    )
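Each flag above also registers a short_name alias, which resolves to the same underlying flag. A quick sanity sketch, assuming absl's flags module is imported as above:

define_mnist_eager_flags()
flags.FLAGS(['prog', '--lr=0.05', '--nogpu'])
assert flags.FLAGS.learning_rate == 0.05
assert flags.FLAGS.no_gpu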
Example #19
def define_movie_flags():
    """Define flags for movie dataset training."""
    wide_deep_run_loop.define_wide_deep_flags()
    flags.DEFINE_enum(
        name="dataset",
        default=movielens.ML_1M,
        enum_values=movielens.DATASETS,
        case_sensitive=False,
        help=flags_core.help_wrap("Dataset to be trained and evaluated."))
    flags.adopt_module_key_flags(wide_deep_run_loop)
    flags_core.set_defaults(data_dir="/tmp/movielens-data/",
                            model_dir='/tmp/movie_model',
                            model_type="deep",
                            train_epochs=50,
                            epochs_between_evals=5,
                            inter_op_parallelism_threads=0,
                            intra_op_parallelism_threads=0,
                            batch_size=256)

    @flags.validator("stop_threshold",
                     message="stop_threshold not supported for movielens model"
                     )
    def _no_stop(stop_threshold):
        return stop_threshold is None
Example #20
def define_imagenet_flags():
    resnet_run_loop.define_resnet_flags(
        resnet_size_choices=['18', '34', '50', '101', '152', '200'])
    flags.adopt_module_key_flags(resnet_run_loop)
    flags_core.set_defaults(train_epochs=90)
Example #21
    train_input_fn = _df_to_input_fn(df=train_df,
                                     name="train",
                                     dataset=dataset,
                                     data_dir=data_dir,
                                     batch_size=batch_size,
                                     repeat=repeat,
                                     shuffle=movielens.NUM_RATINGS[dataset])
    eval_input_fn = _df_to_input_fn(df=eval_df,
                                    name="eval",
                                    dataset=dataset,
                                    data_dir=data_dir,
                                    batch_size=batch_size,
                                    repeat=repeat,
                                    shuffle=None)
    model_column_fn = functools.partial(build_model_columns, dataset=dataset)

    train_input_fn()
    return train_input_fn, eval_input_fn, model_column_fn


def main(_):
    movielens.download(dataset=flags.FLAGS.dataset,
                       data_dir=flags.FLAGS.data_dir)
    construct_input_fns(flags.FLAGS.dataset, flags.FLAGS.data_dir)


if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    movielens.define_data_download_flags()
    flags.adopt_module_key_flags(movielens)
    flags_core.set_defaults(dataset="ml-1m")
    absl_app.run(main)
Example #22
def define_ncf_flags():
    """Add flags for running ncf_main."""
    # Add common flags
    flags_core.define_base(model_dir=True, clean=True, train_epochs=True,
                           epochs_between_evals=True, export_dir=False,
                           run_eagerly=True, stop_threshold=True, num_gpu=True,
                           hooks=True, distribution_strategy=True)
    flags_core.define_performance(
        synthetic_data=True,
        dtype=True,
        fp16_implementation=True,
        loss_scale=True,
        dynamic_loss_scale=True,
        enable_xla=True,
        force_v2_in_keras_compile=True
    )
    flags_core.define_device(tpu=True)
    flags_core.define_benchmark()

    flags.adopt_module_key_flags(flags_core)

    flags_core.set_defaults(
        model_dir="/tmp/ncf/",
        data_dir="/tmp/movielens-data/",
        train_epochs=2,
        batch_size=256,
        hooks="ProfilerHook",
        tpu=None
    )

    # Add ncf-specific flags
    flags.DEFINE_enum(
        name="dataset", default="ml-1m",
        enum_values=["ml-1m", "ml-20m", "ml-20mx16x32"], case_sensitive=False,
        help=flags_core.help_wrap(
            "Dataset to be trained and evaluated."))

    flags.DEFINE_boolean(
        name="download_if_missing", default=True, help=flags_core.help_wrap(
            "Download data to data_dir if it is not already present."))

    flags.DEFINE_integer(
        name="eval_batch_size", default=None, help=flags_core.help_wrap(
            "The batch size used for evaluation. This should generally be larger"
            "than the training batch size as the lack of back propagation during"
            "evaluation can allow for larger batch sizes to fit in memory. If not"
            "specified, the training batch size (--batch_size) will be used."))

    flags.DEFINE_integer(
        name="num_factors", default=8,
        help=flags_core.help_wrap("The Embedding size of MF model."))

    # Set the default as a list of strings to be consistent with input
    # arguments
    flags.DEFINE_list(
        name="layers", default=["64", "32", "16", "8"],
        help=flags_core.help_wrap(
            "The sizes of hidden layers for MLP. Example "
            "to specify different sizes of MLP layers: --layers=32,16,8,4"))

    flags.DEFINE_float(
        name="mf_regularization", default=0., help=flags_core.help_wrap(
            "The regularization factor for MF embeddings. The factor is used by "
            "regularizer which allows to apply penalties on layer parameters or "
            "layer activity during optimization."))

    flags.DEFINE_list(
        name="mlp_regularization", default=[
            "0.", "0.", "0.", "0."], help=flags_core.help_wrap(
            "The regularization factor for each MLP layer. See mf_regularization "
            "help for more info about regularization factor."))

    flags.DEFINE_integer(name="num_neg", default=4, help=flags_core.help_wrap(
        "The Number of negative instances to pair with a positive instance."))

    flags.DEFINE_float(
        name="learning_rate", default=0.001,
        help=flags_core.help_wrap("The learning rate."))

    flags.DEFINE_float(name="beta1", default=0.9, help=flags_core.help_wrap(
        "beta1 hyperparameter for the Adam optimizer."))

    flags.DEFINE_float(name="beta2", default=0.999, help=flags_core.help_wrap(
        "beta2 hyperparameter for the Adam optimizer."))

    flags.DEFINE_float(
        name="epsilon", default=1e-8,
        help=flags_core.help_wrap("epsilon hyperparameter for the Adam "
                                  "optimizer."))

    flags.DEFINE_float(
        name="hr_threshold", default=1.0,
        help=flags_core.help_wrap(
            "If passed, training will stop when the evaluation metric HR is "
            "greater than or equal to hr_threshold. For dataset ml-1m, the "
            "desired hr_threshold is 0.68 which is the result from the paper; "
            "For dataset ml-20m, the threshold can be set as 0.95 which is "
            "achieved by MLPerf implementation."))

    flags.DEFINE_enum(
        name="constructor_type", default="bisection",
        enum_values=["bisection", "materialized"], case_sensitive=False,
        help=flags_core.help_wrap(
            "Strategy to use for generating false negatives. materialized has a"
            "precompute that scales badly, but a faster per-epoch construction"
            "time and can be faster on very large systems."))

    flags.DEFINE_string(
        name="train_dataset_path",
        default=None,
        help=flags_core.help_wrap("Path to training data."))

    flags.DEFINE_string(
        name="eval_dataset_path",
        default=None,
        help=flags_core.help_wrap("Path to evaluation data."))

    flags.DEFINE_string(
        name="input_meta_data_path",
        default=None,
        help=flags_core.help_wrap("Path to input meta data file."))

    flags.DEFINE_bool(
        name="ml_perf", default=False, help=flags_core.help_wrap(
            "If set, changes the behavior of the model slightly to match the "
            "MLPerf reference implementations here: \n"
            "https://github.com/mlperf/reference/tree/master/recommendation/"
            "pytorch\n"
            "The two changes are:\n"
            "1. When computing the HR and NDCG during evaluation, remove "
            "duplicate user-item pairs before the computation. This results in "
            "better HRs and NDCGs.\n"
            "2. Use a different soring algorithm when sorting the input data, "
            "which performs better due to the fact the sorting algorithms are "
            "not stable."))

    flags.DEFINE_bool(
        name="output_ml_perf_compliance_logging",
        default=False,
        help=flags_core.help_wrap(
            "If set, output the MLPerf compliance logging. This is only useful "
            "if one is running the model for MLPerf. See "
            "https://github.com/mlperf/policies/blob/master/training_rules.adoc"
            "#submission-compliance-logs for details. This uses sudo and so may "
            "ask for your password, as root access is needed to clear the system "
            "caches, which is required for MLPerf compliance."))

    flags.DEFINE_integer(
        name="seed", default=None, help=flags_core.help_wrap(
            "This value will be used to seed both NumPy and TensorFlow."))

    @flags.validator("eval_batch_size", "eval_batch_size must be at least {}"
                     .format(rconst.NUM_EVAL_NEGATIVES + 1))
    def eval_size_check(eval_batch_size):
        return (eval_batch_size is None or
                int(eval_batch_size) > rconst.NUM_EVAL_NEGATIVES)

    flags.DEFINE_bool(
        name="use_xla_for_gpu", default=False, help=flags_core.help_wrap(
            "If True, use XLA for the model function. Only works when using a "
            "GPU. On TPUs, XLA is always used"))

    xla_message = "--use_xla_for_gpu is incompatible with --tpu"
    @flags.multi_flags_validator(
        ["use_xla_for_gpu", "tpu"], message=xla_message)
    def xla_validator(flag_dict):
        return not flag_dict["use_xla_for_gpu"] or not flag_dict["tpu"]

    flags.DEFINE_bool(
        name="early_stopping",
        default=False,
        help=flags_core.help_wrap(
            "If True, we stop the training when it reaches hr_threshold"))

    flags.DEFINE_bool(
        name="keras_use_ctl",
        default=False,
        help=flags_core.help_wrap(
            "If True, we use a custom training loop for keras."))
Example #23
def define_keras_benchmark_flags():
    """Add flags for keras built-in application models."""
    flags_core.define_base(hooks=False)
    flags_core.define_performance()
    flags_core.define_image()
    flags_core.define_benchmark()
    flags.adopt_module_key_flags(flags_core)

    flags_core.set_defaults(data_format="channels_last",
                            use_synthetic_data=True,
                            batch_size=32,
                            train_epochs=2)

    flags.DEFINE_enum(name="model",
                      default=None,
                      enum_values=MODELS.keys(),
                      case_sensitive=False,
                      help=flags_core.help_wrap("Model to be benchmarked."))

    flags.DEFINE_integer(
        name="num_train_images",
        default=1000,
        help=flags_core.help_wrap(
            "The number of synthetic images for training. The default value is "
            "1000."))

    flags.DEFINE_integer(
        name="num_eval_images",
        default=50,
        help=flags_core.help_wrap(
            "The number of synthetic images for evaluation. The default value is "
            "50."))

    flags.DEFINE_boolean(
        name="eager",
        default=False,
        help=flags_core.help_wrap(
            "To enable eager execution. Note that if eager execution is enabled, "
            "only one GPU is utilized even if multiple GPUs are provided and "
            "multi_gpu_model is used."))

    flags.DEFINE_boolean(
        name="dist_strat",
        default=False,
        help=flags_core.help_wrap(
            "To enable distribution strategy for model training and evaluation. "
            "Number of GPUs used for distribution strategy can be set by the "
            "argument --num_gpus."))

    flags.DEFINE_list(
        name="callbacks",
        default=["ExamplesPerSecondCallback", "LoggingMetricCallback"],
        help=flags_core.help_wrap(
            "A list of (case insensitive) strings to specify the names of "
            "callbacks. For example: `--callbacks ExamplesPerSecondCallback,"
            "LoggingMetricCallback`"))

    @flags.multi_flags_validator(
        ["eager", "dist_strat"],
        message="Both --eager and --dist_strat were set. Only one can be "
        "defined, as DistributionStrategy is not supported in Eager "
        "execution currently.")
    # pylint: disable=unused-variable
    def _check_eager_dist_strat(flag_dict):
        return not (flag_dict["eager"] and flag_dict["dist_strat"])
Example #24
def define_transformer_flags():
  """Add flags and flag validators for running transformer_main."""
  # Add common flags (data_dir, model_dir, train_epochs, etc.).
  flags_core.define_base()
  flags_core.define_performance(
      num_parallel_calls=True,
      inter_op=False,
      intra_op=False,
      synthetic_data=True,
      max_train_steps=False,
      dtype=False,
      all_reduce_alg=True
  )
  flags_core.define_benchmark()
  flags_core.define_device(tpu=True)

  # Set flags from the flags_core module as "key flags" so they're listed when
  # the '-h' flag is used. Without this line, the flags defined above are
  # only shown in the full `--helpful` help text.
  flags.adopt_module_key_flags(flags_core)

  # Add transformer-specific flags
  flags.DEFINE_enum(
      name="param_set", short_name="mp", default="big",
      enum_values=PARAMS_MAP.keys(),
      help=flags_core.help_wrap(
          "Parameter set to use when creating and training the model. The "
          "parameters define the input shape (batch size and max length), "
          "model configuration (size of embedding, # of hidden layers, etc.), "
          "and various other settings. The big parameter set increases the "
          "default batch size, embedding/hidden size, and filter size. For a "
          "complete list of parameters, please see model/model_params.py."))

  flags.DEFINE_bool(
      name="static_batch", default=False,
      help=flags_core.help_wrap(
          "Whether the batches in the dataset should have static shapes. In "
          "general, this setting should be False. Dynamic shapes allow the "
          "inputs to be grouped so that the number of padding tokens is "
          "minimized, and helps model training. In cases where the input shape "
          "must be static (e.g. running on TPU), this setting will be ignored "
          "and static batching will always be used."))

  # Flags for training with steps (may be used for debugging)
  flags.DEFINE_integer(
      name="train_steps", short_name="ts", default=None,
      help=flags_core.help_wrap("The number of steps used to train."))
  flags.DEFINE_integer(
      name="steps_between_evals", short_name="sbe", default=1000,
      help=flags_core.help_wrap(
          "The Number of training steps to run between evaluations. This is "
          "used if --train_steps is defined."))

  # BLEU score computation
  flags.DEFINE_string(
      name="bleu_source", short_name="bls", default=None,
      help=flags_core.help_wrap(
          "Path to source file containing text translate when calculating the "
          "official BLEU score. Both --bleu_source and --bleu_ref must be set. "
          "Use the flag --stop_threshold to stop the script based on the "
          "uncased BLEU score."))
  flags.DEFINE_string(
      name="bleu_ref", short_name="blr", default=None,
      help=flags_core.help_wrap(
          "Path to source file containing text translate when calculating the "
          "official BLEU score. Both --bleu_source and --bleu_ref must be set. "
          "Use the flag --stop_threshold to stop the script based on the "
          "uncased BLEU score."))
  flags.DEFINE_string(
      name="vocab_file", short_name="vf", default=None,
      help=flags_core.help_wrap(
          "Path to subtoken vocabulary file. If data_download.py was used to "
          "download and encode the training data, look in the data_dir to find "
          "the vocab file."))

  flags_core.set_defaults(data_dir="/tmp/translate_ende",
                          model_dir="/tmp/transformer_model",
                          batch_size=None,
                          train_epochs=None)

  @flags.multi_flags_validator(
      ["train_epochs", "train_steps"],
      message="Both --train_steps and --train_epochs were set. Only one may be "
              "defined.")
  def _check_train_limits(flag_dict):
    return flag_dict["train_epochs"] is None or flag_dict["train_steps"] is None

  @flags.multi_flags_validator(
      ["bleu_source", "bleu_ref"],
      message="Both or neither --bleu_source and --bleu_ref must be defined.")
  def _check_bleu_files(flags_dict):
    return (flags_dict["bleu_source"] is None) == (
        flags_dict["bleu_ref"] is None)

  @flags.multi_flags_validator(
      ["bleu_source", "bleu_ref", "vocab_file"],
      message="--vocab_file must be defined if --bleu_source and --bleu_ref "
              "are defined.")
  def _check_bleu_vocab_file(flags_dict):
    if flags_dict["bleu_source"] and flags_dict["bleu_ref"]:
      return flags_dict["vocab_file"] is not None
    return True

  @flags.multi_flags_validator(
      ["export_dir", "vocab_file"],
      message="--vocab_file must be defined if --export_dir is set.")
  def _check_export_vocab_file(flags_dict):
    if flags_dict["export_dir"]:
      return flags_dict["vocab_file"] is not None
    return True

  flags_core.require_cloud_storage(["data_dir", "model_dir", "export_dir"])
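The multi-flag validators above turn cross-flag constraints into parse-time errors. A hedged sketch of how the first one behaves, assuming absl's flags module is imported as above:

define_transformer_flags()
try:
    flags.FLAGS(['prog', '--train_epochs=5', '--train_steps=1000'])
except flags.IllegalFlagValueError as err:
    print(err)  # Both --train_steps and --train_epochs were set. ...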
Example #25
def define_imagenet_keras_flags():
    common.define_keras_flags()
    flags_core.set_defaults()
    flags.adopt_module_key_flags(common)