Code Example #1
def main(_):

    flags.mark_flag_as_required(EXECUTION_INVOCATION_FLAG.name)
    flags.mark_flags_as_mutual_exclusive(
        (EXECUTABLE_SPEC_FLAG.name, BEAM_EXECUTABLE_SPEC_FLAG.name),
        required=True)

    execution_info = python_execution_binary_utils.deserialize_execution_info(
        EXECUTION_INVOCATION_FLAG.value)
    deserialized_executable_spec = None
    if BEAM_EXECUTABLE_SPEC_FLAG.value is not None:
        deserialized_executable_spec = (
            python_execution_binary_utils.deserialize_executable_spec(
                BEAM_EXECUTABLE_SPEC_FLAG.value, with_beam=True))
    else:
        deserialized_executable_spec = (
            python_execution_binary_utils.deserialize_executable_spec(
                EXECUTABLE_SPEC_FLAG.value, with_beam=False))
    logging.info('execution_info = %r\n', execution_info)
    logging.info('executable_spec = %s\n',
                 text_format.MessageToString(deserialized_executable_spec))

    # An MLMD connection config being set indicates a driver execution rather than
    # an executor execution, since accessing MLMD is not supported for executors.
    if MLMD_CONNECTION_CONFIG_FLAG.value:
        mlmd_connection_config = (
            python_execution_binary_utils.deserialize_mlmd_connection_config(
                MLMD_CONNECTION_CONFIG_FLAG.value))
        run_result = _run_driver(deserialized_executable_spec,
                                 mlmd_connection_config, execution_info)
    else:
        run_result = _run_executor(deserialized_executable_spec,
                                   execution_info)

    if run_result:
        with fileio.open(execution_info.execution_output_uri, 'wb') as f:
            f.write(run_result.SerializeToString())
Code Example #2
                FLAGS.sample_rate_key,
                FLAGS.label_key,
                FLAGS.speaker_id_key,
                FLAGS.average_over_time,
                FLAGS.delete_audio_from_output,
                output_filename,
                split_embeddings_into_separate_tables=FLAGS.
                split_embeddings_into_separate_tables,  # pylint:disable=line-too-long
                use_frontend_fn=FLAGS.use_frontend_fn,
                input_format=input_format,
                output_format=output_format,
                suffix=i)


if __name__ == '__main__':
    flags.mark_flags_as_required([
        'output_filename',
        'embedding_names',
        'embedding_modules',
        'module_output_keys',
        'audio_key',
        'label_key',
    ])
    flags.mark_flags_as_mutual_exclusive(['input_glob', 'tfds_dataset'],
                                         required=True)
    flags.mark_flags_as_mutual_exclusive(
        ['tfds_dataset', 'sample_rate_key', 'sample_rate'], required=True)
    tf.compat.v2.enable_v2_behavior()
    assert tf.executing_eagerly()
    app.run(main)
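
For reference, a minimal, self-contained sketch of the flag pattern used in the excerpts above (the flag names input_glob and tfds_dataset are reused purely for illustration; everything else is a generic absl skeleton, not code from the project):

# minimal_mutex_flags.py -- illustrative sketch only.
from absl import app
from absl import flags

FLAGS = flags.FLAGS

flags.DEFINE_string('input_glob', None, 'Glob matching input files.')
flags.DEFINE_string('tfds_dataset', None, 'Name of a TFDS dataset to read.')

# With required=True, exactly one of the two flags must be set; absl raises a
# flags error at startup if both or neither is provided.
flags.mark_flags_as_mutual_exclusive(['input_glob', 'tfds_dataset'],
                                     required=True)


def main(_):
    source = FLAGS.input_glob or FLAGS.tfds_dataset
    print('Reading from:', source)


if __name__ == '__main__':
    app.run(main)
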
Code Example #3
        if not FLAGS.signer_key:
            raise app.UsageError("ERROR: --signer_key flag not set.")
        if not is_signature_valid(json_data, FLAGS.signature, FLAGS.signer_key):
            raise app.UsageError(
                "ERROR: Signature over list of logs is not valid.")

    parsed_json = json.loads(json_data)
    if not FLAGS.log_list_schema:
        raise app.UsageError("ERROR: --log_list_schema flag not set.")
    if not is_log_list_valid(parsed_json, FLAGS.log_list_schema):
        raise app.UsageError(
            "ERROR: Log list is signed but does not conform to the schema.", 2)
    if FLAGS.header_output:
        generate_cpp_header(parsed_json, FLAGS.header_output)
    if FLAGS.java_output:
        generate_java_source(parsed_json, FLAGS.java_output, FLAGS.java_class)
    if FLAGS.openssl_output:
        generate_openssl_conf(parsed_json, FLAGS.openssl_output)

    if not FLAGS.header_output and \
       not FLAGS.java_output and \
       not FLAGS.openssl_output:
        print_formatted_log_list(parsed_json)


if __name__ == "__main__":
    gflags.mark_flags_as_required(["log_list", "log_list_schema"])
    gflags.mark_flags_as_mutual_exclusive(["signature", "skip_signature_check"],
                                          required=True)
    app.run(main)
Code Example #4
    'wrapped tf.examples as float features using the feature '
    'key specified by --feature_key.')
flags.DEFINE_string(
    'feature_key', 'audio/reference/raw_audio',
    'Tf.example feature that contains the samples that are '
    'to be processed.')
flags.DEFINE_string('embeddings', None, 'The embeddings output file path.')
flags.DEFINE_string('stats', None, 'The stats output file path.')
flags.DEFINE_string('model_ckpt', 'data/vggish_model.ckpt',
                    'The model checkpoint that should be loaded.')
flags.DEFINE_integer('model_embedding_dim', 128,
                     'The dimension of the model\'s embedding layer.')
flags.DEFINE_integer('model_step_size', 8000,
                     'Number of samples between each extracted window.')

flags.mark_flags_as_mutual_exclusive(['input_files', 'tfrecord_input'],
                                     required=True)
FLAGS = flags.FLAGS

ModelConfig = collections.namedtuple('ModelConfig',
                                     'model_ckpt embedding_dim step_size')


def main(unused_argv):
    if not FLAGS.embeddings and not FLAGS.stats:
        raise ValueError('No output provided. Please specify at least one of '
                         '"--embeddings" or "--stats".')
    pipeline = create_embeddings_beam.create_pipeline(
        tfrecord_input=FLAGS.tfrecord_input,
        files_input_list=FLAGS.input_files,
        feature_key=FLAGS.feature_key,
        embedding_model=ModelConfig(model_ckpt=FLAGS.model_ckpt,
Code Example #5
File: kiwishot.py Project: AJubatus/Kiwishot
                  None,
                  'Take a screenshot of the active window',
                  short_name='a')
flags.DEFINE_bool('full',
                  None,
                  'Take a screenshot of the entire screen',
                  short_name='f')

flags.DEFINE_bool('upload', False, 'Upload to image host', short_name='u')

flags.DEFINE_string('save_location',
                    '/tmp/tmp.png',
                    'File to save screenshot to, default is /tmp/tmp.png',
                    short_name='s')

flags.mark_flags_as_mutual_exclusive(['region', 'active', 'full'])


def upload_image(filepath):
    """Uploads image to host and prints link to console"""
    print('Uploading screenshot')
    link = imgur.upload(filepath)
    print(link)


def take_screenshot(region=False, active=False, full=False, upload=True):
    """Takes screenshot based on given flags.

    Also pushes image to clipboard using xclip.
    If gui is not set, it also uploads the image.
    """
Code Example #6
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api

PROJECT_SHORT_NAME = "tfra"
PROJECT_FULL_NAME = "TensorFlow Recommenders Addons"

FLAGS = flags.FLAGS

flags.DEFINE_string("git_branch",
                    default=None,
                    help="The name of the corresponding branch on github.")

CODE_PREFIX_TEMPLATE = "https://github.com/tensorflow/recommenders-addons/tree/{git_branch}/tensorflow_recommenders_addons"
flags.DEFINE_string("code_url_prefix", None,
                    "The url prefix for links to the code.")
flags.mark_flags_as_mutual_exclusive(["code_url_prefix", "git_branch"])

flags.DEFINE_string("output_dir", "/tmp/recommenders_addons_api",
                    "Where to output the docs")

flags.DEFINE_bool("search_hints", True,
                  "Include metadata search hints in the generated files")

flags.DEFINE_string("site_path", "recommenders-addons/api_docs/python",
                    "Path prefix in the _toc.yaml")


def _top_source_link(location):
    """Retrns a source link with Github image, like the notebook butons."""
    table_template = textwrap.dedent("""
    <table class="tfo-notebook-buttons tfo-api" align="left">
Code Example #7
from injector import Module, provider, singleton
from prometheus_client import Counter
from requests import RequestException, Response, Session, Timeout

from rep0st.db.post import Post, Status, post_type_from_media_path
from rep0st.db.tag import Tag
from rep0st.util import get_secret

log = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DEFINE_string('pr0gramm_api_user', None,
                    'Name of the pr0gramm user to use the API with.')
flags.DEFINE_string(
    'pr0gramm_api_user_file', None,
    'Path to the file containing the pr0gramm user to use the API with.')
flags.mark_flags_as_mutual_exclusive(
    ['pr0gramm_api_user', 'pr0gramm_api_user_file'], required=True)
flags.DEFINE_string('pr0gramm_api_password', None,
                    'Password of the pr0gramm user to use the API with.')
flags.DEFINE_string(
    'pr0gramm_api_password_file', None,
    'Path to the file containing the password of the pr0gramm user to use the API with.'
)
flags.mark_flags_as_mutual_exclusive(
    ['pr0gramm_api_password', 'pr0gramm_api_password_file'], required=True)
flags.DEFINE_string('pr0gramm_api_baseurl_api', 'https://pr0gramm.com/api',
                    'Baseurl for the pr0gramm API.')
flags.DEFINE_string('pr0gramm_api_baseurl_img', 'https://img.pr0gramm.com',
                    'Baseurl for the pr0gramm image API.')
flags.DEFINE_string('pr0gramm_api_baseurl_vid', 'https://vid.pr0gramm.com',
                    'Baseurl for the pr0gramm video API.')
flags.DEFINE_string('pr0gramm_api_baseurl_full', 'https://full.pr0gramm.com',
Code Example #8
flags.DEFINE_integer(
    "batch_size",
    None,
    "Batch size to use for unsupervised training. "
    "In the distributed context, this is the global batch size.",
    lower_bound=1,
)
flags.mark_flag_as_required("batch_size")

# Exactly one of these must be defined
flags.DEFINE_string("data_file", None,
                    "Path to a tfrecord to use as unsupervised data.")
flags.DEFINE_string(
    "data_listing", None,
    "Path to a newline-separated file specifying tfrecord paths.")
flags.mark_flags_as_mutual_exclusive(["data_file", "data_listing"],
                                     required=True)

# Flags to change if you're using a different dataset setup
flags.DEFINE_integer(
    "data_tilesize",
    160,
    "Tilesize of data used for unsupervised learning.",
    lower_bound=1,
)
flags.DEFINE_string(
    "data_feature_name",
    "spot_naip_phr",
    "Name of the key in the key-value dictionary containing unsupervised examples.",
)

# Optional flags to configure dataset loading
Code Example #9
File: oobmpower.py Project: JSteeleIR/RPIOOBMPower
    import RPi.GPIO as GPIO
except RuntimeError:
    print("Error importing RPi.GPIO! You may need to re-run with sudo!")

from time import sleep
from absl import app
from absl import flags

FLAGS = flags.FLAGS

flags.DEFINE_bool('prompt', True, "Prompt to confirm the action")
flags.DEFINE_bool('power', None, "Press 'Power' button on the slave system")
flags.DEFINE_bool('reset', None, "Press 'Reset' button on the slave system")
flags.DEFINE_bool('force', False, "Force the power action. (Hard power-off)")

flags.mark_flags_as_mutual_exclusive(['power', 'reset'])

# Set GPIO to BCM mode.
GPIO.setmode(GPIO.BCM)

# Declare globals for the pin mappings.
POWERPIN = 23
RESETPIN = 24


def prompt():
    """
    Prompt the user to confirm the power/reset action.
    """

    yes = {'yes', 'y', 'ye'}
Code Example #10
                FLAGS.zinbwave_dims,
                FLAGS.zinbwave_epsilon,
                FLAGS.zinbwave_keep_variance,
                FLAGS.zinbwave_gene_covariate,
                metrics.silhouette,
                metrics.ami,
                metrics.ari,
                metrics.kmeans_silhouette,
                adata.n_obs,
                FLAGS.tissue,
                n_clusters,
            ])

        if FLAGS.output_h5ad:
            adata.write(FLAGS.output_h5ad)


if __name__ == '__main__':
    flags.mark_flags_as_mutual_exclusive(['input_loom', 'input_csvs'])
    flags.mark_flag_as_required('output_csv')
    flags.mark_flag_as_required('reduced_dim')
    flags.mark_flag_as_required('tissue')
    flags.mark_flag_as_required('source')
    flags.register_multi_flags_validator(
        flag_names=(
            ['source'] +
            list(itertools.chain.from_iterable(_SOURCE_TO_FLAGS.values()))),
        multi_flags_checker=check_flags_combination,
        message='Source and other flags are not compatible.')
    app.run(main)
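
Example #10 registers a multi-flag validator with a checker named check_flags_combination that is not shown in the excerpt. A minimal sketch of what such a checker can look like, assuming a hypothetical _SOURCE_TO_FLAGS mapping (the project's real mapping and checker may differ):

from absl import flags

# Hypothetical mapping from a value of --source to the flags that value requires.
_SOURCE_TO_FLAGS = {
    'loom': ['input_loom'],
    'csv': ['input_csvs'],
}


def check_flags_combination(flags_dict):
    """Checker passed to flags.register_multi_flags_validator.

    Receives a dict mapping each validated flag name to its current value and
    returns True if the combination is acceptable; returning False triggers
    the validator's error message.
    """
    required = _SOURCE_TO_FLAGS.get(flags_dict['source'], [])
    return all(flags_dict[name] is not None for name in required)
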
Code Example #11
def define_ncf_flags():
    """Add flags for running ncf_main."""
    # Add common flags
    flags_core.define_base(export_dir=False)
    flags_core.define_performance(num_parallel_calls=False,
                                  inter_op=False,
                                  intra_op=False,
                                  synthetic_data=True,
                                  max_train_steps=False,
                                  dtype=False,
                                  all_reduce_alg=False)
    flags_core.define_device(tpu=True)
    flags_core.define_benchmark()

    flags.adopt_module_key_flags(flags_core)

    flags_core.set_defaults(model_dir="/tmp/ncf/",
                            data_dir="/tmp/movielens-data/",
                            train_epochs=2,
                            batch_size=256,
                            hooks="ProfilerHook",
                            tpu=None)

    # Add ncf-specific flags
    flags.DEFINE_enum(
        name="dataset",
        default="ml-1m",
        enum_values=["ml-1m", "ml-20m"],
        case_sensitive=False,
        help=flags_core.help_wrap("Dataset to be trained and evaluated."))

    flags.DEFINE_boolean(
        name="download_if_missing",
        default=True,
        help=flags_core.help_wrap(
            "Download data to data_dir if it is not already present."))

    flags.DEFINE_string(
        name="eval_batch_size",
        default=None,
        help=flags_core.help_wrap(
            "The batch size used for evaluation. This should generally be larger"
            "than the training batch size as the lack of back propagation during"
            "evaluation can allow for larger batch sizes to fit in memory. If not"
            "specified, the training batch size (--batch_size) will be used."))

    flags.DEFINE_integer(
        name="num_factors",
        default=8,
        help=flags_core.help_wrap("The Embedding size of MF model."))

    # Set the default as a list of strings to be consistent with input arguments
    flags.DEFINE_list(
        name="layers",
        default=["64", "32", "16", "8"],
        help=flags_core.help_wrap(
            "The sizes of hidden layers for MLP. Example "
            "to specify different sizes of MLP layers: --layers=32,16,8,4"))

    flags.DEFINE_float(
        name="mf_regularization",
        default=0.,
        help=flags_core.help_wrap(
            "The regularization factor for MF embeddings. The factor is used by "
            "regularizer which allows to apply penalties on layer parameters or "
            "layer activity during optimization."))

    flags.DEFINE_list(
        name="mlp_regularization",
        default=["0.", "0.", "0.", "0."],
        help=flags_core.help_wrap(
            "The regularization factor for each MLP layer. See mf_regularization "
            "help for more info about regularization factor."))

    flags.DEFINE_integer(
        name="num_neg",
        default=4,
        help=flags_core.help_wrap(
            "The Number of negative instances to pair with a positive instance."
        ))

    flags.DEFINE_float(name="learning_rate",
                       default=0.001,
                       help=flags_core.help_wrap("The learning rate."))

    flags.DEFINE_float(name="beta1",
                       default=0.9,
                       help=flags_core.help_wrap(
                           "beta1 hyperparameter for the Adam optimizer."))

    flags.DEFINE_float(name="beta2",
                       default=0.999,
                       help=flags_core.help_wrap(
                           "beta2 hyperparameter for the Adam optimizer."))

    flags.DEFINE_float(name="epsilon",
                       default=1e-8,
                       help=flags_core.help_wrap(
                           "epsilon hyperparameter for the Adam "
                           "optimizer."))

    flags.DEFINE_float(
        name="hr_threshold",
        default=None,
        help=flags_core.help_wrap(
            "If passed, training will stop when the evaluation metric HR is "
            "greater than or equal to hr_threshold. For dataset ml-1m, the "
            "desired hr_threshold is 0.68 which is the result from the paper; "
            "For dataset ml-20m, the threshold can be set as 0.95 which is "
            "achieved by MLPerf implementation."))

    flags.DEFINE_bool(
        name="ml_perf",
        default=None,
        help=flags_core.help_wrap(
            "If set, changes the behavior of the model slightly to match the "
            "MLPerf reference implementations here: \n"
            "https://github.com/mlperf/reference/tree/master/recommendation/"
            "pytorch\n"
            "The two changes are:\n"
            "1. When computing the HR and NDCG during evaluation, remove "
            "duplicate user-item pairs before the computation. This results in "
            "better HRs and NDCGs.\n"
            "2. Use a different soring algorithm when sorting the input data, "
            "which performs better due to the fact the sorting algorithms are "
            "not stable."))

    flags.DEFINE_integer(
        name="seed",
        default=None,
        help=flags_core.help_wrap(
            "This value will be used to seed both NumPy and TensorFlow."))

    flags.DEFINE_bool(
        name="hash_pipeline",
        default=False,
        help=flags_core.help_wrap(
            "This flag will perform a separate run of the pipeline and hash "
            "batches as they are produced. \nNOTE: this will significantly slow "
            "training. However it is useful to confirm that a random seed is "
            "does indeed make the data pipeline deterministic."))

    @flags.validator("eval_batch_size",
                     "eval_batch_size must be at least {}".format(
                         rconst.NUM_EVAL_NEGATIVES + 1))
    def eval_size_check(eval_batch_size):
        return (eval_batch_size is None
                or int(eval_batch_size) > rconst.NUM_EVAL_NEGATIVES)

    flags.DEFINE_bool(
        name="use_subprocess",
        default=True,
        help=flags_core.help_wrap(
            "By default, ncf_main.py starts async data generation process as a "
            "subprocess. If set to False, ncf_main.py will assume the async data "
            "generation process has already been started by the user."))

    flags.DEFINE_integer(
        name="cache_id",
        default=None,
        help=flags_core.help_wrap(
            "Use a specified cache_id rather than using a timestamp. This is only "
            "needed to synchronize across multiple workers. Generally this flag will "
            "not need to be set."))

    flags.DEFINE_bool(
        name="use_xla_for_gpu",
        default=False,
        help=flags_core.help_wrap(
            "If True, use XLA for the model function. Only works when using a "
            "GPU. On TPUs, XLA is always used"))

    flags.mark_flags_as_mutual_exclusive(["use_xla_for_gpu", "tpu"])
Code Example #12
from absl import flags

FLAGS = flags.FLAGS

flags.DEFINE_string('host', 'localhost',
                    'Vespa http search front-end to connect to.')

flags.DEFINE_integer('port', 8080, 'Port of the search front-end.')

flags.DEFINE_string(
    'yql', None, 'Complete YQL Query. May or may not be terminated with \';\'')

flags.DEFINE_string('query', None,
                    'Simple Vespa query. Mutually exclusive with --yql')

flags.mark_flags_as_mutual_exclusive(('yql', 'query'), required=True)

flags.DEFINE_boolean('ssl', False, 'Should the query be issued over https.')

flags.DEFINE_list(
    'param', [], 'Parameters to be added to the request. Comma-separated list, '
    'e.g. hitcountestimate=True,tracelevel=2')


def parse_args(argv):
    if len(argv) == 1:
        print('Options:\n%s' % FLAGS)
        exit(1)
    flags.FLAGS(argv)

Code Example #13
File: keras_doctest.py Project: bhardwajRahul/keras
import tensorflow.compat.v2 as tf

tf.compat.v1.enable_v2_behavior()

# We put doctest after absltest so that it picks up the unittest monkeypatch.
# Otherwise doctest tests aren't runnable at all.
import doctest  # pylint: disable=g-import-not-at-top,g-bad-import-order

FLAGS = flags.FLAGS

flags.DEFINE_string("module", None, "A specific module to run doctest on.")
flags.DEFINE_boolean("list", None,
                     "List all the modules in the core package imported.")
flags.DEFINE_string("file", None, "A specific file to run doctest on.")

flags.mark_flags_as_mutual_exclusive(["module", "file"])
flags.mark_flags_as_mutual_exclusive(["list", "file"])

PACKAGE = "keras."


def find_modules():
    """Finds all the modules in the core package imported.

    Returns:
      A list containing all the modules in the keras package.
    """

    tf_modules = []
    for name, module in sys.modules.items():
        if name.startswith(PACKAGE):
Code Example #14
      'quantize_aware_training': FLAGS.qat,
      'tflite': FLAGS.tflite,
  }
  if not tf.io.gfile.exists(FLAGS.output_directory):
    tf.io.gfile.makedirs(FLAGS.output_directory)
  load_and_write_model(
      keras_model_args, checkpoint_to_load, FLAGS.output_directory)
  assert tf.io.gfile.exists(FLAGS.output_directory)
  logging.info('Successfully wrote to: %s', FLAGS.output_directory)

  # Sanity check the resulting model.
  logging.info('Starting sanity check...')
  model = tf.saved_model.load(FLAGS.output_directory)

  if FLAGS.frontend:
    input_np = np.zeros([2, 32000], dtype=np.float32)
    expected_output_shape = (14, 2048)
  else:
    input_np = np.zeros([7, 96, 64, 1], dtype=np.float32)
    expected_output_shape = (7, 2048)

  np.testing.assert_array_equal(
      model(input_np)['embedding'].numpy().shape,
      expected_output_shape)
  logging.info('Passed sanity check.')

if __name__ == '__main__':
  flags.mark_flags_as_required(['output_directory'])
  flags.mark_flags_as_mutual_exclusive(['logdir', 'checkpoint_filename'])
  app.run(main)
Code Example #15
                    default=None,
                    help='The name of the corresponding branch on github.')

flags.DEFINE_string("output_dir", "/tmp/io_api", "Where to output the docs")

CODE_PREFIX_TEMPLATE = "https://github.com/tensorflow/io/tree/{git_branch}/tensorflow_io"
flags.DEFINE_string("code_url_prefix", None,
                    "The url prefix for links to the code.")

flags.DEFINE_bool("search_hints", True,
                  "Include metadata search hints in the generated files")

flags.DEFINE_string("site_path", "io/api_docs/python",
                    "Path prefix in the _toc.yaml")

flags.mark_flags_as_mutual_exclusive(['code_url_prefix', 'git_branch'])


def main(argv):
    if argv[1:]:
        raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))

    if FLAGS.git_branch:
        code_url_prefix = CODE_PREFIX_TEMPLATE.format(
            git_branch=FLAGS.git_branch)
    elif FLAGS.code_url_prefix:
        code_url_prefix = FLAGS.code_url_prefix
    else:
        code_url_prefix = CODE_PREFIX_TEMPLATE.format(git_branch='master')

    doc_generator = generate_lib.DocGenerator(
Code Example #16
import tensorflow.compat.v2 as tf

tf.compat.v1.enable_v2_behavior()

# We put doctest after absltest so that it picks up the unittest monkeypatch.
# Otherwise doctest tests aren't runnable at all.
import doctest  # pylint: disable=g-import-not-at-top, g-bad-import-order

FLAGS = flags.FLAGS

flags.DEFINE_string('module', None, 'A specific module to run doctest on.')
flags.DEFINE_boolean('list', None,
                     'List all the modules in the core package imported.')
flags.DEFINE_string('file', None, 'A specific file to run doctest on.')

flags.mark_flags_as_mutual_exclusive(['module', 'file'])
flags.mark_flags_as_mutual_exclusive(['list', 'file'])

PACKAGE = 'tensorflow.python.'


def find_modules():
    """Finds all the modules in the core package imported.

    Returns:
      A list containing all the modules in tensorflow.python.
    """

    tf_modules = []
    for name, module in sys.modules.items():
        if name.startswith(PACKAGE):
Code Example #17
    optimizer.apply_gradients(
        zip(gradients, word2vec.trainable_variables))

    return loss, learning_rate

  average_loss = 0.
  for step, (inputs, labels, progress) in enumerate(dataset):
    loss, learning_rate = train_step(inputs, labels, progress)
    average_loss += loss.numpy().mean()
    if step % log_per_steps == 0:
      if step > 0:
        average_loss /= log_per_steps
      print('step:', step, 'average_loss:', average_loss,
            'learning_rate:', learning_rate.numpy())
      average_loss = 0.

  syn0_final = word2vec.weights[0].numpy()
  np.save(os.path.join(FLAGS.out_dir, 'syn0_final'), syn0_final)
  with tf.io.gfile.GFile(os.path.join(FLAGS.out_dir, 'vocab.txt'), 'w') as f:
    for w in tokenizer.table_words:
      f.write(w + '\n')
  print('Word embeddings saved to',
        os.path.join(FLAGS.out_dir, 'syn0_final.npy'))
  print('Vocabulary saved to', os.path.join(FLAGS.out_dir, 'vocab.txt'))


if __name__ == '__main__':
  flags.mark_flags_as_mutual_exclusive(['filenames', 'in_dir'])
  app.run(main)