Example #1
def _get_model_builder(use_t2t_decoder=True):
    """Returns a LaserTagger model_fn builder."""
    config_json = {
        "hidden_size": 4,
        "intermediate_size": 8,
        "max_position_embeddings": 8,
        "num_attention_heads": 1,
        "num_hidden_layers": 1,
        "vocab_size": 8,
        "use_t2t_decoder": use_t2t_decoder,
        "decoder_num_hidden_layers": 1,
        "decoder_hidden_size": 4,
        "decoder_num_attention_heads": 1,
        "decoder_filter_size": 4,
        "use_full_attention": False,
    }
    config = run_lasertagger_utils.LaserTaggerConfig(**config_json)
    return run_lasertagger_utils.ModelFnBuilder(config=config,
                                                num_tags=2,
                                                init_checkpoint=None,
                                                learning_rate=1e-4,
                                                num_train_steps=10,
                                                num_warmup_steps=1,
                                                use_tpu=False,
                                                use_one_hot_embeddings=False,
                                                max_seq_length=128)
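# Usage sketch (assumed, not part of the original snippet): build() returns a
# model_fn suitable for tf.estimator / TPUEstimator, e.g.:
#   model_fn = _get_model_builder(use_t2t_decoder=False).build()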
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    if not (FLAGS.do_train or FLAGS.do_eval or FLAGS.do_export):
        raise ValueError("At least one of `do_train`, `do_eval` or `do_export` must"
                         " be True.")

    model_config = run_lasertagger_utils.LaserTaggerConfig.from_json_file(
        FLAGS.model_config_file)

    if FLAGS.max_seq_length > model_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, model_config.max_position_embeddings))

    if not FLAGS.do_export:
        tf.gfile.MkDir(FLAGS.output_dir)

    num_tags = len(utils.read_label_map(FLAGS.label_map_file))

    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        keep_checkpoint_max=FLAGS.keep_checkpoint_max)
    # The TPU pipeline config is disabled in this example; when enabled it
    # would read:
    # tpu_config=tf.contrib.tpu.TPUConfig(
    #     iterations_per_loop=FLAGS.iterations_per_loop,
    #     per_host_input_for_training=is_per_host,
    #     eval_training_input_configuration=tf.contrib.tpu.InputPipelineConfig.SLICED))

    if FLAGS.do_train:
        num_train_steps, num_warmup_steps = _calculate_steps(
            FLAGS.num_train_examples, FLAGS.train_batch_size,
            FLAGS.num_train_epochs, FLAGS.warmup_proportion)
    else:
        num_train_steps, num_warmup_steps = None, None

    model_fn = run_lasertagger_utils.ModelFnBuilder(
        config=model_config,
        num_tags=num_tags,
        init_checkpoint=FLAGS.init_checkpoint,
        learning_rate=FLAGS.learning_rate,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_tpu=FLAGS.use_tpu,
        use_one_hot_embeddings=FLAGS.use_tpu,
        max_seq_length=FLAGS.max_seq_length).build()

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        predict_batch_size=FLAGS.predict_batch_size
    )

    if FLAGS.do_train:
        train_input_fn = file_based_input_fn_builder(
            input_file=FLAGS.training_file,
            max_seq_length=FLAGS.max_seq_length,
            is_training=True,
            drop_remainder=True)
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

    # The evaluation block is disabled in this example because it raised an
    # error; see Example #3 for a working version.
    # if FLAGS.do_eval:
    #   # This tells the estimator to run through the entire set.
    #   eval_steps = None
    #   # However, if running eval on the TPU, you will need to specify the
    #   # number of steps.
    #   if FLAGS.use_tpu:
    #     # Eval will be slightly WRONG on the TPU because it will truncate
    #     # the last batch.
    #     eval_steps, _ = _calculate_steps(FLAGS.num_eval_examples,
    #                                      FLAGS.eval_batch_size, 1)
    #
    #   eval_drop_remainder = True if FLAGS.use_tpu else False
    #   eval_input_fn = file_based_input_fn_builder(
    #       input_file=FLAGS.eval_file,
    #       max_seq_length=FLAGS.max_seq_length,
    #       is_training=False,
    #       drop_remainder=eval_drop_remainder)
    #
    #   for ckpt in tf.contrib.training.checkpoints_iterator(
    #       FLAGS.output_dir, timeout=FLAGS.eval_timeout):
    #     result = estimator.evaluate(input_fn=eval_input_fn, checkpoint_path=ckpt,
    #                                 steps=eval_steps)
    #     for key in sorted(result):
    #       tf.logging.info("  %s = %s", key, str(result[key]))

    if FLAGS.do_export:
        tf.logging.info("Exporting the model...")

        def serving_input_fn():
            def _input_fn():
                features = {
                    "input_ids": tf.placeholder(tf.int64, [None, None]),
                    "input_mask": tf.placeholder(tf.int64, [None, None]),
                    "segment_ids": tf.placeholder(tf.int64, [None, None]),
                }
                return tf.estimator.export.ServingInputReceiver(
                    features=features, receiver_tensors=features)

            return _input_fn

        estimator.export_saved_model(
            FLAGS.export_path,
            serving_input_fn(),
            checkpoint_path=FLAGS.init_checkpoint)
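
Both this example and the ones below call a _calculate_steps helper that is
not shown. A minimal sketch, assuming only what the call sites imply (a
four-argument training form and a three-argument eval form that skips the
warmup proportion), could be:

def _calculate_steps(num_examples, batch_size, num_epochs, warmup_proportion=0):
    """Returns (num_steps, num_warmup_steps); a sketch, signature inferred."""
    steps = int(num_examples / batch_size * num_epochs)
    warmup_steps = int(warmup_proportion * steps)
    return steps, warmup_steps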
Example #3
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    if not (FLAGS.do_train or FLAGS.do_eval or FLAGS.do_export):
        raise ValueError(
            "At least one of `do_train`, `do_eval` or `do_export` must"
            " be True.")

    if FLAGS.verb_loss_weight > 0 and (FLAGS.embedding_type is None
                                       or FLAGS.embedding_type
                                       not in ["POS", "POS_concise"]):
        raise ValueError(
            "When the verb loss weight > 0, must specify embedding_type "
            "to be either POS or POS_concise")

    model_config = run_lasertagger_utils.LaserTaggerConfig.from_json_file(
        FLAGS.model_config_file)

    if FLAGS.max_seq_length > model_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, model_config.max_position_embeddings))

    if not FLAGS.do_export:
        tf.io.gfile.makedirs(FLAGS.output_dir)

    num_tags = len(utils.read_label_map(FLAGS.label_map_file))

    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        keep_checkpoint_max=20,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            per_host_input_for_training=is_per_host,
            eval_training_input_configuration=tf.contrib.tpu.InputPipelineConfig.SLICED))

    if FLAGS.do_train:
        num_train_steps, num_warmup_steps = _calculate_steps(
            FLAGS.num_train_examples, FLAGS.train_batch_size,
            FLAGS.num_train_epochs, FLAGS.warmup_proportion)
    else:
        num_train_steps, num_warmup_steps = None, None

    if FLAGS.verb_loss_weight < 0:
        raise ValueError("the weight of verb loss should be >= 0")

    if not FLAGS.use_tpu:
        with open(os.path.expanduser(FLAGS.label_map_file)) as f:
            lines = f.readlines()
    else:
        lines = pd.read_csv(FLAGS.label_map_file, sep="\n", header=None)
        lines = lines.values.tolist()
        lines = [item for sublist in lines for item in sublist]
    lines = [line.strip() for line in lines]

    delete_tags = np.zeros(len(lines))
    delete_tags_ids = []
    keep_tags_ids = []
    for i, line in enumerate(lines):
        if re.match("DELETE", line):
            delete_tags[i] = 1
            delete_tags_ids.append(i)
        if re.match("KEEP", line):
            keep_tags_ids.append(i)

    if FLAGS.embedding_type == "POS":
        model_verb_tags = VERB_TAGS
    elif FLAGS.embedding_type == "POS_concise":
        model_verb_tags = VERB_TAGS_CONCISE
    else:
        model_verb_tags = []

    model_fn = run_lasertagger_utils.ModelFnBuilder(
        config=model_config,
        num_tags=num_tags,
        init_checkpoint=FLAGS.init_checkpoint,
        learning_rate=FLAGS.learning_rate,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_tpu=FLAGS.use_tpu,
        use_one_hot_embeddings=FLAGS.use_tpu,
        max_seq_length=FLAGS.max_seq_length,
        verb_deletion_loss_weight=FLAGS.verb_loss_weight,
        verb_tags=model_verb_tags,
        delete_tags=delete_tags,
        relative_loss_weight=[
            FLAGS.add_tag_loss_weight, FLAGS.a_tag_loss_weight,
            FLAGS.delete_tag_loss_weight
        ],
        smallest_add_tag=3,
        delete_tags_ids=delete_tags_ids,
        keep_tags_ids=keep_tags_ids).build()

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        predict_batch_size=FLAGS.predict_batch_size)

    if FLAGS.do_train:
        train_input_fn = file_based_input_fn_builder(
            input_file=FLAGS.training_file,
            max_seq_length=FLAGS.max_seq_length,
            is_training=True,
            drop_remainder=True)
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

    if FLAGS.do_eval:
        # This tells the estimator to run through the entire set.
        eval_steps = None
        # However, if running eval on the TPU, you will need to specify the
        # number of steps.
        if FLAGS.use_tpu:
            # Eval will be slightly WRONG on the TPU because it will truncate
            # the last batch.
            eval_steps, _ = _calculate_steps(FLAGS.num_eval_examples,
                                             FLAGS.eval_batch_size, 1)

        eval_drop_remainder = FLAGS.use_tpu
        eval_input_fn = file_based_input_fn_builder(
            input_file=FLAGS.eval_file,
            max_seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=eval_drop_remainder)

        for ckpt in tf.contrib.training.checkpoints_iterator(
                FLAGS.output_dir, timeout=FLAGS.eval_timeout):
            result = estimator.evaluate(input_fn=eval_input_fn,
                                        checkpoint_path=ckpt,
                                        steps=eval_steps)
            for key in sorted(result):
                tf.logging.info("  %s = %s", key, str(result[key]))

    if FLAGS.do_export:
        tf.logging.info("Exporting the model...")

        def serving_input_fn():
            def _input_fn():
                features = {
                    "input_ids": tf.placeholder(tf.int64, [None, None]),
                    "input_mask": tf.placeholder(tf.int64, [None, None]),
                    "segment_ids": tf.placeholder(tf.int64, [None, None]),
                }
                return tf.estimator.export.ServingInputReceiver(
                    features=features, receiver_tensors=features)

            return _input_fn

        estimator.export_saved_model(FLAGS.export_path,
                                     serving_input_fn(),
                                     checkpoint_path=FLAGS.init_checkpoint)
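
file_based_input_fn_builder is likewise not shown in these examples. A hedged
sketch in the style of BERT's run_classifier.py TFRecord pipeline (the
"labels" feature name and the shuffle buffer size are assumptions):

def file_based_input_fn_builder(input_file, max_seq_length, is_training,
                                drop_remainder):
    """Builds an input_fn over TFRecords; a sketch, feature names assumed."""
    name_to_features = {
        "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
        "labels": tf.FixedLenFeature([max_seq_length], tf.int64),
    }

    def input_fn(params):
        # TPUEstimator supplies the per-shard batch size through params.
        batch_size = params["batch_size"]
        dataset = tf.data.TFRecordDataset(input_file)
        if is_training:
            dataset = dataset.repeat().shuffle(buffer_size=100)
        dataset = dataset.map(
            lambda record: tf.parse_single_example(record, name_to_features))
        return dataset.batch(batch_size, drop_remainder=drop_remainder)

    return input_fn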
Example #4
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  if not (FLAGS.do_train or FLAGS.do_eval or FLAGS.do_export):
    raise ValueError("At least one of `do_train`, `do_eval` or `do_export` must"
                     " be True.")

  model_config = run_lasertagger_utils.LaserTaggerConfig.from_json_file(
      FLAGS.model_config_file)

  if FLAGS.max_seq_length > model_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, model_config.max_position_embeddings))

  if not FLAGS.do_export:
    tf.io.gfile.makedirs(FLAGS.output_dir)

  num_tags = len(utils.read_label_map(FLAGS.label_map_file))

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      keep_checkpoint_max=FLAGS.keep_checkpoint_max,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          per_host_input_for_training=is_per_host,
          eval_training_input_configuration=tf.contrib.tpu.InputPipelineConfig.SLICED))

  if FLAGS.do_train:
    num_train_steps, num_warmup_steps = _calculate_steps(
        FLAGS.num_train_examples, FLAGS.train_batch_size,
        FLAGS.num_train_epochs, FLAGS.warmup_proportion)
  else:
    num_train_steps, num_warmup_steps = None, None

  model_fn = run_lasertagger_utils.ModelFnBuilder(
      config=model_config,
      num_tags=num_tags,
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu,
      max_seq_length=FLAGS.max_seq_length).build()

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size
  )

  if FLAGS.do_train:
    train_input_fn = file_based_input_fn_builder(
        input_file=FLAGS.training_file,
        max_seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

  if FLAGS.do_export:
    tf.logging.info("Exporting the model...")
    def serving_input_fn():
      def _input_fn():
        features = {
            "input_ids": tf.placeholder(tf.int64, [None, None]),
            "input_mask": tf.placeholder(tf.int64, [None, None]),
            "segment_ids": tf.placeholder(tf.int64, [None, None]),
        }
        return tf.estimator.export.ServingInputReceiver(
            features=features, receiver_tensors=features)
      return _input_fn

    estimator.export_saved_model(
        FLAGS.export_path,
        serving_input_fn(),
        checkpoint_path=FLAGS.init_checkpoint)
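
Once exported, the SavedModel can be exercised with the TF 1.x predictor API.
A minimal usage sketch (the export path and token ids are illustrative, and
the output keys depend on the model_fn's export signature):

predictor = tf.contrib.predictor.from_saved_model(
    "/tmp/lasertagger_export/1571234567")
outputs = predictor({
    "input_ids": [[101, 7592, 2088, 102]],
    "input_mask": [[1, 1, 1, 1]],
    "segment_ids": [[0, 0, 0, 0]],
})
print(outputs)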