Example #1
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    tokenizer = tokenization.FullTokenizer(
            vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

    input_files = []
    for input_pattern in FLAGS.input_file.split(","):
        input_files.extend(tf.gfile.Glob(input_pattern))

    tf.logging.info("*** Reading from input files ***")
    for input_file in input_files:
        tf.logging.info("    %s", input_file)

    rng = random.Random(FLAGS.random_seed)
    instances = create_training_instances(
            input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,
            FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,
            rng)

    output_files = FLAGS.output_file.split(",")
    tf.logging.info("*** Writing to output files ***")
    for output_file in output_files:
        tf.logging.info("  %s", output_file)

    write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,
                                    FLAGS.max_predictions_per_seq, output_files)
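
This entry point only runs once the usual BERT-style flags are registered. A minimal sketch of the definitions it assumes is below; the flag names come from the call sites above, while the defaults are illustrative assumptions rather than confirmed values for this project.

# Hedged sketch: flag definitions the main() above relies on (defaults are assumptions).
import tensorflow as tf

flags = tf.flags
FLAGS = flags.FLAGS

flags.DEFINE_string("input_file", None, "Comma-separated input files or glob patterns.")
flags.DEFINE_string("output_file", None, "Comma-separated output TFRecord files.")
flags.DEFINE_string("vocab_file", None, "Vocabulary file for the BERT tokenizer.")
flags.DEFINE_bool("do_lower_case", True, "Lowercase input text before tokenization.")
flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.")
flags.DEFINE_integer("max_predictions_per_seq", 20, "Maximum masked LM predictions per sequence.")
flags.DEFINE_integer("dupe_factor", 10, "Times to duplicate the input with different masks.")
flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
flags.DEFINE_float("short_seq_prob", 0.1, "Probability of emitting shorter-than-maximum sequences.")
flags.DEFINE_integer("random_seed", 12345, "Seed for data generation.")

if __name__ == "__main__":
    tf.app.run(main)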
Example #2
def subtoken_split(task_id):
    #robust_tokens = load_robust_token()
    token_reader = get_token_reader()

    doc_id_list = get_doc_task(task_id)
    num_doc = len(doc_id_list)

    vocab_file = os.path.join(cpath.data_path, "bert_voca.txt")
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,
                                           do_lower_case=True)

    window_size = 256 - 3    # reserve 3 positions, likely for [CLS]/[SEP] special tokens

    skip = int(window_size / 2)
    ticker = TimeEstimator(num_doc)

    doc_seg_info = {}
    for key in doc_id_list:
        tokens = token_reader.retrieve(key)
        # one list of wordpieces per original token, preserving token boundaries
        fn = tokenizer.wordpiece_tokenizer.tokenize
        sub_tokens = [fn(t) for t in tokens]

        def move(loc, loc_sub, skip):
            # map a jump of `skip` subword positions onto (token index, subword index)
            loc_idx = loc
            num_passed_sw = 0
            loc_sub_idx = loc_sub
            for i in range(skip):
                num_passed_sw += 1
                loc_sub_idx += 1
                if num_passed_sw == len(sub_tokens[loc_idx]):
                    loc_idx += 1
                    num_passed_sw = 0

                if loc_idx >= len(sub_tokens):
                    break
            # roll back to the start of any partially consumed token,
            # so the cursor always lands on a whole-token boundary
            if num_passed_sw > 0:
                loc_sub_idx -= num_passed_sw
            return loc_idx, loc_sub_idx

        loc = 0
        loc_sub = 0

        interval_list = []

        while loc < len(tokens):
            loc_ed, loc_sub_ed = move(loc, loc_sub, skip)
            e = (loc, loc_ed), (loc_sub, loc_sub_ed)
            interval_list.append(e)
            loc = loc_ed
            loc_sub = loc_sub_ed

        doc_seg_info[key] = interval_list
        ticker.tick()

    p = os.path.join(cpath.data_path, "adhoc",
                     "robust_seg_info_{}.pickle".format(task_id))
    pickle.dump(doc_seg_info, open(p, "wb"))
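
To make the token/subtoken bookkeeping above concrete: wordpiece_tokenizer.tokenize is applied to each basic token separately, so sub_tokens is a list of lists whose lengths drive move(). A small standalone sketch, assuming the BERT tokenization module is importable and a local vocab file exists (the path here is hypothetical):

# Demo of the per-token wordpiece nesting that subtoken_split() relies on.
import tokenization

tokenizer = tokenization.FullTokenizer(vocab_file="bert_voca.txt",  # hypothetical path
                                       do_lower_case=True)
tokens = ["unaffable", "weather"]
sub_tokens = [tokenizer.wordpiece_tokenizer.tokenize(t) for t in tokens]
print(sub_tokens)
# With the standard uncased BERT vocab this prints something like:
# [['un', '##aff', '##able'], ['weather']]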
Example #3
def __init__(self):
    self.stopword = load_stopwords()
    self.stemmer = CacheStemmer()
    vocab_file = os.path.join(cpath.data_path, "bert_voca.txt")
    self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,
                                                do_lower_case=True)
    tprint("Loading inv_index for robust")
    self.collection = RobustCollection()
    tprint("Done")
    self.num_candidate = 10
Example #4
def gen_robust_token():
    collection = load_robust_ingham()

    vocab_file = os.path.join(cpath.data_path, "bert_voca.txt")
    tokenizer = tokenization.FullTokenizer(
        vocab_file=vocab_file, do_lower_case=True)

    ticker = TimeEstimator(len(collection))
    nc = {}
    for doc_id, content in collection.items():
        nc[doc_id] = tokenizer.basic_tokenizer.tokenize(content)
        ticker.tick()

    token_path = os.path.join(cpath.data_path, "adhoc", "robust_tokens.pickle")
    pickle.dump(nc, open(token_path, "wb"))
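
Note that this pass stores whole words only: basic_tokenizer splits on whitespace and punctuation (and lowercases), while wordpiece splitting is deferred to a later stage such as subtoken_split() above. A quick sketch of the difference, again with a hypothetical vocab path:

# basic_tokenizer vs. the full basic+wordpiece pipeline.
import tokenization

tokenizer = tokenization.FullTokenizer(vocab_file="bert_voca.txt",  # hypothetical path
                                       do_lower_case=True)
print(tokenizer.basic_tokenizer.tokenize("Unaffable weather!"))
# ['unaffable', 'weather', '!']
print(tokenizer.tokenize("Unaffable weather!"))
# ['un', '##aff', '##able', 'weather', '!']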
Example #5
def save_title_tokens():
    meta_path = os.path.join(cpath.data_path, "adhoc", "robust_meta.pickle")
    meta = pickle.load(open(meta_path, "rb"))
    vocab_file = os.path.join(cpath.data_path, "bert_voca.txt")
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,
                                           do_lower_case=True)

    head_tokens = {}
    ticker = TimeEstimator(len(meta))
    for doc_id in meta:
        date, headline = meta[doc_id]
        h_tokens = tokenizer.basic_tokenizer.tokenize(headline)
        head_tokens[doc_id] = h_tokens
        ticker.tick()

    save_path = os.path.join(cpath.data_path, "adhoc",
                             "robust_title_tokens.pickle")
    pickle.dump(head_tokens, open(save_path, "wb"))
Example #6
def __init__(self):
    tprint("Pipeline Init")
    self.stemmer = CacheStemmer()
    vocab_file = os.path.join(cpath.data_path, "bert_voca.txt")
    self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,
                                                do_lower_case=True)
    # prefer the scratch mount; fall back to the NFS path when it is absent
    self.iteration_dir = "/mnt/scratch/youngwookim/data/tlm_iter1"
    if not os.path.exists("/mnt/scratch/youngwookim/"):
        self.iteration_dir = "/mnt/nfs/work3/youngwookim/data/tlm_iter1"
    self.seg_max_seq = 256
    self.model_max_seq = 512
    self.rng = random.Random(0)
    self.masked_lm_prob = 0.15
    self.short_seq_prob = 0.1
    self.inst_per_job = 1000
    self.stopword = load_stopwords()
    # -3 likely reserves room for [CLS]/[SEP] special tokens
    self.pr = FeatureExtractor(self.seg_max_seq - 3)
    self.tf_record_maker = None
    self.code_tick = CodeTiming()
    tprint("Pipeline Init Done")
Example #7
def main():
    num_inst = 1000 * 1000
    max_seq = 256
    vocab_file = os.path.join(cpath.data_path, "bert_voca.txt")
    tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_file, do_lower_case=True)

    sp = StreamPickler("robust_segments_", 1000)
    ticker = TimeEstimator(num_inst)
    for i in range(num_inst):
        r = get_random_sent()
        s_id, doc_id, loc, g_id, sent = r
        doc_rows = get_doc_sent(doc_id)
        target_tokens, sent_list, prev_tokens, next_tokens = extend(doc_rows, sent, loc, tokenizer, max_seq)

        inst = target_tokens, sent_list, prev_tokens, next_tokens, doc_id

        sp.add(inst)
        ticker.tick()

    sp.flush()
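
StreamPickler is a project-local helper; from the calls above it appears to buffer instances and spill them to numbered pickle files. A hedged sketch consistent with the add()/flush() usage (the chunk-naming scheme and flush-on-threshold behavior are assumptions):

import pickle

class StreamPickler:
    # Assumed semantics: buffer up to chunk_size items, then write one pickle per chunk.
    def __init__(self, prefix, chunk_size):
        self.prefix = prefix
        self.chunk_size = chunk_size
        self.buffer = []
        self.chunk_idx = 0

    def add(self, item):
        self.buffer.append(item)
        if len(self.buffer) >= self.chunk_size:
            self.flush()

    def flush(self):
        if not self.buffer:
            return
        path = "{}{}".format(self.prefix, self.chunk_idx)
        with open(path, "wb") as f:
            pickle.dump(self.buffer, f)
        self.buffer = []
        self.chunk_idx += 1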
Example #8
def __init__(self):
    self.d_num_sub_tokens = dict()
    vocab_file = os.path.join(cpath.data_path, "bert_voca.txt")
    self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,
                                                do_lower_case=True)
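
The d_num_sub_tokens dict suggests this class memoizes per-word subtoken counts. A sketch of the companion method such an __init__ would pair with (the method name and exact behavior are hypothetical):

def num_sub_tokens(self, word):
    # Hypothetical companion method: memoized wordpiece count for a word.
    if word not in self.d_num_sub_tokens:
        pieces = self.tokenizer.wordpiece_tokenizer.tokenize(word)
        self.d_num_sub_tokens[word] = len(pieces)
    return self.d_num_sub_tokens[word]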
Example #9
def train_nli():
    tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                  FLAGS.init_checkpoint)

    if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
        raise ValueError(
            "At least one of `do_train`, `do_eval` or `do_predict` must be True."
        )

    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

    if FLAGS.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, bert_config.max_position_embeddings))

    tf.gfile.MakeDirs(FLAGS.output_dir)

    processor = MnliProcessor()

    label_list = processor.get_labels()

    tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file,
                                           do_lower_case=FLAGS.do_lower_case)

    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            num_shards=FLAGS.num_tpu_cores,
            per_host_input_for_training=is_per_host))

    train_examples = None
    num_train_steps = None
    num_warmup_steps = None
    if FLAGS.do_train:
        train_examples = processor.get_train_examples(FLAGS.data_dir)
        num_train_steps = int(
            len(train_examples) / FLAGS.train_batch_size *
            FLAGS.num_train_epochs)
        num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)

    model_fn = model_fn_builder(bert_config=bert_config,
                                num_labels=len(label_list),
                                init_checkpoint=FLAGS.init_checkpoint,
                                learning_rate=FLAGS.learning_rate,
                                num_train_steps=num_train_steps,
                                num_warmup_steps=num_warmup_steps,
                                use_tpu=FLAGS.use_tpu,
                                use_one_hot_embeddings=FLAGS.use_tpu)

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        predict_batch_size=FLAGS.predict_batch_size)

    if FLAGS.do_train:
        train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
        file_based_convert_examples_to_features(train_examples, label_list,
                                                FLAGS.max_seq_length,
                                                tokenizer, train_file)
        tf.logging.info("***** Running training *****")
        tf.logging.info("  Num examples = %d", len(train_examples))
        tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
        tf.logging.info("  Num steps = %d", num_train_steps)
        train_input_fn = file_based_input_fn_builder(
            input_file=train_file,
            seq_length=FLAGS.max_seq_length,
            is_training=True,
            drop_remainder=True)
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

    if FLAGS.do_eval:
        eval_examples = processor.get_dev_examples(FLAGS.data_dir)
        num_actual_eval_examples = len(eval_examples)
        if FLAGS.use_tpu:
            # TPU requires a fixed batch size for all batches, therefore the number
            # of examples must be a multiple of the batch size, or else examples
            # will get dropped. So we pad with fake examples which are ignored
            # later on. These do NOT count towards the metric (all tf.metrics
            # support a per-instance weight, and these get a weight of 0.0).
            while len(eval_examples) % FLAGS.eval_batch_size != 0:
                eval_examples.append(PaddingInputExample())

        eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
        file_based_convert_examples_to_features(eval_examples, label_list,
                                                FLAGS.max_seq_length,
                                                tokenizer, eval_file)

        tf.logging.info("***** Running evaluation *****")
        tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                        len(eval_examples), num_actual_eval_examples,
                        len(eval_examples) - num_actual_eval_examples)
        tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

        # This tells the estimator to run through the entire set.
        eval_steps = None
        # However, if running eval on the TPU, you will need to specify the
        # number of steps.
        if FLAGS.use_tpu:
            assert len(eval_examples) % FLAGS.eval_batch_size == 0
            eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

        eval_drop_remainder = True if FLAGS.use_tpu else False
        eval_input_fn = file_based_input_fn_builder(
            input_file=eval_file,
            seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=eval_drop_remainder)

        result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

        output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
        with tf.gfile.GFile(output_eval_file, "w") as writer:
            tf.logging.info("***** Eval results *****")
            for key in sorted(result.keys()):
                tf.logging.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    if FLAGS.do_predict:
        predict_examples = processor.get_test_examples(FLAGS.data_dir)
        num_actual_predict_examples = len(predict_examples)
        if FLAGS.use_tpu:
            # TPU requires a fixed batch size for all batches, therefore the number
            # of examples must be a multiple of the batch size, or else examples
            # will get dropped. So we pad with fake examples which are ignored
            # later on.
            while len(predict_examples) % FLAGS.predict_batch_size != 0:
                predict_examples.append(PaddingInputExample())

        predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
        file_based_convert_examples_to_features(predict_examples, label_list,
                                                FLAGS.max_seq_length,
                                                tokenizer, predict_file)

        tf.logging.info("***** Running prediction*****")
        tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                        len(predict_examples), num_actual_predict_examples,
                        len(predict_examples) - num_actual_predict_examples)
        tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

        predict_drop_remainder = True if FLAGS.use_tpu else False
        predict_input_fn = file_based_input_fn_builder(
            input_file=predict_file,
            seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=predict_drop_remainder)

        result = estimator.predict(input_fn=predict_input_fn)

        output_predict_file = os.path.join(FLAGS.output_dir,
                                           "test_results.tsv")
        with tf.gfile.GFile(output_predict_file, "w") as writer:
            num_written_lines = 0
            tf.logging.info("***** Predict results *****")
            for (i, prediction) in enumerate(result):
                probabilities = prediction["probabilities"]
                if i >= num_actual_predict_examples:
                    break
                output_line = "\t".join(
                    str(class_probability)
                    for class_probability in probabilities) + "\n"
                writer.write(output_line)
                num_written_lines += 1
        assert num_written_lines == num_actual_predict_examples
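
For reference, the tokenizer built here does its real work inside file_based_convert_examples_to_features. The core of BERT-style pair encoding looks roughly like the sketch below; encode_pair is a hypothetical name, and truncation/padding are omitted for brevity:

def encode_pair(tokenizer, text_a, text_b, max_seq_length):
    # Simplified BERT pair encoding: [CLS] a [SEP] b [SEP], no padding/truncation.
    tokens_a = tokenizer.tokenize(text_a)
    tokens_b = tokenizer.tokenize(text_b)
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)
    assert len(input_ids) <= max_seq_length
    return input_ids, input_mask, segment_ids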