def main_class():
    start = time.time()
    # Load parameters from the JSON params file
    vocab_dict = id_word_map()
    with open(FLAGS.params_file) as f:
        config = json.load(f)
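    # Dataset-specific values are hard-coded for this corpus: 26,320 training
    # examples, a 250-token max length, and 300-d word embeddings.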
    config["train_size"] = 26320
    config["max_length"] = 250
    config["id_word"] = vocab_dict
    config["word_dim"] = 300
    if config["train_size"] < FLAGS.shuffle_buffer_size:
        FLAGS.shuffle_buffer_size = config["train_size"]

    train_steps = int(config["train_size"] / FLAGS.batch_size *
                      FLAGS.num_epoches)
    logging.info('The number of training steps is {}'.format(train_steps))
    session_config = tf.ConfigProto(log_device_placement=True)
    session_config.gpu_options.per_process_gpu_memory_fraction = 0.7
    session_config.gpu_options.allow_growth = True
    run_config = tf.estimator.RunConfig(
        save_checkpoints_steps=FLAGS.steps_check,
        session_config=session_config,
        keep_checkpoint_max=3,
        tf_random_seed=1)
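    # Warm up over the first warmup_proportion of training steps; allow early
    # stopping only after 40% of training has run.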
    num_warmup_steps = int(train_steps * FLAGS.warmup_proportion)
    early_stop_steps = int(train_steps * 0.4)
    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=model_dir,
        config=run_config,
        params={
            "word_dim": config["word_dim"],
            "id_word": config["id_word"],
            "train_size": config["train_size"],
            'max_length': config["max_length"],
            'emb_file': FLAGS.emb_file,
            'learning_rate': FLAGS.learning_rate,
            'l2_reg_lambda': FLAGS.l2_reg_lambda,
            'dropout_prob': FLAGS.dropout_prob,
            'vocab': FLAGS.word_path,
            'num_filters': FLAGS.num_filters,
            'filter_sizes': list(map(int, FLAGS.filter_sizes.split(","))),
            'num_warmup_steps': num_warmup_steps,
            'train_steps': train_steps,
            'summary_dir': model_dir,
            "label_size": 125,
            "activation": "gelu",
            'use_focal_loss': False,
            'use_author_feature': False,
            'use_category_feature': False,
            'use_keyword_feature': False,
            'feature_dim': FLAGS.feature_dim
        })
    no_increase_steps = int(config["train_size"] / FLAGS.batch_size *
                            FLAGS.early_stop_epoches)

    # Early stopping: end training if the eval 'f1' metric stops improving.
    early_stop_hook = tf.contrib.estimator.stop_if_no_increase_hook(
        estimator,
        metric_name='f1',
        max_steps_without_increase=no_increase_steps,
        min_steps=early_stop_steps,
        run_every_secs=120)
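    # The hook never fires before min_steps and re-checks the latest eval
    # metrics every run_every_secs seconds.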
    acc2 = 0

    # timeline_hook = tf.train.ProfilerHook(save_steps=FLAGS.steps_check, output_dir=model_dir + '/timeline/')
    if FLAGS.do_train:
        input_fn_for_train = lambda: input_fn(
            FLAGS.train_file, config, FLAGS.shuffle_buffer_size)
        train_spec = tf.estimator.TrainSpec(input_fn=input_fn_for_train,
                                            max_steps=train_steps)
        input_fn_for_eval = lambda: input_fn(FLAGS.valid_file, config, 0)
        # best_copier = BestCheckpointCopier(name='best',  # directory within model directory to copy checkpoints to
        #         checkpoints_to_keep=1,  # number of checkpoints to keep
        #         score_metric='acc',  # metric to use to determine "best"
        #         compare_fn=lambda x, y: x.score > y.score,
        #         sort_reverse=True)
        eval_spec = tf.estimator.EvalSpec(
            input_fn=input_fn_for_eval,
            throttle_secs=1200)  # exporters=best_copier
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
        logging.info("Switch to the current directory and Run the command line:" \
                    "tensorboard --logdir=%s" \
                    "\nThen open http://localhost:6006/ into your web browser" % timestamp)
        logging.info("after train and evaluate")
    if FLAGS.do_predict:
        best_dir = model_dir + '/best'
        input_fn_for_test = lambda: input_fn(FLAGS.valid_file, config, 0)
        output_results = estimator.predict(
            input_fn_for_test,
            checkpoint_path=tf.train.latest_checkpoint(best_dir))
        path_label = FLAGS.label_path
        history_dir = FLAGS.history_dir
        acc2, acc1 = post_eval(path_label, model_dir, history_dir,
                               output_results)
        logging.info(best_dir)
        logging.info("The total program took {:.1f}s and top2 acc is {}".format(
            time.time() - start, acc2))

        if acc2 > 0.3:
            input_fn_for_test = lambda: input_fn(FLAGS.pred_file, config, 0)
            output_results = estimator.predict(
                input_fn_for_test,
                checkpoint_path=tf.train.latest_checkpoint(best_dir))
            path_label = FLAGS.label_path
            history_dir = FLAGS.history_dir
            post_pred(path_label, model_dir, history_dir, output_results, acc2)
            logging.info("===********* done  pred top2 {}".format(acc2))
    return acc2


def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
        raise ValueError(
            "At least one of `do_train`, `do_eval` or `do_predict' must be True."
        )

    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

    if FLAGS.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence1 length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, bert_config.max_position_embeddings))

    tf.gfile.MakeDirs(FLAGS.output_dir)

    task_name = FLAGS.task_name.lower()

    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    session_config = tf.ConfigProto(log_device_placement=True)
    session_config.gpu_options.per_process_gpu_memory_fraction = 0.7
    session_config.gpu_options.allow_growth = True
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            num_shards=FLAGS.num_tpu_cores,
            per_host_input_for_training=is_per_host))

    num_train_steps = None
    num_warmup_steps = None
    num_train_example = None
    if FLAGS.do_train:
        train_meta = os.path.join(FLAGS.data_dir, "train.json")
        with open(train_meta, 'r') as f:
            d = json.load(f)
        num_train_example = d['num_train_example']
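        # Total steps = examples / batch_size * epochs; the learning rate is
        # warmed up linearly over the first warmup_proportion of those steps,
        # as in the standard BERT fine-tuning recipe.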
        num_train_steps = int(num_train_example / FLAGS.train_batch_size *
                              FLAGS.num_train_epochs)
        num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)

    model_fn = model_fn_builder(bert_config=bert_config,
                                num_labels=FLAGS.num_lables,
                                init_checkpoint=FLAGS.init_checkpoint,
                                learning_rate=FLAGS.learning_rate,
                                num_train_steps=num_train_steps,
                                num_warmup_steps=num_warmup_steps,
                                use_tpu=FLAGS.use_tpu,
                                use_one_hot_embeddings=FLAGS.use_tpu)

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        predict_batch_size=FLAGS.predict_batch_size)

    if FLAGS.do_train:
        train_file = os.path.join(FLAGS.data_dir, "train*.tfrecord")

        train_input_fn = file_based_input_fn_builder(
            input_file=train_file,
            seq_length=FLAGS.max_seq_length,
            is_training=True,
            drop_remainder=True,
            shuffle=True)
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

    if FLAGS.do_eval:
        eval_file = os.path.join(FLAGS.data_dir, "eval*.tfrecord")
        eval_meta = os.path.join(FLAGS.data_dir, "eval.json")

        with open(eval_meta, 'r') as f:
            d = json.load(f)
            num_eval_examples = d['num_eval_examples']

        tf.logging.info("***** Running evaluation *****")
        tf.logging.info("  Num examples = %d", num_eval_examples)
        tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

        # This tells the estimator to run through the entire set.
        eval_steps = FLAGS.eval_steps
        if eval_steps == 0:
            eval_steps = None
        # However, if running eval on the TPU, you will need to specify the
        # number of steps.
        if FLAGS.use_tpu:
            # Eval will be slightly wrong on the TPU because it will truncate
            # the last partial batch.
            eval_steps = int(num_eval_examples / FLAGS.eval_batch_size)
        eval_drop_remainder = FLAGS.use_tpu
        eval_input_fn = file_based_input_fn_builder(
            input_file=eval_file,
            seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=eval_drop_remainder)

        result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

        output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
        with tf.gfile.GFile(output_eval_file, "w") as writer:
            tf.logging.info("***** Eval results *****")
            for key in sorted(result.keys()):
                tf.logging.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    if FLAGS.do_eval_pred:

        predict_drop_remainder = FLAGS.use_tpu
        predict_file = os.path.join(FLAGS.data_dir, "eval*.tfrecord")
        predict_input_fn = file_based_input_fn_builder(
            input_file=predict_file,
            seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=predict_drop_remainder)

        output_results = estimator.predict(input_fn=predict_input_fn)
        model_dir = FLAGS.output_dir
        path_label = FLAGS.label_path
        history_dir = FLAGS.history_dir
        acc2, acc1 = post_eval(path_label, output_results, model_dir,
                               history_dir)

        logging.info("top2 acc is {}".format(acc2))

        if acc2 > 0.7:
            predict_file = os.path.join(FLAGS.data_dir, "pred*.tfrecord")
            predict_input_fn = file_based_input_fn_builder(
                input_file=predict_file,
                seq_length=FLAGS.max_seq_length,
                is_training=False,
                drop_remainder=predict_drop_remainder)
            output_results = estimator.predict(input_fn=predict_input_fn)
            path_label = FLAGS.label_path
            history_dir = FLAGS.history_dir
            post_pred(path_label, model_dir, history_dir, output_results, acc2)

    if FLAGS.do_predict:
        pred_meta = os.path.join(FLAGS.data_dir, "predict.json")
        predict_file = os.path.join(FLAGS.data_dir, "predict*.tfrecord")
        with open(pred_meta, 'r') as f:
            d = json.load(f)
            num_pred_examples = d['num_pred_examples']

        tf.logging.info("***** Running prediction*****")
        tf.logging.info("  Num examples = %d", num_pred_examples)
        tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

        if FLAGS.use_tpu:
            # Warning: According to tpu_estimator.py Prediction on TPU is an
            # experimental feature and hence not supported here
            raise ValueError("Prediction in TPU not supported")

        predict_drop_remainder = FLAGS.use_tpu
        predict_input_fn = file_based_input_fn_builder(
            input_file=predict_file,
            seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=predict_drop_remainder)

        result = estimator.predict(input_fn=predict_input_fn)

        output_predict_file = os.path.join(FLAGS.output_dir,
                                           "test_results.tsv")
        with tf.gfile.GFile(output_predict_file, "w") as writer:
            tf.logging.info("***** Predict results *****")
            for prediction in result:
                output_line = "\t".join(
                    str(class_probability)
                    for class_probability in prediction) + "\n"
                writer.write(output_line)
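

# Module entry point: a minimal sketch, assuming the FLAGS used above are
# defined via tf.flags elsewhere in this file (the original snippet does not
# show the module footer). tf.app.run() parses the flags and calls main(_).
if __name__ == "__main__":
    tf.app.run()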