# Example no. 1 (score: 0)
def gen_prediction_output(using_bert, args):
    """Run the exported model over the whole test set and cache raw predictions.

    Args:
        using_bert: if True, use the BERT predictor (`fastPredictionBert`);
            otherwise use the plain predictor (`fastPredict`), which also
            needs the per-sample word features.
        args: parsed CLI namespace; reads `model_pb_dir`, `label_less` and
            `prediction_result_path`.

    Side effects:
        Writes "bert_true_label.npy" (gold test labels) and
        `args.prediction_result_path` (array of raw prediction results).
    """
    if using_bert:
        fp = fastPredictionBert(bert_config.get(args.model_pb_dir),
                                bert_config, args.label_less)
    else:
        fp = fastPredict(bert_config.get(args.model_pb_dir), bert_config,
                         args.label_less)
        # The non-BERT predictor additionally consumes word-level features.
        test_data_word_X = np.load(fp.data_loader.test_word_path)
    true_labels = np.array(fp.get_orig_test_label())
    # Persist the gold labels for later metric computation, but keep using
    # the in-memory array instead of reloading the file we just wrote.
    np.save("bert_true_label.npy", true_labels)
    test_data_X = np.load(fp.data_loader.test_X_path)
    prediction_list = []
    for i, text in enumerate(test_data_X):
        if using_bert:
            prediction_list.append(
                fp.predict_text(text,
                                None,
                                orig_labels=true_labels[i],
                                raw_test_data=False,
                                using_bert=True,
                                return_raw_result=True))
        else:
            prediction_list.append(
                fp.predict_text(text, test_data_word_X[i], None, False,
                                False, True))
    np.save(args.prediction_result_path, np.array(prediction_list))
# Example no. 2 (score: 0)
def cal_span_level_micro_average(args):
    """Compute span-level micro-averaged NER metrics for the test set.

    If the cached prediction array is missing it is generated first.
    The only model-type-specific piece is the id->slot-label mapping;
    entity extraction and metric computation are shared by both branches.

    Args:
        args: parsed CLI namespace; reads `prediction_result_path`,
            `model_type`, `has_cls`, `model_pb_dir` and `label_less`.
    """
    if not os.path.exists(args.prediction_result_path):
        gen_prediction_output(args.model_type == "bert", args)
    prediction_array = np.load(args.prediction_result_path)
    orig_texts, orig_labels = gen_orig_test_text_label(args.has_cls)
    if args.model_type == "bert":
        # The BERT tokenizer carries its own id->slot mapping.
        fp = fastPredictionBert(bert_config.get(args.model_pb_dir),
                                bert_config, args.label_less)
        id2slot_dict = fp.data_loader.tokenizer.id2slot
    else:
        # Hard-coded id->slot mapping used by the non-BERT model.
        id2slot_dict = {
            7: "O",
            1: "B-LOC",
            2: "B-PER",
            3: "B-ORG",
            4: "I-LOC",
            5: "I-ORG",
            6: "I-PER"
        }
    true_entity_list = gen_entity_from_label_id_list(orig_texts,
                                                     orig_labels,
                                                     id2slot_dict,
                                                     orig_test=True)
    prediction_entity_list = gen_entity_from_label_id_list(
        orig_texts, prediction_array, id2slot_dict, orig_test=False)
    cal_mertric_from_two_list(prediction_entity_list, true_entity_list)
# Example no. 3 (score: 0)
def run_bert(args):
    """Train/evaluate the BERT sequence-labelling model and export a SavedModel.

    Builds the data loader, `tf.estimator.Estimator`, early-stopping hook and
    best-model exporter from CLI `args` plus the module-level `bert_config`,
    then (when `args.do_train` is set) runs `train_and_evaluate` and exports
    the trained model under `bert_config.get(args.model_pb_dir)`.
    """
    vocab_file_path = os.path.join(
        bert_config.get("bert_pretrained_model_path"),
        bert_config.get("vocab_file"))
    # NOTE(review): this lookup uses `config`, not `bert_config` like every
    # surrounding lookup — presumably intentional, but confirm it is not a typo.
    bert_config_file = os.path.join(
        bert_config.get("bert_pretrained_model_path"),
        config.get("bert_config_path"))
    slot_file = os.path.join(bert_config.get("slot_list_root_path"),
                             bert_config.get("bert_slot_file_name"))
    # 384 and the two booleans are positional loader options; their exact
    # meaning is defined in bertPrepareData (not visible in this file).
    data_loader = bertPrepareData(vocab_file_path, slot_file, bert_config,
                                  bert_config_file, 384, True, False)
    print(data_loader.train_valid_split_data_path)
    # Ceiling-divide sample count by batch size to get steps per epoch.
    if data_loader.train_samples_nums % args.train_batch_size != 0:
        each_epoch_steps = int(
            data_loader.train_samples_nums / args.train_batch_size) + 1
    else:
        each_epoch_steps = int(data_loader.train_samples_nums /
                               args.train_batch_size)
    # each_epoch_steps = int(data_loader.train_samples_nums/args.train_batch_size)+1
    logger.info('*****train_set sample nums:{}'.format(
        data_loader.train_samples_nums))
    logger.info('*****train each epoch steps:{}'.format(each_epoch_steps))
    train_steps_nums = each_epoch_steps * args.epochs
    logger.info('*****train_total_steps:{}'.format(train_steps_nums))
    decay_steps = args.decay_epoch * each_epoch_steps
    logger.info('*****train decay steps:{}'.format(decay_steps))
    # dropout_prob is the drop probability (not the keep probability).
    params = {
        "dropout_prob": args.dropout_prob,
        "num_labels": data_loader.slot_label_size,
        "rnn_size": args.rnn_units,
        "num_layers": args.num_layers,
        "hidden_units": args.hidden_units,
        "decay_steps": decay_steps
    }
    # dist_strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=args.gpu_nums)
    # Let TF grab GPU memory incrementally instead of reserving it all upfront.
    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True
    #"bert_ce_model_dir"
    # Checkpoint/summary once per epoch; keep only the two newest checkpoints.
    run_config = tf.estimator.RunConfig(
        model_dir=bert_config.get(args.model_checkpoint_dir),
        save_summary_steps=each_epoch_steps,
        save_checkpoints_steps=each_epoch_steps,
        session_config=config_tf,
        keep_checkpoint_max=2,
        # train_distribute=dist_strategy
    )

    bert_init_checkpoints = os.path.join(
        bert_config.get("bert_pretrained_model_path"),
        bert_config.get("bert_init_checkpoints"))
    model_fn = bert_model_fn_builder(bert_config_file, bert_init_checkpoints,
                                     args)
    estimator = tf.estimator.Estimator(model_fn,
                                       params=params,
                                       config=run_config)
    # train_hook_one = RestoreCheckpointHook(bert_init_checkpoints)
    # Stop training early if the loss has not decreased for tolerant_steps.
    early_stopping_hook = tf.contrib.estimator.stop_if_no_decrease_hook(
        estimator=estimator,
        metric_name='loss',
        max_steps_without_decrease=args.tolerant_steps,
        eval_dir=None,
        min_steps=0,
        run_every_secs=None,
        run_every_steps=args.run_hook_steps)
    if args.do_train:
        # train_input_fn = lambda: data_loader.create_dataset(is_training=True,is_testing=False, args=args)
        # eval_input_fn = lambda: data_loader.create_dataset(is_training=False,is_testing=False,args=args)
        # Pre-tokenized features/labels are loaded from .npy caches and fed
        # through input_bert_fn rather than rebuilt from raw text each run.
        train_X, train_Y = np.load(data_loader.train_X_path,
                                   allow_pickle=True), np.load(
                                       data_loader.train_Y_path,
                                       allow_pickle=True)
        train_input_fn = lambda: input_bert_fn(
            train_X, train_Y, is_training=True, args=args)
        eval_X, eval_Y = np.load(data_loader.valid_X_path,
                                 allow_pickle=True), np.load(
                                     data_loader.valid_Y_path,
                                     allow_pickle=True)

        eval_input_fn = lambda: input_bert_fn(
            eval_X, eval_Y, is_training=False, args=args)
        train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                            max_steps=train_steps_nums,
                                            hooks=[early_stopping_hook])
        # Keep only the single best checkpoint as an exported SavedModel.
        exporter = tf.estimator.BestExporter(
            exports_to_keep=1,
            serving_input_receiver_fn=bert_serving_input_receiver_fn)
        # throttle_secs=0: evaluate as soon as a new checkpoint appears.
        eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
                                          exporters=[exporter],
                                          throttle_secs=0)
        # for _ in range(args.epochs):

        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
        #"bert_ce_model_pb"
        estimator.export_saved_model(bert_config.get(args.model_pb_dir),
                                     bert_serving_input_receiver_fn)