Example #1
import numpy as np
import paddle
import paddle.fluid as fluid

import reader  # project-local module providing the dict and data readers


def infer(args):
    id2word_dict = reader.load_dict(args.word_dict_path)
    word2id_dict = reader.load_reverse_dict(args.word_dict_path)

    id2label_dict = reader.load_dict(args.label_dict_path)
    label2id_dict = reader.load_reverse_dict(args.label_dict_path)
    # q2b_dict maps full-width characters to their half-width equivalents.
    q2b_dict = reader.load_dict(args.word_rep_dict_path)
    test_data = paddle.batch(reader.test_reader(args.test_data_dir,
                                                word2id_dict, label2id_dict,
                                                q2b_dict),
                             batch_size=args.batch_size)
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(args.model_path, exe)
        for data in test_data():
            full_out_str = ""
            word_idx = to_lodtensor([x[0] for x in data], place)
            word_list = [x[1] for x in data]
            (crf_decode, ) = exe.run(inference_program,
                                     feed={"word": word_idx},
                                     fetch_list=fetch_targets,
                                     return_numpy=False)
            # LoD offsets mark sentence boundaries in the flattened batch:
            # sentence i spans [lod_info[i], lod_info[i + 1]).
            lod_info = crf_decode.lod()[0]
            np_data = np.array(crf_decode)
            assert len(data) == len(lod_info) - 1
            for sen_index in range(len(data)):
                assert len(data[sen_index][0]) == (
                    lod_info[sen_index + 1] - lod_info[sen_index])
                word_index = 0
                outstr = ""
                cur_full_word = ""
                cur_full_tag = ""
                words = word_list[sen_index]
                for tag_index in range(lod_info[sen_index],
                                       lod_info[sen_index + 1]):
                    cur_word = words[word_index]
                    cur_tag = id2label_dict[str(np_data[tag_index][0])]
                    if cur_tag.endswith("-B") or cur_tag.endswith("O"):
                        if len(cur_full_word) != 0:
                            outstr += cur_full_word + "/" + cur_full_tag + " "
                        cur_full_word = cur_word
                        cur_full_tag = get_real_tag(cur_tag)
                    else:
                        cur_full_word += cur_word
                    word_index += 1
                outstr += cur_full_word + "/" + cur_full_tag + " "
                outstr = outstr.strip()
                full_out_str += outstr + "\n"
            print(full_out_str.strip())
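
Both to_lodtensor and get_real_tag above are project-local helpers that the
snippet does not show. A minimal sketch consistent with how they are called
(the exact definitions live in the surrounding module):

def to_lodtensor(data, place):
    # Flatten a list of integer sequences into one column vector and record
    # the cumulative sequence lengths as level-of-detail (LoD) offsets.
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for seq_len in seq_lens:
        cur_len += seq_len
        lod.append(cur_len)
    flattened = np.concatenate(data, axis=0).astype("int64")
    flattened = flattened.reshape([len(flattened), 1])
    res = fluid.core.LoDTensor()
    res.set(flattened, place)
    res.set_lod([lod])
    return res


def get_real_tag(tag):
    # Strip the "-B"/"-I" suffix from an IOB tag; "O" carries no suffix.
    if tag == "O":
        return "O"
    return tag[:-2]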
Example #2
import math
import os
import random
import time

import numpy as np
import paddle
import paddle.fluid as fluid

import reader  # project-local module providing the dict and data readers


def train(args):
    """
    Train the network.
    """
    if not os.path.exists(args.model_save_dir):
        os.mkdir(args.model_save_dir)

    word2id_dict = reader.load_reverse_dict(args.word_dict_path)
    label2id_dict = reader.load_reverse_dict(args.label_dict_path)
    word_rep_dict = reader.load_dict(args.word_rep_dict_path)
    word_dict_len = max(map(int, word2id_dict.values())) + 1
    label_dict_len = max(map(int, label2id_dict.values())) + 1

    # lex_net is the project-local network builder; it returns the loss, the
    # CRF decoding op, and the input variables for words and labels.
    avg_cost, crf_decode, word, target = lex_net(args, word_dict_len,
                                                 label_dict_len)
    sgd_optimizer = fluid.optimizer.SGD(learning_rate=args.base_learning_rate)
    sgd_optimizer.minimize(avg_cost)

    # Under the IOB scheme each chunk type contributes a "-B" and an "-I"
    # label, and one label is the bare "O", hence (label_dict_len - 1) / 2.
    (precision, recall, f1_score, num_infer_chunks, num_label_chunks,
     num_correct_chunks) = fluid.layers.chunk_eval(
         input=crf_decode,
         label=target,
         chunk_scheme="IOB",
         num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))
    chunk_evaluator = fluid.metrics.ChunkEvaluator()
    chunk_evaluator.reset()

    train_reader_list = []
    corpus_num = len(args.corpus_type_list)
    for i in range(corpus_num):
        train_reader = paddle.batch(
            paddle.reader.shuffle(reader.file_reader(args.traindata_dir,
                                                     word2id_dict,
                                                     label2id_dict,
                                                     word_rep_dict,
                                                     args.corpus_type_list[i]),
                                  buf_size=args.traindata_shuffle_buffer),
            batch_size=int(args.batch_size * args.corpus_proportion_list[i]))
        train_reader_list.append(train_reader)
    test_reader = paddle.batch(reader.file_reader(args.testdata_dir,
                                                  word2id_dict, label2id_dict,
                                                  word_rep_dict),
                               batch_size=args.batch_size)
    train_reader_itr_list = []
    for train_reader in train_reader_list:
        cur_reader_itr = train_reader()
        train_reader_itr_list.append(cur_reader_itr)

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    feeder = fluid.DataFeeder(feed_list=[word, target], place=place)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    batch_id = 0
    start_time = time.time()
    eval_list = []
    iteration = 0  # renamed from "iter" to avoid shadowing the builtin
    while True:
        full_batch = []
        cur_batch = []
        for i in range(corpus_num):
            reader_itr = train_reader_itr_list[i]
            try:
                cur_batch = next(reader_itr)
            except StopIteration:
                print(args.corpus_type_list[i] +
                      " corpus finish a pass of training")
                new_reader = train_reader_list[i]
                train_reader_itr_list[i] = new_reader()
                cur_batch = next(train_reader_itr_list[i])
            full_batch += cur_batch
        random.shuffle(full_batch)

        cost_var, nums_infer, nums_label, nums_correct = exe.run(
            fluid.default_main_program(),
            fetch_list=[
                avg_cost, num_infer_chunks, num_label_chunks,
                num_correct_chunks
            ],
            feed=feeder.feed(full_batch))
        print("batch_id:" + str(batch_id) + ", avg_cost:" + str(cost_var[0]))
        chunk_evaluator.update(nums_infer, nums_label, nums_correct)
        batch_id += 1

        if batch_id % args.save_model_per_batchs == 1:
            save_dirname = os.path.join(args.model_save_dir,
                                        "params_batch_%d" % batch_id)
            fluid.io.save_inference_model(save_dirname, ['word'], [crf_decode],
                                          exe)
            temp_save_model = os.path.join(args.model_save_dir,
                                           "temp_model_for_test")
            fluid.io.save_inference_model(
                temp_save_model, ['word', 'target'],
                [num_infer_chunks, num_label_chunks, num_correct_chunks],
                exe)

            # Use fresh names so the chunk_eval graph variables above are not
            # shadowed.
            train_p, train_r, train_f1 = chunk_evaluator.eval()
            print("[train] batch_id:" + str(batch_id) + ", precision:" +
                  str(train_p) + ", recall:" + str(train_r) + ", f1:" +
                  str(train_f1))
            chunk_evaluator.reset()
            p, r, f1 = test(exe, chunk_evaluator, temp_save_model, test_reader,
                            place)
            chunk_evaluator.reset()
            print("[test] batch_id:" + str(batch_id) + ", precision:" +
                  str(p) + ", recall:" + str(r) + ", f1:" + str(f1))
            end_time = time.time()
            print("cur_batch_id:" + str(batch_id) + ", last " +
                  str(args.save_model_per_batchs) + " batchs, time_cost:" +
                  str(end_time - start_time))
            start_time = time.time()

            # Early stopping: keep the last 2 * eval_window test F1 scores and
            # stop once the mean of the newer window no longer beats the mean
            # of the older one.
            if len(eval_list) < 2 * args.eval_window:
                eval_list.append(f1)
            else:
                eval_list.pop(0)
                eval_list.append(f1)
                last_avg_f1 = sum(
                    eval_list[0:args.eval_window]) / args.eval_window
                cur_avg_f1 = sum(
                    eval_list[args.eval_window:2 *
                              args.eval_window]) / args.eval_window
                if cur_avg_f1 <= last_avg_f1:
                    return
                else:
                    print("keep training!")
        iteration += 1
        if iteration == args.num_iterations:
            return
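
The test helper called above is also project-local. A plausible sketch,
assuming it reloads the temporary inference model and feeds the fetched chunk
counts into the evaluator (the actual definition lives in the surrounding
module):

def test(exe, chunk_evaluator, save_dirname, test_data, place):
    # Reload the freshly saved inference program and evaluate on the test set.
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
        for data in test_data():
            word = to_lodtensor([x[0] for x in data], place)
            target = to_lodtensor([x[1] for x in data], place)
            result_list = exe.run(inference_program,
                                  feed={"word": word, "target": target},
                                  fetch_list=fetch_targets,
                                  return_numpy=False)
            nums_infer = np.array(result_list[0])
            nums_label = np.array(result_list[1])
            nums_correct = np.array(result_list[2])
            chunk_evaluator.update(int(nums_infer[0]), int(nums_label[0]),
                                   int(nums_correct[0]))
    return chunk_evaluator.eval()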
Example #3
    # (snippet begins mid-function: the zero-copy input tensors have already
    # been filled at this point)
    predictor.zero_copy_run()

    results = []
    # Copy every output tensor back to host memory.
    output_names = predictor.get_output_names()
    for name in output_names:
        output_tensor = predictor.get_output_tensor(name)
        output_data = output_tensor.copy_to_cpu()
        results.append(output_data)
    return results
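
create_predictor is not shown in this snippet. A minimal sketch using the
Fluid AnalysisConfig inference API, assuming a CPU deployment and that
args.model_path points at the saved inference model directory (the argument
name is illustrative):

from paddle.fluid.core import AnalysisConfig, create_paddle_predictor


def create_predictor(args):
    config = AnalysisConfig(args.model_path)
    config.disable_gpu()
    # Zero-copy tensors require the feed/fetch ops to be disabled.
    config.switch_use_feed_fetch_ops(False)
    return create_paddle_predictor(config)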


if __name__ == '__main__':
    args = parse_args()  # parse_args and reader are project-local helpers
    word2id_dict = reader.load_reverse_dict(args.word_dict_path)
    label2id_dict = reader.load_reverse_dict(args.label_dict_path)
    word_rep_dict = reader.load_dict(args.word_rep_dict_path)
    word_dict_len = max(map(int, word2id_dict.values())) + 1
    label_dict_len = max(map(int, label2id_dict.values())) + 1

    pred = create_predictor(args)

    test_data = paddle.batch(reader.file_reader(args.testdata_dir,
                                                word2id_dict, label2id_dict,
                                                word_rep_dict),
                             batch_size=1)
    batch_id = 0
    id2word = {v: k for k, v in word2id_dict.items()}
    id2label = {v: k for k, v in label2id_dict.items()}
    for data in test_data():
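        # (snippet is truncated here: the loop body, which feeds each batch to
        # the predictor and decodes the returned tags, is not in the source)
        ...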