Example #1
def test(exe, chunk_evaluator, inference_program, test_data, place):
    # Clear the evaluator's accumulated state before the evaluation pass.
    chunk_evaluator.reset(exe)
    for data in test_data():
        # Build LoDTensor inputs from each column of the batch; list
        # comprehensions keep this Python 3 compatible (map would return
        # a one-shot iterator that to_lodtensor cannot safely consume).
        word = to_lodtensor([x[0] for x in data], place)
        mark = to_lodtensor([x[1] for x in data], place)
        target = to_lodtensor([x[2] for x in data], place)
        # Running the inference program updates the evaluator's accumulated
        # counts; nothing needs to be fetched explicitly here.
        exe.run(inference_program,
                feed={"word": word,
                      "mark": mark,
                      "target": target})
    return chunk_evaluator.eval(exe)
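All three examples call a to_lodtensor helper that is not shown on this page. A minimal sketch of such a helper, assuming it packs a batch of integer sequences into a fluid.LoDTensor with a single LoD level (the exact implementation in the original project may differ):

import numpy as np
import paddle.fluid as fluid

def to_lodtensor(data, place):
    # data is a list of integer sequences; accumulate lengths so the LoD
    # records where each sequence starts and ends in the flattened tensor.
    lod = [0]
    for seq in data:
        lod.append(lod[-1] + len(seq))
    flattened = np.concatenate(data, axis=0).astype("int64")
    flattened = flattened.reshape([len(flattened), 1])
    res = fluid.LoDTensor()
    res.set(flattened, place)
    res.set_lod([lod])
    return res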
Example #2
def infer(model_path, batch_size, test_data_file, vocab_file, target_file,
          use_gpu):
    """
    use the model under model_path to predict the test data, the result will be printed on the screen

    return nothing
    """
    word_dict = load_dict(vocab_file)
    word_reverse_dict = load_reverse_dict(vocab_file)

    label_dict = load_dict(target_file)
    label_reverse_dict = load_reverse_dict(target_file)

    test_data = paddle.batch(reader.data_reader(test_data_file, word_dict,
                                                label_dict),
                             batch_size=batch_size)
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.Scope()
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(model_path, exe)
        for data in test_data():
            word = to_lodtensor([x[0] for x in data], place)
            mark = to_lodtensor([x[1] for x in data], place)
            crf_decode = exe.run(inference_program,
                                 feed={
                                     "word": word,
                                     "mark": mark
                                 },
                                 fetch_list=fetch_targets,
                                 return_numpy=False)
            # crf_decode[0] is a LoDTensor; its level-0 LoD holds the offsets
            # of each sentence in the flattened array of predicted tag ids.
            lod_info = (crf_decode[0].lod())[0]
            np_data = np.array(crf_decode[0])
            assert len(data) == len(lod_info) - 1
            for sen_index in six.moves.xrange(len(data)):
                assert len(
                    data[sen_index][0]) == lod_info[sen_index +
                                                    1] - lod_info[sen_index]
                word_index = 0
                for tag_index in six.moves.xrange(lod_info[sen_index],
                                                  lod_info[sen_index + 1]):
                    word = word_reverse_dict[data[sen_index][0][word_index]]
                    gold_tag = label_reverse_dict[data[sen_index][2]
                                                  [word_index]]
                    tag = label_reverse_dict[np_data[tag_index][0]]
                    print(word + "\t" + gold_tag + "\t" + tag)
                    word_index += 1
                print("")
Example #3
def test(exe, chunk_evaluator, inference_program, test_data, test_fetch_list,
         place):
    chunk_evaluator.reset()
    for data in test_data():
        word = to_lodtensor([x[0] for x in data], place)
        mark = to_lodtensor([x[1] for x in data], place)
        target = to_lodtensor([x[2] for x in data], place)
        rets = exe.run(inference_program,
                       feed={
                           "word": word,
                           "mark": mark,
                           "target": target
                       },
                       fetch_list=test_fetch_list)
        # The fetch list yields the per-batch counts of inferred, labeled and
        # correctly predicted chunks, which drive the evaluator update below.
        num_infer = np.array(rets[0])
        num_label = np.array(rets[1])
        num_correct = np.array(rets[2])
        chunk_evaluator.update(num_infer[0].astype('int64'),
                               num_label[0].astype('int64'),
                               num_correct[0].astype('int64'))
    return chunk_evaluator.eval()
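A hedged usage sketch for this variant, assuming chunk_evaluator is a fluid.metrics.ChunkEvaluator whose eval() returns precision, recall and F1, and that test_fetch_list fetches the per-batch chunk counts; the surrounding names (test_reader, etc.) are illustrative:

chunk_evaluator = fluid.metrics.ChunkEvaluator()
precision, recall, f1 = test(exe, chunk_evaluator, inference_program,
                             test_reader, test_fetch_list, place)
print("precision: %f, recall: %f, f1: %f" % (precision, recall, f1))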