Example #1: predict() loads a saved fluid inference model with an Executor and DataFeeder, runs batched inference, and writes raw pairwise scores or pointwise argmax labels to predictions.txt (inputs are echoed to samples.txt).
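All four snippets appear to come from PaddlePaddle's SimNet example and are shown without their module-level imports. A minimal sketch of what they rely on, assuming the SimNet layout (reader, data_reader, utils and create_model are local helpers of that example, not Paddle APIs):

import io
import os
import time
import logging

import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import load_dygraph  # dygraph checkpoint loader used in Example #3 (assumed import path)

# local helpers shipped with the SimNet example (assumed, not Paddle modules)
import reader        # provides SimNetProcessor
import data_reader   # used by Example #1
import utils         # load_vocab, import_class, get_accuracy, get_result_file, ...
# create_model, used in Example #2, is another local helper defined elsewhere in the example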
def predict(conf_dict):
    """
    run predict
    """
    with open("samples.txt", "w") as samples_file:
        with open("predictions.txt", "w") as predictions_file:
            # Get model path
            model_save_dir = conf_dict["model_path"]
            model_path = os.path.join(model_save_dir,
                                      str(conf_dict["use_epoch"]))
            # Get device
            if "use_cuda" in conf_dict and conf_dict["use_cuda"] == 1:
                place = fluid.core.CUDAPlace(0)
            else:
                place = fluid.core.CPUPlace()
            # Get executor
            executor = fluid.Executor(place=place)
            # Load model
            program, feed_var_names, fetch_targets = fluid.io.load_inference_model(
                model_path, executor)
            if conf_dict["task_mode"] == "pairwise":
                # Get Feeder and Reader
                feeder = fluid.DataFeeder(place=place,
                                          feed_list=feed_var_names,
                                          program=program)
                reader = data_reader.get_reader(conf_dict, True, samples_file)
            else:
                # Get Feeder and Reader
                feeder = fluid.DataFeeder(place=place,
                                          feed_list=feed_var_names,
                                          program=program)
                reader = data_reader.get_reader(conf_dict, True, samples_file)
            # Get batch data iterator
            batch_data = paddle.batch(reader,
                                      conf_dict["batch_size"],
                                      drop_last=False)
            logging.info("start test process ...")
            bt = time.time()
            batch_count = 0
            for batch_count, data in enumerate(batch_data(), start=1):
                output = executor.run(program,
                                      feed=feeder.feed(data),
                                      fetch_list=fetch_targets)
                if conf_dict["task_mode"] == "pairwise":
                    predictions_file.write(
                        "\n".join(map(lambda item: str(item[0]), output[1])) +
                        "\n")
                else:
                    predictions_file.write("\n".join(
                        map(lambda item: str(np.argmax(item)), output[1])) +
                                           "\n")
            if batch_count:
                print("predict speed is {} s/batch".format(
                    (time.time() - bt) / batch_count))
            else:
                print("please check data, maybe it's null!")
    utils.get_result_file(conf_dict, "samples.txt", "predictions.txt")
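A hedged usage sketch for predict(): the keys below are only those the function itself reads, the values are placeholders, and data_reader.get_reader() will typically need additional keys from the same dict.

conf_dict = {
    "model_path": "./saved_models",   # hypothetical directory containing one sub-directory per epoch
    "use_epoch": 2,                   # picks ./saved_models/2 as the inference model
    "use_cuda": 0,                    # 1 runs on CUDAPlace(0), anything else on CPU
    "task_mode": "pairwise",          # or "pointwise"
    "batch_size": 128,
    # ... plus whatever data_reader.get_reader(conf_dict, ...) expects
}
predict(conf_dict)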
Example #2: test() builds a static-graph evaluation program with fluid.program_guard and a DataLoader, collects predictions, computes AUC (and optionally accuracy), and writes predictions.txt.
def test(conf_dict, args):
    """
    Evaluation Function
    """
    if args.use_cuda:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    vocab = utils.load_vocab(args.vocab_path)
    simnet_process = reader.SimNetProcessor(args, vocab)

    startup_prog = fluid.Program()

    get_test_examples = simnet_process.get_reader("test")
    batch_data = fluid.io.batch(get_test_examples,
                                args.batch_size,
                                drop_last=False)
    test_prog = fluid.Program()

    conf_dict['dict_size'] = len(vocab)

    net = utils.import_class("../shared_modules/models/matching",
                             conf_dict["net"]["module_name"],
                             conf_dict["net"]["class_name"])(conf_dict)

    metric = fluid.metrics.Auc(name="auc")

    with io.open("predictions.txt", "w", encoding="utf8") as predictions_file:
        if args.task_mode == "pairwise":
            with fluid.program_guard(test_prog, startup_prog):
                with fluid.unique_name.guard():
                    test_loader, left, pos_right = create_model(
                        args, is_inference=True)
                    left_feat, pos_score = net.predict(left, pos_right)
                    pred = pos_score
            test_prog = test_prog.clone(for_test=True)

        else:
            with fluid.program_guard(test_prog, startup_prog):
                with fluid.unique_name.guard():
                    test_loader, left, right = create_model(args,
                                                            is_inference=True)
                    left_feat, pred = net.predict(left, right)
            test_prog = test_prog.clone(for_test=True)

        exe.run(startup_prog)

        utils.init_checkpoint(exe,
                              args.init_checkpoint,
                              main_program=test_prog)

        test_exe = exe
        test_loader.set_sample_list_generator(batch_data)

        logging.info("start test process ...")
        test_loader.start()
        pred_list = []
        fetch_list = [pred.name]
        output = []
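        # Drain the loader: keep running until fluid signals the end of the epoch with EOFException, then reset it.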
        while True:
            try:
                output = test_exe.run(program=test_prog, fetch_list=fetch_list)
                if args.task_mode == "pairwise":
                    pred_list += list(
                        map(lambda item: float(item[0]), output[0]))
                    predictions_file.write(u"\n".join(
                        map(lambda item: str((item[0] + 1) / 2), output[0])) +
                                           "\n")
                else:
                    pred_list += list(output[0])
                    predictions_file.write(u"\n".join(
                        map(lambda item: str(np.argmax(item)), output[0])) +
                                           "\n")
            except fluid.core.EOFException:
                test_loader.reset()
                break
        if args.task_mode == "pairwise":
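            # Map the raw pairwise score from [-1, 1] to [0, 1] and stack [1 - p, p] so the AUC metric receives two-class probabilities.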
            pred_list = np.array(pred_list).reshape((-1, 1))
            pred_list = (pred_list + 1) / 2
            pred_list = np.hstack(
                (np.ones_like(pred_list) - pred_list, pred_list))
        else:
            pred_list = np.array(pred_list)
        labels = simnet_process.get_test_label()

        metric.update(pred_list, labels)
        if args.compute_accuracy:
            acc = utils.get_accuracy(pred_list, labels, args.task_mode,
                                     args.lamda)
            logging.info("AUC of test is %f, Accuracy of test is %f" %
                         (metric.eval(), acc))
        else:
            logging.info("AUC of test is %f" % metric.eval())

    if args.verbose_result:
        utils.get_result_file(args)
        logging.info("test result saved in %s" %
                     os.path.join(os.getcwd(), args.test_result_path))
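A hedged sketch of the args namespace this evaluation reads (attribute names are taken from the function body; values and paths are illustrative, and SimNetProcessor will need its own data-path attributes on top of these):

from argparse import Namespace

args = Namespace(
    use_cuda=False,
    vocab_path="./data/vocab.txt",              # hypothetical path
    batch_size=128,
    task_mode="pairwise",                       # or "pointwise"
    init_checkpoint="./checkpoints/step_1000",  # hypothetical checkpoint
    compute_accuracy=True,
    lamda=0.91,                                 # forwarded to utils.get_accuracy (decision threshold in pairwise mode)
    verbose_result=True,
    test_result_path="./test_result",
    seq_len=32,                                 # only the dygraph variant in Example #3 reads this
)
test(conf_dict, args)  # conf_dict is the network config dict, with at least conf_dict["net"]["module_name"/"class_name"]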
Example #3: test() runs the same evaluation in dygraph (imperative) mode, restoring parameters with load_dygraph and iterating a PyReader instead of executing a compiled program.
def test(conf_dict, args):
    """
    Evaluation Function
    """
    logging.info("start test process ...")
    if args.use_cuda:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    with fluid.dygraph.guard(place):

        vocab = utils.load_vocab(args.vocab_path)
        simnet_process = reader.SimNetProcessor(args, vocab)
        test_pyreader = fluid.io.PyReader(capacity=16,
                                          return_list=True,
                                          use_double_buffer=True)
        get_test_examples = simnet_process.get_reader("test")
        test_pyreader.decorate_sample_list_generator(
            paddle.batch(get_test_examples, batch_size=args.batch_size), place)

        conf_dict['dict_size'] = len(vocab)
        conf_dict['seq_len'] = args.seq_len

        net = utils.import_class("./nets", conf_dict["net"]["module_name"],
                                 conf_dict["net"]["class_name"])(conf_dict)

        model, _ = load_dygraph(args.init_checkpoint)
        net.set_dict(model)
        metric = fluid.metrics.Auc(name="auc")
        pred_list = []
        with io.open("predictions.txt", "w",
                     encoding="utf8") as predictions_file:
            if args.task_mode == "pairwise":
                for left, pos_right in test_pyreader():
                    left = fluid.layers.reshape(left, shape=[-1, 1])
                    pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])

                    left_feat, pos_score = net(left, pos_right)
                    pred = pos_score
                    # pred_list += list(pred.numpy())

                    pred_list += list(
                        map(lambda item: float(item[0]), pred.numpy()))
                    predictions_file.write(u"\n".join(
                        map(lambda item: str(
                            (item[0] + 1) / 2), pred.numpy())) + "\n")

            else:
                for left, right in test_pyreader():
                    left = fluid.layers.reshape(left, shape=[-1, 1])
                    right = fluid.layers.reshape(right, shape=[-1, 1])
                    left_feat, pred = net(left, right)
                    # pred_list += list(pred.numpy())

                    pred_list += list(
                        map(lambda item: float(item[0]), pred.numpy()))
                    predictions_file.write(u"\n".join(
                        map(lambda item: str(np.argmax(item)), pred.numpy())) +
                                           "\n")

            if args.task_mode == "pairwise":
                pred_list = np.array(pred_list).reshape((-1, 1))
                pred_list = (pred_list + 1) / 2
                pred_list = np.hstack(
                    (np.ones_like(pred_list) - pred_list, pred_list))
            else:
                pred_list = np.array(pred_list)
            labels = simnet_process.get_test_label()

            metric.update(pred_list, labels)
            if args.compute_accuracy:
                acc = utils.get_accuracy(pred_list, labels, args.task_mode,
                                         args.lamda)
                logging.info("AUC of test is %f, Accuracy of test is %f" %
                             (metric.eval(), acc))
            else:
                logging.info("AUC of test is %f" % metric.eval())

        if args.verbose_result:
            utils.get_result_file(args)
            logging.info("test result saved in %s" %
                         os.path.join(os.getcwd(), args.test_result_path))
Example #4: test() evaluates through a saved inference model (fluid.io.load_inference_model plus a DataFeeder) and again reports AUC and optional accuracy.
def test(conf_dict, args):
    """
    run predict
    """
    vocab = utils.load_vocab(args.vocab_path)
    simnet_process = reader.SimNetProcessor(args, vocab)
    # load auc method
    metric = fluid.metrics.Auc(name="auc")
    with open("predictions.txt", "w") as predictions_file:
        # Get model path
        model_path = args.init_checkpoint
        # Get device
        if args.use_cuda:
            place = fluid.CUDAPlace(0)
        else:
            place = fluid.CPUPlace()
        # Get executor
        executor = fluid.Executor(place=place)
        # Load model
        program, feed_var_names, fetch_targets = fluid.io.load_inference_model(
            model_path, executor)
        # Get Feeder and Reader (identical for pairwise and pointwise modes)
        feeder = fluid.DataFeeder(place=place,
                                  feed_list=feed_var_names,
                                  program=program)
        test_reader = simnet_process.get_reader("test")
        # Get batch data iterator
        batch_data = paddle.batch(test_reader,
                                  args.batch_size,
                                  drop_last=False)
        logging.info("start test process ...")
        pred_list = []
        for iter, data in enumerate(batch_data()):
            output = executor.run(program,
                                  feed=feeder.feed(data),
                                  fetch_list=fetch_targets)
            if args.task_mode == "pairwise":
                pred_list += list(map(lambda item: float(item[0]), output[1]))
                predictions_file.write("\n".join(
                    map(lambda item: str((item[0] + 1) / 2), output[1])) +
                                       "\n")
            else:
                pred_list += list(output[1])
                predictions_file.write("\n".join(
                    map(lambda item: str(np.argmax(item)), output[1])) + "\n")
        if args.task_mode == "pairwise":
            pred_list = np.array(pred_list).reshape((-1, 1))
            pred_list = (pred_list + 1) / 2
            pred_list = np.hstack(
                (np.ones_like(pred_list) - pred_list, pred_list))
        else:
            pred_list = np.array(pred_list)
        labels = simnet_process.get_test_label()

        metric.update(pred_list, labels)
        if args.compute_accuracy:
            acc = utils.get_accuracy(pred_list, labels, args.task_mode,
                                     args.lamda)
            logging.info("AUC of test is %f, Accuracy of test is %f" %
                         (metric.eval(), acc))
        else:
            logging.info("AUC of test is %f" % metric.eval())

    if args.verbose_result:
        utils.get_result_file(args)
        logging.info("test result saved in %s" %
                     os.path.join(os.getcwd(), args.test_result_path))