Example #1
0
def infer():
    """Run offline evaluation for a trained CTR model checkpoint.

    Loads the persisted parameters for ``args.test_epoch``, resets the AUC
    accumulator states, then streams all files in ``args.test_data_dir``
    through the network, logging the running (instance-averaged) Logloss
    and the AUC after every batch.
    """
    args = parse_args()
    print(args)

    # Device selection: use_gpu == 1 means CUDA device 0, anything else CPU.
    if args.use_gpu == 1:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    inference_scope = fluid.Scope()

    test_files = [
        os.path.join(args.test_data_dir, x)
        for x in os.listdir(args.test_data_dir)
    ]
    criteo_dataset = CriteoDataset()
    test_reader = paddle.batch(criteo_dataset.test(test_files),
                               batch_size=args.batch_size)

    startup_program = fluid.framework.Program()
    test_program = fluid.framework.Program()
    cur_model_path = os.path.join(args.model_output_dir,
                                  'epoch_' + args.test_epoch)

    with fluid.scope_guard(inference_scope):
        with fluid.framework.program_guard(test_program, startup_program):
            # Look the model-builder function up by name with getattr instead
            # of eval(): identical result for any valid identifier, but no
            # arbitrary-code-execution path if args.model_name is untrusted.
            model_fn = getattr(network_conf, args.model_name)
            loss, auc, data_list, auc_states = model_fn(
                args.embedding_size, args.num_field, args.num_feat,
                args.layer_sizes_dnn, args.act, args.reg,
                args.layer_sizes_cin)

            exe = fluid.Executor(place)
            feeder = fluid.DataFeeder(feed_list=data_list, place=place)
            # Inside program_guard the default main program *is* test_program,
            # so this restores the checkpoint parameters into test_program.
            fluid.io.load_persistables(
                executor=exe,
                dirname=cur_model_path,
                main_program=fluid.default_main_program())

            # Reset the AUC accumulator variables so the reported metric
            # reflects only this evaluation run.
            for var in auc_states:
                set_zero(var.name, scope=inference_scope, place=place)

            loss_all = 0
            num_ins = 0
            for batch_id, data_test in enumerate(test_reader()):
                loss_val, auc_val = exe.run(test_program,
                                            feed=feeder.feed(data_test),
                                            fetch_list=[loss.name, auc.name])

                # Weight each batch's loss by its size so loss_all / num_ins
                # is the instance-averaged Logloss over everything seen so far.
                num_ins += len(data_test)
                loss_all += loss_val * len(data_test)
                logger.info('TEST --> batch: {} loss: {} auc_val: {}'.format(
                    batch_id + 1, loss_all / num_ins, auc_val))

            print(
                'The last log info is the total Logloss and AUC for all test data. '
            )
Example #2
0
def infer():
    """Evaluate every saved epoch checkpoint of a CTR model on a held-out
    slice of the Criteo raw data, printing the final test AUC per epoch.
    """
    args = parse_args()

    place = fluid.CPUPlace()
    inference_scope = fluid.core.Scope()

    # NOTE(review): filelist is built but never used below (test files come
    # from "raw_data" instead). Kept because os.listdir doubles as a check
    # that args.data_path exists — confirm whether it can be removed.
    filelist = [
        "%s/%s" % (args.data_path, x) for x in os.listdir(args.data_path)
    ]
    from criteo_reader import CriteoDataset
    criteo_dataset = CriteoDataset()
    criteo_dataset.setup(args.sparse_feature_dim)
    exe = fluid.Executor(place)

    # Held-out test slice: shards in the [80%, 85%) range of the raw data.
    whole_filelist = [
        "raw_data/part-%d" % x for x in range(len(os.listdir("raw_data")))
    ]
    test_files = whole_filelist[int(0.8 * len(whole_filelist)
                                    ):int(0.85 * len(whole_filelist))]

    def set_zero(var_name):
        # Zero an int64 tensor inside the inference scope; used to reset the
        # AUC accumulator variables between checkpoints.
        param = inference_scope.var(var_name).get_tensor()
        param_array = np.zeros(param._get_dims()).astype("int64")
        param.set(param_array, place)

    epochs = 20
    for i in range(epochs):
        cur_model_path = args.model_path + "/epoch" + str(i + 1) + ".model"
        with fluid.scope_guard(inference_scope):
            [inference_program, feed_target_names, fetch_targets] = \
                        fluid.io.load_inference_model(cur_model_path, exe)
            # NOTE(review): these auto-generated names are assumed to be the
            # AUC stat accumulators — fragile, depends on graph build order.
            auc_states_names = ['_generated_var_2', '_generated_var_3']
            for name in auc_states_names:
                set_zero(name)

            # Only the last batch's fetched AUC is reported; initialize to
            # None so an empty reader is detected instead of raising
            # NameError at the print below.
            auc_val = None
            test_reader = criteo_dataset.infer_reader(test_files, 1000, 100000)
            for batch_id, data in enumerate(test_reader()):
                loss_val, auc_val = exe.run(inference_program,
                                            feed=data2tensor(data, place),
                                            fetch_list=fetch_targets)
            if auc_val is None:
                # No test batches for this checkpoint: nothing to report.
                continue
            print("train_pass_%d, test_pass_%d\t%f" % (i - 1, i, auc_val))