Example #1
def save_serving_model(save_dir, exe, feed_vars, test_fetches, infer_prog):
    feed_var_names = [var.name for var in feed_vars.values()]
    fetch_list = sorted(test_fetches.items(), key=lambda i: i[0])
    target_vars = [var[1] for var in fetch_list]
    feed_var_names = prune_feed_vars(feed_var_names, target_vars, infer_prog)
    serving_client = os.path.join(save_dir, 'serving_client')
    serving_server = os.path.join(save_dir, 'serving_server')
    logger.info(
        "Export serving model to {}, client side: {}, server side: {}. input: {}, output: "
        "{}...".format(save_dir, serving_client, serving_server,
                       feed_var_names, [str(var.name) for var in target_vars]))
    feed_dict = {x: infer_prog.global_block().var(x) for x in feed_var_names}
    fetch_dict = {x.name: x for x in target_vars}
    import paddle_serving_client.io as serving_io
    serving_io.save_model(
        client_config_folder=serving_client,
        server_model_folder=serving_server,
        feed_var_dict=feed_dict,
        fetch_var_dict=fetch_dict,
        main_program=infer_prog)
    # save_model writes client and server conf prototxt files with identical
    # content, so the client conf can double as the server side conf here
    shutil.copy(serving_client + '/serving_client_conf.prototxt',
                save_dir + '/serving_server_conf.prototxt')
Example #2
def main():
    args = parse_args()

    model = architectures.__dict__[args.model]()

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()

    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            image = create_input(args.img_size)
            out = create_model(args, model, image, class_dim=args.class_dim)

    infer_prog = infer_prog.clone(for_test=True)
    fluid.load(program=infer_prog,
               model_path=args.pretrained_model,
               executor=exe)

    model_path = os.path.join(args.output_path, "ppcls_model")
    conf_path = os.path.join(args.output_path, "ppcls_client_conf")
    import paddle_serving_client.io as serving_io
    serving_io.save_model(model_path, conf_path, {"image": image},
                          {"prediction": out}, infer_prog)
Example #3
    def save_serving_model(self, model_path, client_conf_path):
        feed_vars = {}
        target_vars = {}
        for target in self._target_names:
            tmp_target = self._main_program.block(0)._find_var_recursive(
                target)
            target_vars[target] = tmp_target

        for feed in self._feed_names:
            tmp_feed = self._main_program.block(0)._find_var_recursive(feed)
            feed_vars[feed] = tmp_feed

        serving_io.save_model(model_path, client_conf_path, feed_vars,
                              target_vars, self._main_program)
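Every example in this list ends in the same call: save_model writes a server-side model folder (consumed by paddle_serving_server) and a client-side config folder holding a serving_client_conf.prototxt. A minimal sketch of querying the resulting service with paddle_serving_client follows; the folder path, the port, and the "image"/"prediction" names (borrowed from Example #2) are assumptions, a server must already be running, and numpy feeds require a reasonably recent paddle-serving-client version:

import numpy as np
from paddle_serving_client import Client

client = Client()
# hypothetical path: the client config folder written by save_model above
client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9393"])

# feed/fetch keys must match the dicts that were passed to save_model
fake_image = np.random.rand(1, 3, 224, 224).astype("float32")
fetch_map = client.predict(feed={"image": fake_image}, fetch=["prediction"])
print(fetch_map)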
Example #4
def export_serving_model(args):
    """
    Export PaddlePaddle inference model for prediction deployment and serving.
    """
    print("Exporting serving model...")
    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    image, logit_out = build_model(infer_prog,
                                   startup_prog,
                                   phase=ModelPhase.PREDICT)

    # Use CPU for exporting inference model instead of GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    infer_prog = infer_prog.clone(for_test=True)

    if os.path.exists(cfg.TEST.TEST_MODEL):
        print('load test model:', cfg.TEST.TEST_MODEL)
        try:
            fluid.load(infer_prog, os.path.join(cfg.TEST.TEST_MODEL, 'model'),
                       exe)
        except Exception:
            fluid.io.load_params(exe,
                                 cfg.TEST.TEST_MODEL,
                                 main_program=infer_prog)
    else:
        print("TEST.TEST_MODEL diretory is empty!")
        exit(-1)

    from paddle_serving_client.io import save_model
    save_model(
        cfg.FREEZE.SAVE_DIR + "/serving_server",
        cfg.FREEZE.SAVE_DIR + "/serving_client",
        {image.name: image},
        {logit_out.name: logit_out},
        infer_prog,
    )
    print("Serving model exported!")
    print("Exporting serving model config...")
    deploy_cfg_path = export_inference_config()
    print("Serving model saved : [%s]" % (deploy_cfg_path))
Example #5
def save_serving_model(FLAGS, exe, feed_vars, test_fetches, infer_prog):
    cfg_name = os.path.basename(FLAGS.config).split('.')[0]
    save_dir = os.path.join(FLAGS.output_dir, cfg_name)
    feed_var_names = [var.name for var in feed_vars.values()]
    fetch_list = sorted(test_fetches.items(), key=lambda i: i[0])
    target_vars = [var[1] for var in fetch_list]
    feed_var_names = prune_feed_vars(feed_var_names, target_vars, infer_prog)
    serving_client = os.path.join(save_dir, 'serving_client')
    serving_server = os.path.join(save_dir, 'serving_server')
    logger.info(
        "Export serving model to {}, client side: {}, server side: {}. input: {}, output: "
        "{}...".format(save_dir, serving_client, serving_server,
                       feed_var_names, [str(var.name) for var in target_vars]))
    feed_dict = {x: infer_prog.global_block().var(x) for x in feed_var_names}
    fetch_dict = {x.name: x for x in target_vars}
    import paddle_serving_client.io as serving_io
    # save_model expects the server model folder first, then the client config folder
    serving_io.save_model(serving_server, serving_client, feed_dict,
                          fetch_dict, infer_prog)
Example #6
    # variable-length word ids; fed to the saved model under the name "words"
    data = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1)
    label = fluid.layers.data(name="label", shape=[1], dtype="int64")

    dataset = fluid.DatasetFactory().create_dataset()
    filelist = ["train_data/%s" % x for x in os.listdir("train_data")]
    dataset.set_use_var([data, label])
    pipe_command = "python imdb_reader.py"
    dataset.set_pipe_command(pipe_command)
    dataset.set_batch_size(128)
    dataset.set_filelist(filelist)
    dataset.set_thread(10)
    avg_cost, acc, prediction = lstm_net(data, label, dict_dim)
    optimizer = fluid.optimizer.SGD(learning_rate=0.01)
    optimizer.minimize(avg_cost)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    epochs = 6

    import paddle_serving_client.io as serving_io

    for i in range(epochs):
        exe.train_from_dataset(program=fluid.default_main_program(),
                               dataset=dataset,
                               debug=False)
        logger.info("TRAIN --> pass: {}".format(i))
        if i == epochs - 1:  # save once, after the final epoch
            serving_io.save_model("{}_model".format(model_name),
                                  "{}_client_conf".format(model_name),
                                  {"words": data}, {"prediction": prediction},
                                  fluid.default_main_program())
Example #7
def train():
    args = parse_args()
    sparse_only = args.sparse_only
    if not os.path.isdir(args.model_output_dir):
        os.mkdir(args.model_output_dir)
    dense_input = fluid.layers.data(name="dense_input",
                                    shape=[dense_feature_dim],
                                    dtype='float32')
    sparse_input_ids = [
        fluid.layers.data(name="C" + str(i),
                          shape=[1],
                          lod_level=1,
                          dtype="int64") for i in range(1, 27)
    ]
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    #nn_input = None if sparse_only else dense_input
    nn_input = dense_input
    predict_y, loss, auc_var, batch_auc_var, infer_vars = dnn_model(
        nn_input, sparse_input_ids, label, args.embedding_size,
        args.sparse_feature_dim)

    optimizer = fluid.optimizer.SGD(learning_rate=1e-4)
    optimizer.minimize(loss)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
    dataset.set_use_var([dense_input] + sparse_input_ids + [label])

    python_executable = "python3.6"
    pipe_command = "{} criteo_reader.py {}".format(python_executable,
                                                   args.sparse_feature_dim)

    dataset.set_pipe_command(pipe_command)
    dataset.set_batch_size(128)
    thread_num = 10
    dataset.set_thread(thread_num)

    whole_filelist = [
        "raw_data/part-%d" % x for x in range(len(os.listdir("raw_data")))
    ]

    print(whole_filelist)
    dataset.set_filelist(whole_filelist[:100])
    dataset.load_into_memory()
    fluid.layers.Print(auc_var)
    epochs = 1
    for i in range(epochs):
        exe.train_from_dataset(program=fluid.default_main_program(),
                               dataset=dataset,
                               debug=True)
        print("epoch {} finished".format(i))

    import paddle_serving_client.io as server_io
    feed_var_dict = {}
    feed_var_dict['dense_input'] = dense_input
    for i, sparse in enumerate(sparse_input_ids):
        feed_var_dict["embedding_{}.tmp_0".format(i)] = sparse
    fetch_var_dict = {"prob": predict_y}

    feed_kv_dict = {}
    feed_kv_dict['dense_input'] = dense_input
    for i, emb in enumerate(infer_vars):
        feed_kv_dict["embedding_{}.tmp_0".format(i)] = emb

    server_io.save_model("ctr_serving_model", "ctr_client_conf", feed_var_dict,
                         fetch_var_dict, fluid.default_main_program())

    server_io.save_model("ctr_serving_model_kv", "ctr_client_conf_kv",
                         feed_kv_dict, fetch_var_dict,
                         fluid.default_main_program())
Example #8
model_name = "bert_chinese_L-12_H-768_A-12"
module = hub.Module(name=model_name)
inputs, outputs, program = module.context(trainable=True,
                                          max_seq_len=int(sys.argv[1]))
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_ids = inputs["input_ids"]
position_ids = inputs["position_ids"]
segment_ids = inputs["segment_ids"]
input_mask = inputs["input_mask"]
pooled_output = outputs["pooled_output"]
sequence_output = outputs["sequence_output"]

feed_var_names = [
    input_ids.name, position_ids.name, segment_ids.name, input_mask.name
]

target_vars = [pooled_output, sequence_output]

serving_io.save_model(
    "bert_seq{}_model".format(sys.argv[1]),
    "bert_seq{}_client".format(sys.argv[1]), {
        "input_ids": input_ids,
        "position_ids": position_ids,
        "segment_ids": segment_ids,
        "input_mask": input_mask,
    }, {
        "pooled_output": pooled_output,
        "sequence_output": sequence_output
    }, program)
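The four feed slots saved above are normally filled by a BERT tokenizer on the client side. A minimal sketch of the matching client, assuming the paddle_serving_app package (its ChineseBertReader is used in the official bert demo), an export with max_seq_len 32, and a server already listening on port 9292:

from paddle_serving_app.reader import ChineseBertReader
from paddle_serving_client import Client

# max_seq_len must match the sys.argv[1] the model was exported with
reader = ChineseBertReader({"max_seq_len": 32})

client = Client()
client.load_client_config("bert_seq32_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

# process() builds input_ids / position_ids / segment_ids / input_mask
feed_dict = reader.process("这是一段测试文本")
fetch_map = client.predict(feed=feed_dict, fetch=["pooled_output"])
print(fetch_map["pooled_output"])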
Example #9
train_reader = paddle.batch(paddle.reader.shuffle(
    paddle.dataset.uci_housing.train(), buf_size=500),
                            batch_size=16)

test_reader = paddle.batch(paddle.reader.shuffle(
    paddle.dataset.uci_housing.test(), buf_size=500),
                           batch_size=16)

x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')

y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_loss = fluid.layers.mean(cost)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01)
sgd_optimizer.minimize(avg_loss)

place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

import paddle_serving_client.io as serving_io

for pass_id in range(30):
    for data_train in train_reader():
        avg_loss_value, = exe.run(fluid.default_main_program(),
                                  feed=feeder.feed(data_train),
                                  fetch_list=[avg_loss])

serving_io.save_model("uci_housing_model", "uci_housing_client", {"x": x},
                      {"price": y_predict}, fluid.default_main_program())