Example #1
def serving_encryption():
    inference_model_to_serving(
        dirname="./uci_housing_model",
        params_filename=None,
        serving_server="encrypt_server",
        serving_client="encrypt_client",
        encryption=True)
Example #2
def run(FLAGS, cfg):
    # build detector
    trainer = Trainer(cfg, mode='test')

    # load weights
    if cfg.architecture in ['DeepSORT', 'ByteTrack']:
        trainer.load_weights_sde(cfg.det_weights, cfg.reid_weights)
    else:
        trainer.load_weights(cfg.weights)

    # export model
    trainer.export(FLAGS.output_dir)

    if FLAGS.export_serving_model:
        from paddle_serving_client.io import inference_model_to_serving
        model_name = os.path.splitext(os.path.split(cfg.filename)[-1])[0]

        inference_model_to_serving(
            dirname="{}/{}".format(FLAGS.output_dir, model_name),
            serving_server="{}/{}/serving_server".format(FLAGS.output_dir,
                                                         model_name),
            serving_client="{}/{}/serving_client".format(FLAGS.output_dir,
                                                         model_name),
            model_filename="model.pdmodel",
            params_filename="model.pdiparams")
Example #3
def serving_encryption():
    inference_model_to_serving(dirname="./DarkNet53/ppcls_model/",
                               model_filename="__model__",
                               params_filename="./__params__",
                               serving_server="encrypt_server",
                               serving_client="encrypt_client",
                               encryption=True)
Example #4
def do_export(model_dir):
    feed_names, fetch_names = serving_io.inference_model_to_serving(
        dirname=model_dir,
        serving_server="transformer_server",
        serving_client="transformer_client",
        model_filename="transformer.pdmodel",
        params_filename="transformer.pdiparams")

    print("model feed_names : %s" % feed_names)
    print("model fetch_names : %s" % fetch_names)
Example #5
def run(FLAGS, cfg):
    # build detector
    trainer = Trainer(cfg, mode='test')

    # load weights
    trainer.load_weights(cfg.weights, 'resume')

    # export model
    trainer.export(FLAGS.output_dir)

    if FLAGS.export_serving_model:
        from paddle_serving_client.io import inference_model_to_serving
        model_name = os.path.splitext(os.path.split(cfg.filename)[-1])[0]

        inference_model_to_serving(
            dirname="{}/{}".format(FLAGS.output_dir, model_name),
            serving_server="{}/{}/serving_server".format(
                FLAGS.output_dir, model_name),
            serving_client="{}/{}/serving_client".format(
                FLAGS.output_dir, model_name),
            model_filename="model.pdmodel",
            params_filename="model.pdiparams")
Example #6
# Imports needed to make this snippet runnable on its own.
import argparse

import paddle
import paddle_serving_client.io as serving_io


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--inference_model_dir",
        type=str,
        default="./export/",
        help="The directory of the inference model.")
    parser.add_argument(
        "--model_file",
        type=str,
        default='inference.pdmodel',
        help="The inference model file name.")
    parser.add_argument(
        "--params_file",
        type=str,
        default='inference.pdiparams',
        help="The input inference parameters file name.")
    return parser.parse_args()


if __name__ == '__main__':
    paddle.enable_static()
    args = parse_args()
    feed_names, fetch_names = serving_io.inference_model_to_serving(
        dirname=args.inference_model_dir,
        serving_server="serving_server",
        serving_client="serving_client",
        model_filename=args.model_file,
        params_filename=args.params_file)
    print("model feed_names : %s" % feed_names)
    print("model fetch_names : %s" % fetch_names)
Example #7
parser.add_argument(
    "--feed_alias_names",
    type=str,
    default=None,
    help=
    'set alias names for feed vars, split by comma \',\', you should run --show_proto to check the number of feed vars'
)
parser.add_argument(
    "--fetch_alias_names",
    type=str,
    default=None,
    help=
    'set alias names for fetch vars, split by comma \',\', you should run --show_proto to check the number of fetch vars'
)
parser.add_argument(
    "--show_proto",
    type=bool,
    default=False,
    help=
    'If yes, you can preview the proto and then determine your feed var alias name and fetch var alias name.'
)

if __name__ == "__main__":
    paddle.enable_static()
    args = parser.parse_args()
    feed_names, fetch_names = serving_io.inference_model_to_serving(
        dirname=args.dirname,
        serving_server=args.server_path,
        serving_client=args.client_path,
        model_filename=args.model_filename,
        params_filename=args.params_filename,
        show_proto=args.show_proto,
        feed_alias_names=args.feed_alias_names,
        fetch_alias_names=args.fetch_alias_names)
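The alias flags and --show_proto suggest a two-pass workflow: first print the proto to see which feed/fetch variables the model exposes, then rerun the conversion with one alias per variable. A minimal sketch of that flow is below; the model directory and the alias strings are placeholders chosen for illustration, not values from the example above.

import paddle
import paddle_serving_client.io as serving_io

paddle.enable_static()

# Pass 1: preview the proto so the number and order of feed/fetch vars is known.
serving_io.inference_model_to_serving(
    dirname="./inference_model",        # placeholder model directory
    serving_server="serving_server",
    serving_client="serving_client",
    show_proto=True)

# Pass 2: rerun with comma-separated aliases, one per feed/fetch var.
serving_io.inference_model_to_serving(
    dirname="./inference_model",
    serving_server="serving_server",
    serving_client="serving_client",
    feed_alias_names="image",           # placeholder alias
    fetch_alias_names="label,score")    # placeholder aliases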
Example #8

import os
import argparse
from paddle_serving_client.io import inference_model_to_serving


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", type=str)
    parser.add_argument("--server_dir", type=str, default="serving_server_dir")
    parser.add_argument("--client_dir", type=str, default="serving_client_dir")
    return parser.parse_args()


args = parse_args()
inference_model_dir = args.model_dir
serving_server_dir = os.path.join(args.model_dir, args.server_dir)
serving_client_dir = os.path.join(args.model_dir, args.client_dir)
feed_var_names, fetch_var_names = inference_model_to_serving(
    inference_model_dir,
    serving_server_dir,
    serving_client_dir,
    model_filename="model",
    params_filename="params")

print("success!")