Example #1
0
def run(FLAGS, cfg):
    """Export the configured detector as an inference model.

    Builds a ``Trainer`` in test mode, loads the appropriate weights for the
    configured architecture, and writes the exported model into
    ``FLAGS.output_dir``.  When ``FLAGS.export_serving_model`` is set, the
    exported model is additionally converted into Paddle Serving
    server/client bundles.

    Args:
        FLAGS: parsed command-line flags; ``output_dir`` and
            ``export_serving_model`` are read here.
        cfg: loaded config object; ``architecture``, weight paths and
            ``filename`` are read here.
    """
    # Detector is constructed in test mode for export.
    trainer = Trainer(cfg, mode='test')

    # Tracking architectures load detector and ReID weights separately;
    # everything else uses a single weights file.
    if cfg.architecture in ['DeepSORT', 'ByteTrack']:
        trainer.load_weights_sde(cfg.det_weights, cfg.reid_weights)
    else:
        trainer.load_weights(cfg.weights)

    # Write the inference model to the output directory.
    trainer.export(FLAGS.output_dir)

    # Optional: produce Paddle Serving artifacts next to the exported model.
    if FLAGS.export_serving_model:
        from paddle_serving_client.io import inference_model_to_serving

        # Model name is the config file's basename without its extension.
        model_name = os.path.splitext(os.path.split(cfg.filename)[-1])[0]
        base = "{}/{}".format(FLAGS.output_dir, model_name)

        inference_model_to_serving(
            dirname=base,
            serving_server=base + "/serving_server",
            serving_client=base + "/serving_client",
            model_filename="model.pdmodel",
            params_filename="model.pdiparams")
Example #2
0
def run(FLAGS, cfg):
    """Post-quantize the configured model and save the result.

    Builds a ``Trainer`` in eval mode, loads weights according to the
    configured architecture, then runs post-training quantization, writing
    the output under ``FLAGS.output_dir``.

    Args:
        FLAGS: parsed command-line flags; ``output_dir`` is read here.
        cfg: loaded config object; ``architecture`` and weight paths are
            read here.
    """
    # Evaluation-mode trainer is required for post quantization.
    trainer = Trainer(cfg, mode='eval')

    if cfg.architecture in ['DeepSORT']:
        # DeepSORT loads detector + ReID weights separately.  The config
        # uses the literal string 'None' to mean "no detector weights".
        det_weights = cfg.det_weights if cfg.det_weights != 'None' else None
        trainer.load_weights_sde(det_weights, cfg.reid_weights)
    else:
        trainer.load_weights(cfg.weights)

    # Run post-training quantization and save the quantized model.
    trainer.post_quant(FLAGS.output_dir)