# Shared imports assumed by the PaddleDetection snippets below,
# mirroring the tools/*.py entry points they are drawn from:
import os

import paddle

from ppdet.core.workspace import load_config
from ppdet.engine import Trainer, init_parallel_env, set_random_seed, init_fleet_env
from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.cli import ArgsParser


def init_model(self):
    parser = ArgsParser()
    parser.add_argument(
        "-c",
        "--config",
        type=str,
        default=self.cfg_path,
        help="configuration file to use")
    parser.add_argument(
        "--draw_threshold",
        type=float,
        default=self.conf_thres,
        help="Threshold to reserve the result for visualization.")
    args = parser.parse_args()

    cfg = load_config(args.config)
    cfg.weights = self.weights
    cfg.use_gpu = self.use_gpu
    paddle.set_device('gpu' if cfg.use_gpu else 'cpu')
    if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
        cfg['norm_type'] = 'bn'
    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_version()

    # build trainer
    trainer = Trainer(cfg, mode='test')
    # load weights
    trainer.load_weights(cfg.weights)
    return trainer
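For context, a minimal usage sketch of init_model above. The Detector wrapper class and the config/weights paths are illustrative assumptions, not taken from the source; the method only requires cfg_path, conf_thres, weights, and use_gpu attributes on self.

# Hypothetical wrapper class (assumption); binds the module-level
# init_model defined above as a method.
class Detector(object):
    def __init__(self, cfg_path, weights, conf_thres=0.5, use_gpu=True):
        self.cfg_path = cfg_path
        self.weights = weights
        self.conf_thres = conf_thres
        self.use_gpu = use_gpu

    init_model = init_model  # reuse the function defined above

detector = Detector(
    cfg_path='configs/yolov3/yolov3_darknet53_270e_coco.yml',  # assumed path
    weights='output/yolov3_darknet53_270e_coco/model_final.pdparams')  # assumed path
trainer = detector.init_model()
trainer.predict(['demo/000000014439.jpg'],  # assumed demo image
                draw_threshold=detector.conf_thres)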
def run(FLAGS, cfg):
    # build detector
    trainer = Trainer(cfg, mode='test')

    # load weights
    if cfg.architecture in ['DeepSORT']:
        if cfg.det_weights != 'None':
            trainer.load_weights_sde(cfg.det_weights, cfg.reid_weights)
        else:
            trainer.load_weights_sde(None, cfg.reid_weights)
    else:
        trainer.load_weights(cfg.weights)

    # export model
    trainer.export(FLAGS.output_dir)

    if FLAGS.export_serving_model:
        from paddle_serving_client.io import inference_model_to_serving
        model_name = os.path.splitext(os.path.split(cfg.filename)[-1])[0]
        inference_model_to_serving(
            dirname="{}/{}".format(FLAGS.output_dir, model_name),
            serving_server="{}/{}/serving_server".format(FLAGS.output_dir,
                                                         model_name),
            serving_client="{}/{}/serving_client".format(FLAGS.output_dir,
                                                         model_name),
            model_filename="model.pdmodel",
            params_filename="model.pdiparams")
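A hedged driver for the export run() above, using types.SimpleNamespace to stand in for the argparse FLAGS object that the entry script would normally build; the config and weights paths are assumptions for illustration.

from types import SimpleNamespace

cfg = load_config('configs/yolov3/yolov3_darknet53_270e_coco.yml')  # assumed config
cfg.weights = 'output/yolov3_darknet53_270e_coco/model_final.pdparams'  # assumed weights
flags = SimpleNamespace(output_dir='output_inference',  # stand-in for argparse FLAGS
                        export_serving_model=False)
run(flags, cfg)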
def run(FLAGS, cfg):
    # build detector
    trainer = Trainer(cfg, mode='eval')

    # load weights
    if cfg.architecture in ['DeepSORT']:
        if cfg.det_weights != 'None':
            trainer.load_weights_sde(cfg.det_weights, cfg.reid_weights)
        else:
            trainer.load_weights_sde(None, cfg.reid_weights)
    else:
        trainer.load_weights(cfg.weights)

    # post quant model
    trainer.post_quant(FLAGS.output_dir)
def test_eval_mstest(self):
    cfg = load_config(self.mstest_cfg_file)
    trainer = Trainer(cfg, mode='eval')
    cfg.weights = 'https://paddledet.bj.bcebos.com/models/faster_rcnn_r34_fpn_1x_coco.pdparams'
    trainer.load_weights(cfg.weights)
    trainer.evaluate()
def run(FLAGS, cfg):
    # build detector
    trainer = Trainer(cfg, mode='test')

    # load weights
    trainer.load_weights(cfg.weights, 'resume')

    # export model
    trainer.export(FLAGS.output_dir)
def run(FLAGS, cfg):
    # build trainer
    trainer = Trainer(cfg, mode='eval')

    # load weights
    trainer.load_weights(cfg.weights, 'resume')

    # evaluation
    trainer.evaluate()
def run(FLAGS, cfg):
    # init parallel environment if nranks > 1
    init_parallel_env()

    # build trainer
    trainer = Trainer(cfg, mode='eval')

    # load weights
    trainer.load_weights(cfg.weights, 'resume')

    # evaluation
    trainer.evaluate()
def run(FLAGS, cfg):
    # build trainer
    trainer = Trainer(cfg, mode='test')

    # load weights
    trainer.load_weights(cfg.weights, 'resume')

    # get inference images
    images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)

    # inference
    trainer.predict(
        images,
        draw_threshold=FLAGS.draw_threshold,
        output_dir=FLAGS.output_dir)
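Similarly, a minimal sketch of driving the inference run() above; the demo image path, threshold, and config/weights paths are assumptions.

from types import SimpleNamespace

cfg = load_config('configs/yolov3/yolov3_darknet53_270e_coco.yml')  # assumed config
cfg.weights = 'output/yolov3_darknet53_270e_coco/model_final.pdparams'  # assumed weights
flags = SimpleNamespace(infer_dir=None,
                        infer_img='demo/000000014439.jpg',  # assumed image
                        draw_threshold=0.5,
                        output_dir='output')
run(flags, cfg)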
def run(FLAGS, cfg):
    # init fleet environment
    if cfg.fleet:
        init_fleet_env()
    else:
        # init parallel environment if nranks > 1
        init_parallel_env()

    if FLAGS.enable_ce:
        set_random_seed(0)

    # build trainer
    trainer = Trainer(cfg, mode='train')

    # load weights
    if FLAGS.resume is not None:
        trainer.resume_weights(FLAGS.resume)
    elif 'pretrain_weights' in cfg and cfg.pretrain_weights:
        trainer.load_weights(cfg.pretrain_weights)

    # training
    trainer.train(FLAGS.eval)
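A sketch of invoking the training run() above. cfg.fleet is normally propagated from a command-line flag by the entry script, so it is set explicitly here; that, and the config path, are assumptions.

from types import SimpleNamespace

cfg = load_config('configs/yolov3/yolov3_darknet53_270e_coco.yml')  # assumed config
cfg.fleet = False  # normally set from a --fleet flag by the entry script
flags = SimpleNamespace(enable_ce=False, resume=None, eval=True)
run(flags, cfg)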
def run(FLAGS, cfg):
    # init parallel environment if nranks > 1
    init_parallel_env()

    if FLAGS.enable_ce:
        set_random_seed(0)

    # build trainer
    trainer = Trainer(cfg, mode='train')

    # load weights
    trainer.load_weights(cfg.pretrain_weights, FLAGS.weight_type)

    # training
    trainer.train()
def test_infer_mstest(self):
    cfg = load_config(self.mstest_cfg_file)
    trainer = Trainer(cfg, mode='test')
    cfg.weights = 'https://paddledet.bj.bcebos.com/models/faster_rcnn_r34_fpn_1x_coco.pdparams'
    trainer.load_weights(cfg.weights)
    tests_img_root = os.path.join(os.path.dirname(__file__), 'imgs')

    # input images to predict
    imgs = [
        'coco2017_val2017_000000000139.jpg',
        'coco2017_val2017_000000000724.jpg'
    ]
    imgs = [os.path.join(tests_img_root, img) for img in imgs]
    trainer.predict(
        imgs, draw_threshold=0.5, output_dir='output', save_txt=True)
def run(FLAGS, cfg):
    # init fleet environment
    if cfg.fleet:
        init_fleet_env()
    else:
        # init parallel environment if nranks > 1
        init_parallel_env()

    if FLAGS.enable_ce:
        set_random_seed(0)

    # build trainer
    trainer = Trainer(cfg, mode='train')

    # load weights
    if not FLAGS.slim_config:
        trainer.load_weights(cfg.pretrain_weights, FLAGS.weight_type)

    # training
    trainer.train(FLAGS.eval)
def run(FLAGS, cfg):
    if FLAGS.json_eval:
        logger.info(
            "In json_eval mode, PaddleDetection will evaluate json files in "
            "output_eval directly. And proposal.json, bbox.json and mask.json "
            "will be evaluated by default.")
        json_eval_results(
            cfg.metric,
            json_directory=FLAGS.output_eval,
            dataset=cfg['EvalDataset'])
        return

    # init parallel environment if nranks > 1
    init_parallel_env()

    # build trainer
    trainer = Trainer(cfg, mode='eval')

    # load weights
    trainer.load_weights(cfg.weights)

    # evaluation
    trainer.evaluate()
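And a sketch of the json_eval path of the evaluation run() above, which scores previously dumped proposal.json/bbox.json/mask.json files without rebuilding the model; the config path and output_eval directory are assumptions.

from types import SimpleNamespace

cfg = load_config('configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml')  # assumed config
flags = SimpleNamespace(json_eval=True, output_eval='output_eval')  # assumed directory
run(flags, cfg)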
def run(FLAGS, cfg):
    # build detector
    trainer = Trainer(cfg, mode='test')

    # load weights
    trainer.load_weights(cfg.weights, 'resume')

    # export model
    trainer.export(FLAGS.output_dir)

    if FLAGS.export_serving_model:
        from paddle_serving_client.io import inference_model_to_serving
        model_name = os.path.splitext(os.path.split(cfg.filename)[-1])[0]
        inference_model_to_serving(
            dirname="{}/{}".format(FLAGS.output_dir, model_name),
            serving_server="{}/{}/serving_server".format(FLAGS.output_dir,
                                                         model_name),
            serving_client="{}/{}/serving_client".format(FLAGS.output_dir,
                                                         model_name),
            model_filename="model.pdmodel",
            params_filename="model.pdiparams")