Example #1
def build_slim_model(cfg, slim_cfg, mode='train'):
    with open(slim_cfg) as f:
        slim_load_cfg = yaml.load(f, Loader=yaml.Loader)
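    # a distilled model needs no slim wrapper outside training, so eval/test returns cfg unchanged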
    if mode != 'train' and slim_load_cfg['slim'] == 'Distill':
        return cfg

    if slim_load_cfg['slim'] == 'Distill':
        model = DistillModel(cfg, slim_cfg)
        cfg['model'] = model
    elif slim_load_cfg['slim'] == 'DistillPrune':
        if mode == 'train':
            model = DistillModel(cfg, slim_cfg)
            pruner = create(cfg.pruner)
            pruner(model.student_model)
        else:
            model = create(cfg.architecture)
            weights = cfg.weights
            load_config(slim_cfg)
            pruner = create(cfg.pruner)
            model = pruner(model)
            load_pretrain_weight(model, weights)
        cfg['model'] = model
    else:
        load_config(slim_cfg)
        model = create(cfg.architecture)
        if mode == 'train':
            load_pretrain_weight(model, cfg.pretrain_weights)
        slim = create(cfg.slim)
        cfg['model'] = slim(model)
        cfg['slim'] = slim
        if mode != 'train':
            load_pretrain_weight(cfg['model'], cfg.weights)

    return cfg
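
A minimal usage sketch (both config paths below are hypothetical placeholders, not files referenced by this snippet):

cfg = load_config('configs/yolov3_darknet53_270e_coco.yml')                # hypothetical base config
cfg = build_slim_model(cfg, 'configs/slim/prune_yolov3.yml', mode='eval')  # hypothetical slim config
model = cfg['model']  # the slim-wrapped model, ready for evaluation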
Example #2
def main():
    FLAGS = parse_args()

    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    if FLAGS.slim_config:
        slim_cfg = load_config(FLAGS.slim_config)
        merge_config(slim_cfg)
    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_version()

    place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'
    place = paddle.set_device(place)
    run(FLAGS, cfg)
Example #3
def main():
    FLAGS = parse_args()
    cfg = load_config(FLAGS.config)
    # TODO: bias should be unified
    cfg['bias'] = 1 if FLAGS.bias else 0
    cfg['classwise'] = bool(FLAGS.classwise)
    cfg['output_eval'] = FLAGS.output_eval
    cfg['save_prediction_only'] = FLAGS.save_prediction_only
    merge_config(FLAGS.opt)

    # disable npu in config by default
    if 'use_npu' not in cfg:
        cfg.use_npu = False

    if cfg.use_gpu:
        place = paddle.set_device('gpu')
    elif cfg.use_npu:
        place = paddle.set_device('npu')
    else:
        place = paddle.set_device('cpu')

    if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
        cfg['norm_type'] = 'bn'

    if FLAGS.slim_config:
        cfg = build_slim_model(cfg, FLAGS.slim_config, mode='eval')

    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_npu(cfg.use_npu)
    check_version()

    run(FLAGS, cfg)
Example #4
    def build_program(self, config):
        from ppdet.core.workspace import load_config, create
        from ppdet.utils.check import check_version, check_config

        cfg = load_config(config)
        check_config(cfg)
        check_version()

        lr_builder = create("LearningRate")
        optimizer_builder = create("OptimizerBuilder")

        # build program
        self.startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, self.startup_program):
            with fluid.unique_name.guard():
                model = create(cfg.architecture)

                inputs_def = cfg["TrainReader"]["inputs_def"]
                # can't compile with dataloader now.
                inputs_def["use_dataloader"] = False
                feed_vars, _ = model.build_inputs(**inputs_def)

                train_fetches = model.train(feed_vars)
                loss = train_fetches["loss"]
                lr = lr_builder()
                optimizer = optimizer_builder(lr)
                optimizer.minimize(loss)

        self.loss = loss
        self.feeds = feed_vars
Example #5
def main():
    FLAGS = parse_args()
    cfg = load_config(FLAGS.config)
    cfg['fp16'] = FLAGS.fp16
    cfg['fleet'] = FLAGS.fleet
    cfg['use_vdl'] = FLAGS.use_vdl
    cfg['vdl_log_dir'] = FLAGS.vdl_log_dir
    cfg['save_prediction_only'] = FLAGS.save_prediction_only
    cfg['save_proposals'] = FLAGS.save_proposals
    cfg['proposals_path'] = FLAGS.proposals_path
    merge_config(FLAGS.opt)

    place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')

    if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
        cfg['norm_type'] = 'bn'

    if FLAGS.slim_config:
        cfg = build_slim_model(cfg, FLAGS.slim_config)

    check.check_config(cfg)
    check.check_gpu(cfg.use_gpu)
    check.check_version()

    run(FLAGS, cfg)
Example #6
def main():
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)

    check_version()

    main_arch = cfg.architecture

    # Use CPU for exporting inference model instead of GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            inputs_def['use_dataloader'] = False
            feed_vars, _ = model.build_inputs(**inputs_def)
            # when exclude_nms is set, skip the NMS postprocess in the exported model
            test_fetches = model.test(feed_vars, exclude_nms=FLAGS.exclude_nms)
    infer_prog = infer_prog.clone(True)
    check_py_func(infer_prog)

    exe.run(startup_prog)
    checkpoint.load_params(exe, infer_prog, cfg.weights)

    save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
    dump_infer_config(FLAGS, cfg)
Example #7
    def init_model(self):
        parser = ArgsParser()
        parser.add_argument("-c",
                            "--config",
                            type=str,
                            default=self.cfg_path,
                            help="configuration file to use")
        parser.add_argument(
            "--draw_threshold",
            type=float,
            default=self.conf_thres,
            help="Threshold to reserve the result for visualization.")
        args = parser.parse_args()
        cfg = load_config(args.config)
        cfg.weights = self.weights
        cfg.use_gpu = self.use_gpu
        paddle.set_device('gpu' if cfg.use_gpu else 'cpu')
        if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
            cfg['norm_type'] = 'bn'
        check_config(cfg)
        check_gpu(cfg.use_gpu)
        check_version()
        # build trainer
        trainer = Trainer(cfg, mode='test')
        # load weights
        trainer.load_weights(cfg.weights)
        return trainer
Example #8
def main():
    FLAGS = parse_args()
    cfg = load_config(FLAGS.config)
    cfg['use_vdl'] = FLAGS.use_vdl
    cfg['vdl_log_dir'] = FLAGS.vdl_log_dir
    merge_config(FLAGS.opt)

    # disable npu in config by default
    if 'use_npu' not in cfg:
        cfg.use_npu = False

    if cfg.use_gpu:
        place = paddle.set_device('gpu')
    elif cfg.use_npu:
        place = paddle.set_device('npu')
    else:
        place = paddle.set_device('cpu')

    if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
        cfg['norm_type'] = 'bn'

    if FLAGS.slim_config:
        cfg = build_slim_model(cfg, FLAGS.slim_config, mode='test')

    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_npu(cfg.use_npu)
    check_version()

    run(FLAGS, cfg)
Example #9
def main():
    cfg = load_config(FLAGS.config)

    if 'architecture' in cfg:
        main_arch = cfg.architecture
    else:
        raise ValueError("'architecture' not specified in config file.")

    merge_config(FLAGS.opt)

    # Use CPU for exporting inference model instead of GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            inputs_def['use_dataloader'] = False
            feed_vars, _ = model.build_inputs(**inputs_def)
            test_fetches = model.test(feed_vars)
    infer_prog = infer_prog.clone(True)

    exe.run(startup_prog)
    checkpoint.load_params(exe, infer_prog, cfg.weights)

    save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
Example #10
def main():
    FLAGS = parse_args()
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)

    # disable npu in config by default
    if 'use_npu' not in cfg:
        cfg.use_npu = False

    # disable xpu in config by default
    if 'use_xpu' not in cfg:
        cfg.use_xpu = False

    if cfg.use_gpu:
        place = paddle.set_device('gpu')
    elif cfg.use_npu:
        place = paddle.set_device('npu')
    elif cfg.use_xpu:
        place = paddle.set_device('xpu')
    else:
        place = paddle.set_device('cpu')

    if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
        cfg['norm_type'] = 'bn'

    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_npu(cfg.use_npu)
    check_xpu(cfg.use_xpu)
    check_version()

    run(FLAGS, cfg)
Example #11
    def tracking(self, video_stream, output_dir='mot_result', visualization=True, draw_threshold=0.5, use_gpu=False):
        '''
        Track a video and save the prediction results to output_dir if visualization is set to True.

        video_stream: the video path
        output_dir: the directory in which to save the results
        visualization: if True, save the results as a video; otherwise do not
        draw_threshold: the confidence threshold for keeping prediction results
        use_gpu: if True, run the computation on GPU; otherwise on CPU
        '''
        self.video_stream = video_stream
        self.output_dir = output_dir
        self.visualization = visualization
        self.draw_threshold = draw_threshold
        self.use_gpu = use_gpu

        cfg = load_config(os.path.join(self.directory, 'config', 'fairmot_dla34_30e_1088x608.yml'))
        check_config(cfg)

        place = 'gpu:0' if use_gpu else 'cpu'
        place = paddle.set_device(place)

        paddle.disable_static()
        tracker = StreamTracker(cfg, mode='test')

        # load weights
        tracker.load_weights_jde(self.pretrained_model)
        signal.signal(signal.SIGINT, self.signalhandler)
        # inference
        tracker.videostream_predict(video_stream=video_stream,
                                    output_dir=output_dir,
                                    data_type='mot',
                                    model_type='FairMOT',
                                    visualization=visualization,
                                    draw_threshold=draw_threshold)
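
A hedged usage sketch, assuming `tracker_module` is an instance of the surrounding class and the video file exists:

tracker_module.tracking('test_video.mp4',       # hypothetical input video
                        output_dir='mot_result',
                        visualization=True,
                        draw_threshold=0.5,
                        use_gpu=False)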
Example #12
    def stream_mode(self, output_dir='mot_result', visualization=True, draw_threshold=0.5, use_gpu=False):
        '''
        Entering stream mode enables image-stream prediction: images can be fed in as a stream and the results saved to a video.

        output_dir: the directory in which to save the results
        visualization: if True, save the results as a video; otherwise do not
        draw_threshold: the confidence threshold for keeping prediction results
        use_gpu: if True, run the computation on GPU; otherwise on CPU
        '''
        self.output_dir = output_dir
        self.visualization = visualization
        self.draw_threshold = draw_threshold
        self.use_gpu = use_gpu

        cfg = load_config(os.path.join(self.directory, 'config', 'fairmot_dla34_30e_1088x608.yml'))
        check_config(cfg)

        place = 'gpu:0' if use_gpu else 'cpu'
        place = paddle.set_device(place)

        paddle.disable_static()
        self.tracker = StreamTracker(cfg, mode='test')

        # load weights
        self.tracker.load_weights_jde(self.pretrained_model)
        signal.signal(signal.SIGINT, self.signalhandler)
        return self
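
A hedged sketch of stream-mode usage, assuming `tracker_module` is an instance of the surrounding class; the per-frame prediction call is not shown in this example, so it is left as a placeholder comment:

tracker = tracker_module.stream_mode(output_dir='mot_result',
                                     visualization=True,
                                     draw_threshold=0.5,
                                     use_gpu=False)
# feed frames to `tracker` one at a time here (per-frame API not shown above)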
Example #13
def main():
    """
    Main evaluation function
    """
    cfg = load_config(FLAGS.config)
    if 'architecture' in cfg:
        main_arch = cfg.architecture
    else:
        raise ValueError("'architecture' not specified in config file.")

    merge_config(FLAGS.opt)

    # check if set use_gpu=True in paddlepaddle cpu version
    check_gpu(cfg.use_gpu)

    if 'eval_feed' not in cfg:
        eval_feed = create(main_arch + 'EvalFeed')
    else:
        eval_feed = create(cfg.eval_feed)

    # define executor
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    # build program
    model = create(main_arch)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            _, feed_vars = create_feed(eval_feed, iterable=True)
            fetches = model.eval(feed_vars)

    eval_prog = eval_prog.clone(True)

    # load model
    exe.run(startup_prog)
    if 'weights' in cfg:
        checkpoint.load_params(exe, eval_prog, cfg.weights)

    assert cfg.metric in ['WIDERFACE'], \
            "unknown metric type {}".format(cfg.metric)

    annotation_file = getattr(eval_feed.dataset, 'annotation', None)
    dataset_dir = FLAGS.dataset_dir if FLAGS.dataset_dir else \
        getattr(eval_feed.dataset, 'dataset_dir', None)
    img_root_dir = dataset_dir
    if FLAGS.eval_mode == "widerface":
        image_dir = getattr(eval_feed.dataset, 'image_dir', None)
        img_root_dir = os.path.join(dataset_dir, image_dir)
    gt_file = os.path.join(dataset_dir, annotation_file)
    pred_dir = FLAGS.output_eval if FLAGS.output_eval else 'output/pred'
    face_eval_run(exe,
                  eval_prog,
                  fetches,
                  img_root_dir,
                  gt_file,
                  pred_dir=pred_dir,
                  eval_mode=FLAGS.eval_mode,
                  multi_scale=FLAGS.multi_scale)
Example #14
def main():
    cfg = load_config(FLAGS.config)

    if 'architecture' in cfg:
        main_arch = cfg.architecture
    else:
        raise ValueError("'architecture' not specified in config file.")

    merge_config(FLAGS.opt)

    if 'test_feed' not in cfg:
        test_feed = create(main_arch + 'TestFeed')
    else:
        test_feed = create(cfg.test_feed)

    # Use CPU for exporting inference model instead of GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            _, feed_vars = create_feed(test_feed, iterable=True)
            test_fetches = model.test(feed_vars)
    infer_prog = infer_prog.clone(True)

    exe.run(startup_prog)
    checkpoint.load_params(exe, infer_prog, cfg.weights)

    save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
Example #15
def main():
    paddle.set_device("cpu")
    FLAGS = parse_args()

    cfg = load_config(FLAGS.config)
    # TODO: to be refined in the future
    if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn':
        FLAGS.opt['norm_type'] = 'bn'
    merge_config(FLAGS.opt)
    if FLAGS.slim_config:
        slim_cfg = load_config(FLAGS.slim_config)
        merge_config(slim_cfg)
    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_version()

    run(FLAGS, cfg)
Example #16
    def test_eval_mstest(self):
        cfg = load_config(self.mstest_cfg_file)
        trainer = Trainer(cfg, mode='eval')

        cfg.weights = 'https://paddledet.bj.bcebos.com/models/faster_rcnn_r34_fpn_1x_coco.pdparams'
        trainer.load_weights(cfg.weights)

        trainer.evaluate()
Example #17
def main():
    cfg = load_config(FLAGS.config)

    if 'architecture' in cfg:
        main_arch = cfg.architecture
    else:
        raise ValueError("'architecture' not specified in config file.")

    merge_config(FLAGS.opt)

    # Use CPU for exporting inference model instead of GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            inputs_def['use_dataloader'] = False
            feed_vars, _ = model.build_inputs(**inputs_def)
            test_fetches = model.test(feed_vars)
    infer_prog = infer_prog.clone(True)

    assert FLAGS.pruned_params is not None, \
        "FLAGS.pruned_params is empty! Please set it with the '--pruned_params' option."
    pruned_params = FLAGS.pruned_params.strip().split(",")
    logger.info("pruned params: {}".format(pruned_params))
    pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(",")]
    logger.info("pruned ratios: {}".format(pruned_ratios))
    assert len(pruned_params) == len(pruned_ratios), \
        "The length of pruned params and pruned ratios should be equal."
    # element-wise check; comparing whole lists would be lexicographic, not per-element
    assert all(0 < r < 1 for r in pruned_ratios), \
        "The elements of pruned ratios should be in range (0, 1)."

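    # compare FLOPs before and after pruning to report the reduction ratio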
    base_flops = flops(infer_prog)
    pruner = Pruner()
    infer_prog, _, _ = pruner.prune(
        infer_prog,
        fluid.global_scope(),
        params=pruned_params,
        ratios=pruned_ratios,
        place=place,
        only_graph=True)
    pruned_flops = flops(infer_prog)
    logger.info("pruned FLOPS: {}".format(
        float(base_flops - pruned_flops) / base_flops))

    exe.run(startup_prog)
    checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)

    save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
Example #18
def main():
    """
    Main evaluation function
    """
    cfg = load_config(FLAGS.config)
    if 'architecture' in cfg:
        main_arch = cfg.architecture
    else:
        raise ValueError("'architecture' not specified in config file.")

    merge_config(FLAGS.opt)

    # check if set use_gpu=True in paddlepaddle cpu version
    check_gpu(cfg.use_gpu)

    # define executor
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    # build program
    model = create(main_arch)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['EvalReader']['inputs_def']
            inputs_def['use_dataloader'] = False
            feed_vars, _ = model.build_inputs(**inputs_def)
            fetches = model.eval(feed_vars)

    eval_prog = eval_prog.clone(True)

    # load model
    exe.run(startup_prog)
    if 'weights' in cfg:
        checkpoint.load_params(exe, eval_prog, cfg.weights)

    assert cfg.metric in ['WIDERFACE'], \
            "unknown metric type {}".format(cfg.metric)

    dataset = cfg['EvalReader']['dataset']

    annotation_file = dataset.get_anno()
    dataset_dir = dataset.dataset_dir
    image_dir = os.path.join(
        dataset_dir,
        dataset.image_dir) if FLAGS.eval_mode == 'widerface' else dataset_dir

    pred_dir = FLAGS.output_eval if FLAGS.output_eval else 'output/pred'
    face_eval_run(exe,
                  eval_prog,
                  fetches,
                  image_dir,
                  annotation_file,
                  pred_dir=pred_dir,
                  eval_mode=FLAGS.eval_mode,
                  multi_scale=FLAGS.multi_scale)
Example #19
def get_model(model_name, pretrained=True):
    cfg_file = get_config_file(model_name)
    cfg = load_config(cfg_file)
    model = create(cfg.architecture)

    if pretrained:
        load_weight(model, get_weights_url(model_name))

    return model
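
A minimal usage sketch; the import path and model name are assumptions to be checked against the installed ppdet version:

from ppdet.model_zoo import get_model  # assumed import path
model = get_model('yolov3_darknet53_270e_coco', pretrained=True)  # assumed model name
model.eval()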
Example #20
def main():
    cfg = load_config(FLAGS.config)
    if 'architecture' in cfg:
        main_arch = cfg.architecture
    else:
        raise ValueError("'architecture' not specified in config file.")

    merge_config(FLAGS.opt)
    if 'log_iter' not in cfg:
        cfg.log_iter = 20

    # check if set use_gpu=True in paddlepaddle cpu version
    check_gpu(cfg.use_gpu)

    if cfg.use_gpu:
        devices_num = fluid.core.get_cuda_device_count()
    else:
        devices_num = int(
            os.environ.get('CPU_NUM', multiprocessing.cpu_count()))

    if 'eval_feed' not in cfg:
        eval_feed = create(main_arch + 'EvalFeed')
    else:
        eval_feed = create(cfg.eval_feed)

    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    _, test_feed_vars = create_feed(eval_feed, False)

    eval_reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
    #eval_pyreader.decorate_sample_list_generator(eval_reader, place)
    test_data_feed = fluid.DataFeeder(test_feed_vars.values(), place)

    assert os.path.exists(FLAGS.model_path)
    infer_prog, feed_names, fetch_targets = fluid.io.load_inference_model(
        dirname=FLAGS.model_path,
        executor=exe,
        model_filename=FLAGS.model_name,
        params_filename=FLAGS.params_name)

    eval_keys = ['bbox', 'gt_box', 'gt_label', 'is_difficult']
    eval_values = [
        'multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult'
    ]
    eval_cls = []
    eval_values[0] = fetch_targets[0]

    results = eval_run(exe, infer_prog, eval_reader, eval_keys, eval_values,
                       eval_cls, test_data_feed)

    resolution = None
    if 'mask' in results[0]:
        resolution = model.mask_head.resolution
    eval_results(results, eval_feed, cfg.metric, cfg.num_classes, resolution,
                 False, FLAGS.output_eval)
Example #21
def build_slim_model(cfg, slim_cfg, mode='train'):
    with open(slim_cfg) as f:
        slim_load_cfg = yaml.load(f, Loader=yaml.Loader)
    if mode != 'train' and slim_load_cfg['slim'] == 'Distill':
        return cfg

    if slim_load_cfg['slim'] == 'Distill':
        model = DistillModel(cfg, slim_cfg)
        cfg['model'] = model
    elif slim_load_cfg['slim'] == 'DistillPrune':
        if mode == 'train':
            model = DistillModel(cfg, slim_cfg)
            pruner = create(cfg.pruner)
            pruner(model.student_model)
        else:
            model = create(cfg.architecture)
            weights = cfg.weights
            load_config(slim_cfg)
            pruner = create(cfg.pruner)
            model = pruner(model)
            load_pretrain_weight(model, weights)
        cfg['model'] = model
        cfg['slim_type'] = cfg.slim
    elif slim_load_cfg['slim'] == 'PTQ':
        model = create(cfg.architecture)
        load_config(slim_cfg)
        load_pretrain_weight(model, cfg.weights)
        slim = create(cfg.slim)
        cfg['slim_type'] = cfg.slim
        cfg['model'] = slim(model)
        cfg['slim'] = slim
    elif slim_load_cfg['slim'] == 'UnstructuredPruner':
        load_config(slim_cfg)
        slim = create(cfg.slim)
        cfg['slim_type'] = cfg.slim
        cfg['slim'] = slim
        cfg['unstructured_prune'] = True
    else:
        load_config(slim_cfg)
        model = create(cfg.architecture)
        if mode == 'train':
            load_pretrain_weight(model, cfg.pretrain_weights)
        slim = create(cfg.slim)
        cfg['slim_type'] = cfg.slim
        # TODO: fix quant export model in framework.
        if mode == 'test' and slim_load_cfg['slim'] == 'QAT':
            slim.quant_config['activation_preprocess_type'] = None
        cfg['model'] = slim(model)
        cfg['slim'] = slim
        if mode != 'train':
            load_pretrain_weight(cfg['model'], cfg.weights)

    return cfg
Example #22
def main():
    FLAGS = parse_args()

    cfg = load_config(FLAGS.config)
    cfg['fp16'] = FLAGS.fp16
    cfg['fleet'] = FLAGS.fleet
    merge_config(FLAGS.opt)
    if FLAGS.slim_config:
        slim_cfg = load_config(FLAGS.slim_config)
        merge_config(slim_cfg)
        if 'weight_type' not in cfg:
            cfg.weight_type = FLAGS.weight_type
    check.check_config(cfg)
    check.check_gpu(cfg.use_gpu)
    check.check_version()

    place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'
    place = paddle.set_device(place)

    run(FLAGS, cfg)
Example #23
def main():
    paddle.set_device("cpu")
    FLAGS = parse_args()

    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_version()

    run(FLAGS, cfg)
Example #24
    def test_loader_yaml(self):
        cfg_file = 'ppdet/data/tests/test.yml'
        cfg = load_config(cfg_file)
        data_cfg = '[!COCODataSet {{image_dir: {0}, dataset_dir: {1}, ' \
            'anno_path: {2}, sample_num: 10}}]'.format(
                self.image_dir, self.root_path, self.anno_path)
        dataset_ins = yaml.load(data_cfg, Loader=yaml.Loader)
        update_train_cfg = {'TrainReader': {'dataset': dataset_ins[0]}}
        update_test_cfg = {'EvalReader': {'dataset': dataset_ins[0]}}
        merge_config(update_train_cfg)
        merge_config(update_test_cfg)

        reader = create_reader(cfg['TrainReader'], 10)()
        for samples in reader:
            for sample in samples:
                im_shape = sample[0].shape
                self.assertEqual(im_shape[0], 3)
                self.assertEqual(im_shape[1] % 32, 0)
                self.assertEqual(im_shape[2] % 32, 0)

                im_info_shape = sample[1].shape
                self.assertEqual(im_info_shape[-1], 3)

                im_id_shape = sample[2].shape
                self.assertEqual(im_id_shape[-1], 1)

                gt_bbox_shape = sample[3].shape
                self.assertEqual(gt_bbox_shape[-1], 4)

                gt_class_shape = sample[4].shape
                self.assertEqual(gt_class_shape[-1], 1)
                self.assertEqual(gt_class_shape[0], gt_bbox_shape[0])

                is_crowd_shape = sample[5].shape
                self.assertEqual(is_crowd_shape[-1], 1)
                self.assertEqual(is_crowd_shape[0], gt_bbox_shape[0])

                mask = sample[6]
                self.assertEqual(len(mask), gt_bbox_shape[0])
                self.assertEqual(mask[0][0].shape[-1], 2)

        reader = create_reader(cfg['EvalReader'], 10)()
        for samples in reader:
            for sample in samples:
                im_shape = sample[0].shape
                self.assertEqual(im_shape[0], 3)
                self.assertEqual(im_shape[1] % 32, 0)
                self.assertEqual(im_shape[2] % 32, 0)

                im_info_shape = sample[1].shape
                self.assertEqual(im_info_shape[-1], 3)

                im_id_shape = sample[2].shape
                self.assertEqual(im_id_shape[-1], 1)
Example #25
def main():
    ## configuration
    cfg = load_config(FLAGS.config)
    if 'architecture' in cfg:
        main_arch = cfg.architecture
    else:
        raise ValueError("'architecture' not specified in config file.")
    merge_config(FLAGS.opt)
    check_gpu(cfg.use_gpu)
    check_version()

    # JSON evaluation mode
    if FLAGS.json_file:
        logger.info("start evaluating in json mode")
        dataset = cfg.EvalReader['dataset']
        if FLAGS.dataset == 'train':
            dataset = cfg.TrainReader['dataset']
        eval_json_results(FLAGS.json_file,
                          dataset=dataset, num_classes=cfg.num_classes)
        return

    ## model
    model = create(main_arch)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg.EvalReader['inputs_def']
            feed_vars, loader = model.build_inputs(**inputs_def)
            fetches = model.eval(feed_vars)
    eval_prog = eval_prog.clone(True)
    extra_keys = ['gt_bbox', 'gt_class', 'is_difficult']
    keys, values, _ = parse_fetches(fetches, eval_prog, extra_keys)

    ## executor
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    ## data
    reader = create_reader(cfg.EvalReader)
    loader.set_sample_list_generator(reader, place)

    #### run ####
    exe.run(startup_prog)
    ## load parameters
    assert 'weights' in cfg, \
           'model can not load weights'
    checkpoint.load_params(exe, eval_prog, cfg.weights)

    ## evaluation
    results = eval_run(exe, eval_prog, loader, keys, values)
    eval_results(results, cfg.num_classes)
Example #26
def main():
    cfg = load_config(FLAGS.config)

    if 'architecture' in cfg:
        main_arch = cfg.architecture
    else:
        raise ValueError("'architecture' not specified in config file.")

    merge_config(FLAGS.opt)

    # Use CPU for exporting inference model instead of GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            inputs_def['use_dataloader'] = False
            feed_vars, _ = model.build_inputs(**inputs_def)
            test_fetches = model.test(feed_vars)
    infer_prog = infer_prog.clone(True)

    not_quant_pattern = []
    if FLAGS.not_quant_pattern:
        not_quant_pattern = FLAGS.not_quant_pattern
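    # PaddleSlim quant-aware config: per-channel abs-max for weights, moving-average abs-max for activations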
    config = {
        'weight_quantize_type': 'channel_wise_abs_max',
        'activation_quantize_type': 'moving_average_abs_max',
        'quantize_op_types': ['depthwise_conv2d', 'mul', 'conv2d'],
        'not_quant_pattern': not_quant_pattern
    }

    infer_prog = quant_aware(infer_prog, place, config, for_test=True)

    exe.run(startup_prog)
    checkpoint.load_params(exe, infer_prog, cfg.weights)

    infer_prog, int8_program = convert(infer_prog,
                                       place,
                                       config,
                                       save_int8=True)

    save_infer_model(os.path.join(FLAGS.output_dir, 'float'), exe, feed_vars,
                     test_fetches, infer_prog)

    save_infer_model(os.path.join(FLAGS.output_dir, 'int'), exe, feed_vars,
                     test_fetches, int8_program)
Example #27
File: pp_det.py Project: dmxj/icv
    def init_cfg(self):
        self.cfg = load_config(self.config_file)
        if 'architecture' in self.cfg:
            self.main_arch = self.cfg.architecture
        else:
            raise ValueError("'architecture' not specified in config file.")

        self.cfg.use_gpu = self.use_gpu
        check_gpu(self.use_gpu)

        self.extra_keys = []
        if self.cfg['metric'] == 'COCO':
            self.extra_keys = ['im_info', 'im_id', 'im_shape']
        if self.cfg['metric'] == 'VOC':
            self.extra_keys = ['im_id', 'im_shape']
Example #28
    def test_infer_mstest(self):
        cfg = load_config(self.mstest_cfg_file)
        trainer = Trainer(cfg, mode='test')

        cfg.weights = 'https://paddledet.bj.bcebos.com/models/faster_rcnn_r34_fpn_1x_coco.pdparams'
        trainer.load_weights(cfg.weights)
        tests_img_root = os.path.join(os.path.dirname(__file__), 'imgs')

        # input images to predict
        imgs = ['coco2017_val2017_000000000139.jpg', 'coco2017_val2017_000000000724.jpg']
        imgs = [os.path.join(tests_img_root, img) for img in imgs]
        trainer.predict(imgs,
                        draw_threshold=0.5,
                        output_dir='output',
                        save_txt=True)
Example #29
    def __init__(self, cfg, slim_cfg):
        super(DistillModel, self).__init__()

        self.student_model = create(cfg.architecture)
        logger.debug('Load student model pretrain_weights:{}'.format(
            cfg.pretrain_weights))
        load_pretrain_weight(self.student_model, cfg.pretrain_weights)

        slim_cfg = load_config(slim_cfg)
        self.teacher_model = create(slim_cfg.architecture)
        self.distill_loss = create(slim_cfg.distill_loss)
        logger.debug('Load teacher model pretrain_weights:{}'.format(
            slim_cfg.pretrain_weights))
        load_pretrain_weight(self.teacher_model, slim_cfg.pretrain_weights)

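        # freeze the teacher so only the student is updated during distillation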
        for param in self.teacher_model.parameters():
            param.trainable = False
Example #30
    def __init__(self):

        self.size = 608

        self.draw_threshold = 0.1

        self.cfg = load_config('./configs/vehicle_yolov3_darknet.yml')

        self.place = fluid.CUDAPlace(
            0) if self.cfg.use_gpu else fluid.CPUPlace()
        self.exe = fluid.Executor(self.place)

        self.model = create(self.cfg.architecture)

        self.classifier = CarClassifier()

        self.init_params()