Code Example #1
    def _export_to_openvino(self, args, tools_dir):
        run_through_shell(f'python {os.path.join(tools_dir, "export.py")} '
                          f'{args["config"]} '
                          f'{args["load_weights"]} '
                          f'{args["save_model_to"]} '
                          f'openvino '
                          f'--input_format {args["openvino_input_format"]}')

        # FIXME(ikrylov): remove alt_ssd_export block as soon as it becomes useless.
        # (LeonidBeynenson): Please note that the alt_ssd_export approach may be applied
        #                    only to SSD models that were not compressed by NNCF.
        config = Config.fromfile(args["config"])
        should_run_alt_ssd_export = (hasattr(config.model, 'bbox_head')
                                     and config.model.bbox_head.type == 'SSDHead'
                                     and not config.get('nncf_config'))

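        # Note: is_checkpoint_nncf is presumably imported conditionally and may be
        # None when NNCF is not installed, hence the truthiness check before calling it.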
        if is_checkpoint_nncf and is_checkpoint_nncf(args['load_weights']):
            # If the config does not contain NNCF part,
            # but the checkpoint was trained with NNCF compression,
            # the NNCF config will be read from checkpoint.
            # Since alt_ssd_export is incompatible with NNCF compression,
            # alt_ssd_export should not be run in this case.
            should_run_alt_ssd_export = False

        if should_run_alt_ssd_export:
            run_through_shell(
                f'python {os.path.join(tools_dir, "export.py")} '
                f'{args["config"]} '
                f'{args["load_weights"]} '
                f'{os.path.join(args["save_model_to"], "alt_ssd_export")} '
                f'openvino '
                f'--input_format {args["openvino_input_format"]} '
                f'--alt_ssd_export ')
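For context, with illustrative values such as config="configs/ssd/ssd300_coco.py", load_weights="work_dir/latest.pth", save_model_to="export" and openvino_input_format="BGR", the first run_through_shell call above expands to a command of roughly this shape:

python tools/export.py configs/ssd/ssd300_coco.py work_dir/latest.pth export openvino --input_format BGR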
Code Example #2
File: export.py Project: dqawami/mmdetection
def main(args):
    assert args.opset in available_opsets
    assert args.opset > 9

    torch.set_default_tensor_type(torch.FloatTensor)
    model = init_detector(args.config, args.checkpoint, device='cpu')
    model.eval()
    if torch.cuda.is_available():
        model.cuda()
    device = next(model.parameters()).device
    cfg = model.cfg
    fake_data = get_fake_input(cfg, device=device)

    # BEGIN nncf part
    if cfg.get('nncf_config'):
        check_nncf_is_enabled()
        if not is_checkpoint_nncf(args.checkpoint):
            raise RuntimeError('Trying to export with NNCF compression '
                               'a model snapshot that was NOT trained with NNCF')
        cfg.load_from = args.checkpoint
        cfg.resume_from = None
        compression_ctrl, model = wrap_nncf_model(model, cfg, None, get_fake_input)
        # TODO: enable the following line for NNCF 1.5.*
        #compression_ctrl.prepare_for_export()
    # END nncf part

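    # For plain OpenVINO export (i.e. without --alt_ssd_export), the ROI feature
    # extractors are stubbed out here; presumably they are replaced by
    # export-friendly equivalents later in the OpenVINO conversion.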
    if args.target == 'openvino' and not args.alt_ssd_export:
        if hasattr(model, 'roi_head'):
            stub_roi_feature_extractor(model.roi_head, 'bbox_roi_extractor')
            stub_roi_feature_extractor(model.roi_head, 'mask_roi_extractor')

    mmcv.mkdir_or_exist(osp.abspath(args.output_dir))
    onnx_model_path = osp.join(args.output_dir,
                               osp.splitext(osp.basename(args.config))[0] + '.onnx')

    with torch.no_grad():
        export_to_onnx(model, fake_data, export_name=onnx_model_path, opset=args.opset,
                       alt_ssd_export=getattr(args, 'alt_ssd_export', False),
                       verbose=True)
        add_node_names(onnx_model_path)
        print(f'ONNX model has been saved to "{onnx_model_path}"')

    optimize_onnx_graph(onnx_model_path)

    if args.target == 'openvino':
        input_shape = list(fake_data['img'][0].shape)
        if args.input_shape:
            input_shape = [1, 3, *args.input_shape]
        export_to_openvino(cfg, onnx_model_path, args.output_dir, input_shape, args.input_format)
    else:
        # Non-OpenVINO targets stop after the ONNX export above.
        pass
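This main() only consumes a handful of attributes from args. As a rough orientation, a minimal argument parser compatible with it could look like the sketch below; the real export.py defines its own options, so the flag names, defaults and choices here are assumptions:

import argparse

def parse_args():
    # Minimal parser covering only the attributes read by main() above;
    # an illustrative sketch, not the actual parser from export.py.
    parser = argparse.ArgumentParser(description='Export a detector to ONNX/OpenVINO')
    parser.add_argument('config', help='path to the mmdetection config file')
    parser.add_argument('checkpoint', help='path to the trained model snapshot')
    parser.add_argument('output_dir', help='directory for the exported model')
    parser.add_argument('target', choices=['onnx', 'openvino'])
    parser.add_argument('--opset', type=int, default=10)
    parser.add_argument('--alt_ssd_export', action='store_true')
    parser.add_argument('--input_shape', type=int, nargs=2,
                        help='input height and width, e.g. 300 300')
    parser.add_argument('--input_format', default='BGR')
    return parser.parse_args()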
Code Example #3
File: test.py Project: dqawami/mmdetection
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
           or args.show_dir, \
        ('Please specify at least one operation (save, eval, format, or show '
         'the results) with the argument "--out", "--eval", '
         '"--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.update_config:
        cfg.merge_from_dict(args.update_config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)

    # nncf model wrapper
    if cfg.get('nncf_config'):
        check_nncf_is_enabled()
        if not is_checkpoint_nncf(args.checkpoint):
            raise RuntimeError(
                'Trying to test with NNCF compression a model snapshot that was NOT trained with NNCF'
            )
        cfg.load_from = args.checkpoint
        cfg.resume_from = None
        if torch.cuda.is_available():
            model = model.cuda()
        _, model = wrap_nncf_model(model, cfg, None, get_fake_input)
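        # wrap_nncf_model already loads the weights via cfg.load_from, so this
        # torch.load call is only needed to read checkpoint['meta'] below.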
        checkpoint = torch.load(args.checkpoint, map_location=None)
    else:
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        checkpoint = load_checkpoint(model,
                                     args.checkpoint,
                                     map_location='cpu')
        if args.fuse_conv_bn:  # TODO: FIXME: should it be inside this 'else' branch???
            from tools.fuse_conv_bn import fuse_module
            model = fuse_module(model)

    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if torch.cuda.is_available():
        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show,
                                      args.show_dir, args.show_score_thr)
        else:
            model = MMDistributedDataParallel(
                model.cuda(),
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False)
            outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                     args.gpu_collect)
    else:
        model = MMDataCPU(model)
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = cfg.get('evaluation', {})
        kwargs.pop('interval', None)
        kwargs.pop('gpu_collect', None)
        kwargs.update({} if args.options is None else args.options)
        kwargs['metric'] = args.eval
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, **kwargs)
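In the same spirit as the run_through_shell wrapper from Code Example #1, a typical evaluation run of this script might be launched as follows; the helper and all paths are illustrative:

run_through_shell('python tools/test.py '
                  'configs/ssd/ssd300_coco.py '
                  'work_dir/latest.pth '
                  '--out results.pkl '
                  '--eval bbox')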
Code Example #4
File: export.py Project: vanyalzr/mmdetection
def main(args):
    assert args.opset in available_opsets
    assert args.opset > 9

    torch.set_default_tensor_type(torch.FloatTensor)

    config = mmcv.Config.fromfile(args.config)
    if args.update_config:
        config.merge_from_dict(args.update_config)

    model = init_detector(config, args.checkpoint, device='cpu')
    model.eval()
    if torch.cuda.is_available():
        model.cuda()
    device = next(model.parameters()).device
    cfg = model.cfg
    fake_data = get_fake_input(cfg, device=device)

    # BEGIN nncf part
    was_model_compressed = is_checkpoint_nncf(args.checkpoint)
    cfg_contains_nncf = cfg.get('nncf_config')

    if cfg_contains_nncf and not was_model_compressed:
        raise RuntimeError('Trying to export with NNCF compression '
                           'a model snapshot that was NOT trained with NNCF')

    if was_model_compressed and not cfg_contains_nncf:
        # reading NNCF config from checkpoint
        nncf_part = get_nncf_config_from_meta(args.checkpoint)
        for k, v in nncf_part.items():
            cfg[k] = v

    if cfg.get('nncf_config'):
        alt_ssd_export = getattr(args, 'alt_ssd_export', False)
        assert not alt_ssd_export, \
            'Export of NNCF-compressed model is incompatible with --alt_ssd_export'
        check_nncf_is_enabled()
        cfg.load_from = args.checkpoint
        cfg.resume_from = None
        compression_ctrl, model = wrap_nncf_model(model, cfg, None,
                                                  get_fake_input)
        compression_ctrl.prepare_for_export()
    # END nncf part

    mmcv.mkdir_or_exist(osp.abspath(args.output_dir))
    onnx_model_path = osp.join(
        args.output_dir,
        osp.splitext(osp.basename(args.config))[0] + '.onnx')

    with torch.no_grad():
        export_to_onnx(model,
                       fake_data,
                       export_name=onnx_model_path,
                       opset=args.opset,
                       alt_ssd_export=getattr(args, 'alt_ssd_export', False),
                       target=args.target,
                       verbose=False)
        add_node_names(onnx_model_path)
        print(f'ONNX model has been saved to "{onnx_model_path}"')

    optimize_onnx_graph(onnx_model_path)

    with_text = False
    if args.target == 'openvino' and not args.alt_ssd_export:
        if hasattr(model, 'roi_head'):
            if getattr(model.roi_head, 'with_text', False):
                with_text = True

    if args.target == 'openvino':
        input_shape = list(fake_data['img'][0].shape)
        if args.input_shape:
            input_shape = [1, 3, *args.input_shape]
        export_to_openvino(cfg,
                           onnx_model_path,
                           args.output_dir,
                           input_shape,
                           args.input_format,
                           with_text=with_text)
    else:
        # Non-OpenVINO targets stop after the ONNX export above.
        pass
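Compared with Code Example #2, this version also handles the reverse mismatch: a compressed checkpoint paired with a plain config, whose NNCF settings it recovers via get_nncf_config_from_meta. A minimal driver sketch under that assumption; SimpleNamespace stands in for the parsed CLI arguments and every value is a placeholder:

from types import SimpleNamespace

# Hypothetical arguments: the config has no nncf_config section, but the
# checkpoint was trained with NNCF, so main() restores the NNCF settings
# from the checkpoint metadata before wrapping and exporting the model.
args = SimpleNamespace(
    config='configs/ssd/ssd300_coco.py',
    checkpoint='work_dir/nncf_latest.pth',
    output_dir='export',
    target='openvino',
    opset=10,
    alt_ssd_export=False,
    update_config=None,
    input_shape=None,
    input_format='BGR',
)
main(args)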