Example #1
import jsonschema
import pytest
from nncf import NNCFConfig

def test_json_against_nncf_config_schema(config_test_struct):
    # config_test_struct pairs a config path with the expected validation outcome
    config_path, should_pass = config_test_struct
    if should_pass:
        _ = NNCFConfig.from_json(str(config_path))
    else:
        with pytest.raises(jsonschema.ValidationError):
            _ = NNCFConfig.from_json(str(config_path))
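For reference, a minimal sketch of a JSON document that this schema check would accept. The required field set varies across NNCF versions, so treat the "input_info" layout as an assumption, and the file name is a placeholder:

# Minimal config assumed to satisfy the schema (verify against your NNCF version)
import json
from nncf import NNCFConfig

with open("minimal_config.json", "w") as f:
    json.dump({"input_info": {"sample_size": [1, 3, 224, 224]}}, f)
nncf_config = NNCFConfig.from_json("minimal_config.json")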
Example #2
def test_model_can_be_loaded_with_resume(_params):
    p = _params
    sample_config_path = p['sample_config_path']
    checkpoint_path = p['checkpoint_path']

    # The same JSON file drives both the sample runner config and the NNCF config
    config = SampleConfig.from_json(str(sample_config_path))
    nncf_config = NNCFConfig.from_json(str(sample_config_path))

    config.execution_mode = p['execution_mode']

    config.current_gpu = 0
    config.device = get_device(config)
    config.distributed = config.execution_mode in (ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    if config.distributed:
        # Single-rank rendezvous over local TCP for the distributed test path
        config.dist_url = "tcp://127.0.0.1:9898"
        config.dist_backend = "nccl"
        config.rank = 0
        config.world_size = 1
        configure_distributed(config)

    model_name = config['model']
    model = load_model(model_name,
                       pretrained=False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params'))

    model.to(config.device)
    model, compression_ctrl = create_compressed_model_and_algo_for_test(model, nncf_config)
    model, _ = prepare_model_for_execution(model, config)

    if config.distributed:
        compression_ctrl.distributed()

    # is_resume=True makes load_state strict about matching the checkpoint keys
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    load_state(model, checkpoint['state_dict'], is_resume=True)
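For context, a sketch of what the `_params` fixture could carry. The keys come from the accesses in the test above; both paths are placeholders:

# Hypothetical _params instance (paths are placeholders)
_params = {
    'sample_config_path': 'configs/quantization/resnet18_imagenet_int8.json',
    'checkpoint_path': 'checkpoints/resnet18_int8.pth',
    'execution_mode': ExecutionMode.DISTRIBUTED,
}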
Example #3
def main():
    model_bin, model_xml = get_ir_paths(args.model, args.bin)

    config = NNCFConfig.from_json(args.config)

    input_infos_list = create_input_infos(config)
    image_size = input_infos_list[0].shape[-1]

    # Standard ImageNet preprocessing: resize so the center crop covers 87.5% of the image
    size = int(image_size / 0.875)

    print('IE version: {}'.format(get_version()))

    # NOTE: importing torch after loading IE to plugin to avoid issue with built-in MKLDNN of PyTorch
    plugin = IEPlugin(device='CPU', plugin_dirs=args.cpu_plugin_dir)
    plugin.add_cpu_extension(
        os.path.join(args.cpu_plugin_dir, "libcpu_extension.so"))
    net = IENetwork(model=model_xml, weights=model_bin)
    exec_net = getExecNet(plugin, net)
    from torch.utils.data import DataLoader
    import torchvision.datasets as datasets
    import torchvision.transforms as transforms

    val_loader = DataLoader(datasets.ImageFolder(
        args.data,
        transforms.Compose([
            transforms.Resize(size),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])),
                            batch_size=1,
                            shuffle=False,
                            num_workers=4,
                            pin_memory=True)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    config['log_dir'] = args.output_dir

    infer_fn = partial(infer_ie_model, net=net)
    validate_general(val_loader, exec_net, infer_fn)

    validate_torch_model(os.path.join(args.output_dir, "PTH"),
                         config=config,
                         num_layers=args.num_layers,
                         dump=args.dump,
                         val_loader=val_loader,
                         cuda=args.cuda)
Example #4
def q_dq_config(config):
    nncf_config = NNCFConfig.from_json(config)
    if "compression" in nncf_config:
        compression_config = nncf_config["compression"]
        quantization_config = None
        # "compression" may be a single algorithm dict or a list of them
        if isinstance(compression_config, list):
            matches = []
            for subconfig in compression_config:
                if subconfig["algorithm"] == "quantization":
                    matches.append(subconfig)
            if matches:
                assert len(matches) == 1
                quantization_config = matches[0]
        else:
            if compression_config["algorithm"] == "quantization":
                quantization_config = compression_config
        if quantization_config is not None:
            # Export quantizers as standard ONNX QuantizeLinear/DequantizeLinear ops
            quantization_config["export_to_onnx_standard_ops"] = True
    return nncf_config
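A hedged sketch of how the returned config could be consumed: feed it to NNCF's create_compressed_model and export, so the quantizers land as standard ONNX QuantizeLinear/DequantizeLinear ops. The model choice, file names, and the nncf.torch import path are assumptions (older NNCF versions expose create_compressed_model at the top-level nncf package):

# Hypothetical usage of q_dq_config (model and paths are placeholders)
import torchvision.models as models
from nncf.torch import create_compressed_model

nncf_config = q_dq_config("config.json")  # the JSON must also carry "input_info"
model = models.resnet18()
compression_ctrl, compressed_model = create_compressed_model(model, nncf_config)
compression_ctrl.export_model("model_qdq.onnx")  # quantizers exported as ONNX Q/DQ ops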