def main():
    model_bin, model_xml = get_ir_paths(args.model, args.bin)

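    # Read the raw weight buffer of the IR .bin file.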
    with open(model_bin, "rb") as f:
        buffer = f.read()

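    # Layers named in args.ignore (comma-separated) plus, optionally, the first
    # convolution and the fully connected layers are excluded from sparsification.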
    ignored = args.ignore.split(",") + get_ignored_layers(
        model_xml, args.sparsify_first_conv, args.sparsify_fc)

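    # Parse every weight tensor out of the buffer, then count zero-valued
    # entries in the non-ignored layers to report the initial sparsity.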
    all_parameters = find_all_parameters(buffer, model_xml)
    total_params = 0
    zero_params = 0
    for name, param in all_parameters.items():
        if name.split('.')[0] in ignored:
            continue
        total_params += param.data.size
        zero_params += (param.data == 0).sum()

    print("initial sparsity: {:.2f}%".format(
        (zero_params / total_params) * 100))

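    # sparsify() presumably zeroes weights whose normalized magnitude falls
    # below args.normed_threshold, skipping the ignored layers.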
    if args.sparsity_level:
        sparsify(all_parameters, ignored, args.normed_threshold)

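    # Serialize the updated weights into a new .bin file at args.output.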
    if args.output:
        print("saving new ir to: {}".format(args.output))

        with open(args.output, "wb") as f:
            f.write(generate_new_bin(buffer, all_parameters))
Example #2
def main():
    model_bin, model_xml = get_ir_paths(args.model, args.bin)

    config = NNCFConfig.from_json(args.config)

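    # Derive the evaluation resolution from the NNCF input shape: resize to
    # image_size / 0.875, then center-crop to image_size (e.g. 256 -> 224).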
    input_infos_list = create_input_infos(config)
    image_size = input_infos_list[0].shape[-1]

    size = int(image_size / 0.875)

    print('IE version: {}'.format(get_version()))

    # NOTE: torch is imported only after the IE plugin is loaded, to avoid a conflict with PyTorch's built-in MKL-DNN
    plugin = IEPlugin(device='CPU', plugin_dirs=args.cpu_plugin_dir)
    plugin.add_cpu_extension(
        os.path.join(args.cpu_plugin_dir, "libcpu_extension.so"))
    net = IENetwork(model=model_xml, weights=model_bin)
    exec_net = getExecNet(plugin, net)
    from torch.utils.data import DataLoader
    import torchvision.datasets as datasets
    import torchvision.transforms as transforms

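    # ImageNet-style evaluation preprocessing over the validation folder.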
    val_dataset = datasets.ImageFolder(
        args.data,
        transforms.Compose([
            transforms.Resize(size),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ]))
    val_loader = DataLoader(val_dataset,
                            batch_size=1,
                            shuffle=False,
                            num_workers=4,
                            pin_memory=True)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    config['log_dir'] = args.output_dir

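    # Validate the IR through Inference Engine, then the PyTorch model on the
    # same loader; the PyTorch dumps go under <output_dir>/PTH.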
    infer_fn = partial(infer_ie_model, net=net)
    validate_general(val_loader, exec_net, infer_fn)

    validate_torch_model(os.path.join(args.output_dir, "PTH"),
                         config=config,
                         num_layers=args.num_layers,
                         dump=args.dump,
                         val_loader=val_loader,
                         cuda=args.cuda)
def main():
    model_bin, model_xml = get_ir_paths(args.model, args.bin)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

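    # Collect per-layer weights from the OpenVINO IR and from the PyTorch model
    # (first args.num_layers layers of each).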
    ir_weights = collect_IR_weights(os.path.join(args.output_dir, "IR"), model_xml, model_bin, args.num_layers)

    with open(args.config) as f:
        config = json.load(f)
    torch_weights = collect_torch_weights(os.path.join(args.output_dir, "PTH"), config, args.num_layers)

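    # Compare layer by layer: both dumps must have the same number of entries;
    # report the maximum absolute difference per layer and the global maximum.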
    assert len(ir_weights) == len(torch_weights), '{} vs {}'.format(len(ir_weights), len(torch_weights))
    print("Maximum of absolute difference - IR vs Torch")
    max_max = []
    for (k1, v1), (k2, v2) in zip(ir_weights.items(), torch_weights.items()):
        max_diff = abs(v1 - v2).max()
        max_max.append(max_diff)
        print("{0:.5} - max diff [{1:}]  vs  [{2:}]".format(max_diff, k1, k2))
    print("Global maximum:  {0:.5}".format(np.max(max_max)))