def test_can_restore_binary_mask_on_magnitude_algo_resume():
    config = get_empty_config()
    config['compression'] = {
        "algorithm": "magnitude_sparsity",
        "weight_importance": "abs",
        "params": {
            "schedule": "multistep",
            "sparsity_levels": [0.3, 0.5]
        }
    }
    magnitude_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    sparse_model = magnitude_algo.model
    with torch.no_grad():
        sparse_model(torch.ones([1, 1, 10, 10]))

    config = get_empty_config()
    config["compression"] = {"algorithm": "const_sparsity"}
    const_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    const_sparse_model = const_algo.model

    load_state(const_sparse_model, sparse_model.state_dict())

    op = const_sparse_model.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)

    op = const_sparse_model.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)
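
The pattern above is the standard resume flow for magnitude sparsity: loading the
trained model's state dict into a "const_sparsity" model restores the learned
binary masks and keeps them fixed, since const_sparsity applies the loaded masks
without recomputing them.
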
def test_can_restore_binary_mask_on_magnitude_quant_algo_resume():
    config = get_empty_config()
    config["compression"] = [
        {"algorithm": "magnitude_sparsity", "weight_importance": "abs",
         "params": {"schedule": "multistep", "sparsity_levels": [0.3, 0.5]}},
        {"algorithm": "quantization"}]
    reset_context('orig')
    reset_context('quantized_graphs')
    magnitude_quant_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    # load_state doesn't support CPU + Quantization
    sparse_model = torch.nn.DataParallel(magnitude_quant_algo.model)
    sparse_model.cuda()
    with torch.no_grad():
        sparse_model(torch.ones([1, 1, 10, 10]))

    reset_context('orig')
    reset_context('quantized_graphs')
    config = get_empty_config()
    config["compression"] = [{"algorithm": "const_sparsity"}, {"algorithm": "quantization"}]
    const_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    const_sparse_model = const_algo.model

    load_state(const_sparse_model, sparse_model.state_dict())

    op = const_sparse_model.module.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)

    op = const_sparse_model.module.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)
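
Unlike the CPU-only variant above, the convolutions here sit one attribute level
deeper (const_sparse_model.module.conv1): the quantization algorithm wraps the
original network and stores it in its .module attribute.
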
def test_can_not_create_magnitude_algo__without_steps():
    model = MagnitudeTestModel()
    config = get_basic_magnitude_sparsity_config()
    config['compression']['params'] = {
        'schedule': 'multistep',
        'sparsity_levels': [0.1]
    }
    with pytest.raises(AttributeError):
        create_compression_algorithm(model, config)
def test_can_create_rb_algo__with_adaptive_scheduler():
    config = get_empty_config()
    config['compression']['algorithm'] = 'rb_sparsity'
    config['compression']["params"]["schedule"] = 'adaptive'
    compression_algo = create_compression_algorithm(BasicConvTestModel(),
                                                    config)
    assert isinstance(compression_algo.scheduler, AdaptiveSparsityScheduler)
def test_can_choose_scheduler(algo, schedule_type, scheduler_class):
    config = get_empty_config()
    config['compression']['algorithm'] = algo
    config['compression']["params"]["schedule"] = schedule_type
    compression_algo = create_compression_algorithm(BasicConvTestModel(),
                                                    config)
    assert isinstance(compression_algo.scheduler, scheduler_class)
def test_sparse_algo_can_collect_sparse_layers():
    model = TwoConvTestModel()

    config = get_basic_sparsity_config()
    compression_algo = create_compression_algorithm(model, config)

    assert len(compression_algo.sparsified_module_info) == 2
def test_magnitude_scheduler_can_do_epoch_step__with_norm():
    model = MagnitudeTestModel()
    config = get_multistep_normed_abs_config()
    compression_algo = create_compression_algorithm(model, config)
    scheduler = compression_algo.scheduler
    assert isinstance(scheduler, MultiStepSparsityScheduler)

    assert pytest.approx(compression_algo.sparsity_level) == 0.1
    assert compression_algo.threshold == pytest.approx(0.219, 0.01)
    assert scheduler.prev_ind == 0

    scheduler.epoch_step()
    assert compression_algo.sparsity_level == 0.5
    assert compression_algo.threshold == pytest.approx(0.243, 0.01)
    assert scheduler.prev_ind == 1

    scheduler.epoch_step()
    assert compression_algo.sparsity_level == 0.5
    assert compression_algo.threshold == pytest.approx(0.243, 0.01)
    assert scheduler.prev_ind == 1

    scheduler.epoch_step()
    assert compression_algo.sparsity_level == 0.9
    assert compression_algo.threshold == pytest.approx(0.371, 0.01)
    assert scheduler.prev_ind == 2
def test_can_load_quant_algo__with_defaults():
    model = BasicConvTestModel()
    config = get_basic_quantization_config()
    reset_context('orig')
    reset_context('quantized_graphs')
    compression_algo = create_compression_algorithm(deepcopy(model), config)
    assert isinstance(compression_algo, Quantization)
    quant_model = compression_algo.model

    model_conv = get_all_modules_by_type(model, 'Conv2d')
    quant_model_conv = get_all_modules_by_type(
        quant_model.get_nncf_wrapped_module(), 'NNCFConv2d')
    assert len(model_conv) == len(quant_model_conv)

    for module_name in model_conv:
        scope = module_name.split('/')
        scope[-1] = scope[-1].replace('Conv2d', 'NNCFConv2d')
        quant_module_name = '/'.join(scope)
        assert quant_module_name in quant_model_conv

        store = []
        for op in quant_model_conv[quant_module_name].pre_ops.values():
            if isinstance(op, (UpdateInputs, UpdateWeight)) and isinstance(
                    op.operand, SymmetricQuantizer):
                assert op.__class__.__name__ not in store
                store.append(op.__class__.__name__)
        assert UpdateWeight.__name__ in store
def test_can_create_magnitude_sparse_algo__with_defaults():
    model = MagnitudeTestModel()
    config = get_basic_magnitude_sparsity_config()
    config['compression']['params'] = {'schedule': 'multistep'}
    compression_algo = create_compression_algorithm(deepcopy(model), config)

    assert isinstance(compression_algo, MagnitudeSparsity)
    sparse_model = compression_algo.model
    assert compression_algo.sparsity_level == approx(0.1)
    assert len(list(sparse_model.modules())) == 11

    model_conv = get_all_modules_by_type(model, 'Conv2d')
    sparse_model_conv = get_all_modules_by_type(sparse_model, 'NNCFConv2d')
    assert len(model_conv) == len(sparse_model_conv)

    for i, module_name in enumerate(model_conv):
        scope = module_name.split('/')
        scope[-1] = scope[-1].replace('Conv2d', 'NNCFConv2d')
        sparse_module_name = '/'.join(scope)
        assert sparse_module_name in sparse_model_conv

        store = []
        sparse_module = sparse_model_conv[sparse_module_name]
        ref_mask = torch.ones_like(sparse_module.weight) if i == 0 else ref_mask_2
        for op in sparse_module.pre_ops.values():
            if isinstance(op, UpdateWeight) and isinstance(op.operand, BinaryMask):
                assert compression_algo.threshold == approx(0.24, 0.1)
                assert torch.allclose(op.operand.binary_mask, ref_mask)
                assert isinstance(compression_algo.weight_importance, type(normed_magnitude))
                assert op.__class__.__name__ not in store
                store.append(op.__class__.__name__)
def test_can_create_const_sparse_algo__with_default():
    model = BasicConvTestModel()
    config = get_empty_config()
    config["compression"] = {"algorithm": "const_sparsity"}
    compression_algo = create_compression_algorithm(deepcopy(model), config)

    assert isinstance(compression_algo, ConstSparsity)
    sparse_model = compression_algo.model
    assert len(list(sparse_model.modules())) == 6

    model_conv = get_all_modules_by_type(model, 'Conv2d')
    sparse_model_conv = get_all_modules_by_type(sparse_model, 'NNCFConv2d')
    assert len(model_conv) == len(sparse_model_conv)

    for module_name in model_conv:
        scope = module_name.split('/')
        scope[-1] = scope[-1].replace('Conv2d', 'NNCFConv2d')
        sparse_module_name = '/'.join(scope)
        assert sparse_module_name in sparse_model_conv

        store = []
        sparse_module = sparse_model_conv[sparse_module_name]
        for op in sparse_module.pre_ops.values():
            if isinstance(op, UpdateWeight) and isinstance(
                    op.operand, BinaryMask):
                ref_mask = torch.ones_like(sparse_module.weight)
                assert torch.allclose(op.operand.binary_mask, ref_mask)
                assert op.__class__.__name__ not in store
                store.append(op.__class__.__name__)
def test_can_not_set_sparsity_more_than_one_for_magnitude_sparse_algo():
    model = MagnitudeTestModel()
    config = get_basic_magnitude_sparsity_config()
    compression_algo = create_compression_algorithm(model, config)
    with pytest.raises(AttributeError):
        compression_algo.set_sparsity_level(1)
    with pytest.raises(AttributeError):
        compression_algo.set_sparsity_level(1.2)
def create_compressed_model(model: Module,
                            config: Config,
                            dummy_forward_fn: Callable[[Module], Any] = None):
    """dummy_forward_fn will be used instead of a *forward* function call to build
    the graph - useful when the original training pipeline has special formats of
    data loader output or has additional *forward* arguments other than input tensors.
    Otherwise, the *forward* call of the model will be made with a single Tensor with
    a shape and type specified in config."""

    if dummy_forward_fn is None:
        input_info_list = create_input_infos(config)
        graph_builder = GraphBuilder(
            custom_forward_fn=create_dummy_forward_fn(input_info_list))
    else:
        graph_builder = GraphBuilder(custom_forward_fn=dummy_forward_fn)

    if is_main_process():
        print(*get_all_modules(model).keys(), sep="\n")
        reset_context('create_model')
        graph = graph_builder.build_graph(model, 'create_model')
        graph.dump_graph(osp.join(config.log_dir, "original_graph.dot"))

    compression_algo = create_compression_algorithm(model, config,
                                                    dummy_forward_fn)

    compressed_model = compression_algo.model
    if is_main_process() and not isinstance(compression_algo,
                                            NoCompressionAlgorithm):
        context_name = 'create_compressed_graph'
        if isinstance(compressed_model, QuantizedNetwork):
            context_name = compressed_model.get_context_name()
        graph = graph_builder.build_graph(compression_algo.model, context_name)
        graph.dump_graph(osp.join(config.log_dir, "compressed_graph.dot"))

    return compression_algo, compressed_model
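
A minimal usage sketch with a custom dummy_forward_fn (the model, the input
shape, and the extra return_features argument are hypothetical, only
illustrating the case the docstring describes):

def custom_dummy_forward(model):
    # Hypothetical pipeline: forward() takes an extra flag besides the input
    # tensor, so the default single-tensor call built from the config would fail.
    return model(torch.ones([1, 3, 32, 32]), return_features=True)

compression_algo, compressed_model = create_compressed_model(
    model, config, dummy_forward_fn=custom_dummy_forward)
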
def create_compressed_model(model, config):
    input_args = (next(model.parameters()).new_empty(
        config['input_sample_size']), )
    if is_main_process():
        print(*get_all_modules(model).keys(), sep="\n")
        ctx = build_graph(model,
                          input_args, {},
                          'create_model',
                          reset_context=True)
        dump_graph(ctx, osp.join(config.log_dir, "original_graph.dot"))

    compression_algo = create_compression_algorithm(model, config)

    if is_main_process():
        if hasattr(compression_algo.model, "build_graph"):
            ctx = compression_algo.model.build_graph()
        else:
            ctx = build_graph(compression_algo.model,
                              input_args, {},
                              "create_model_compressed",
                              reset_context=True)
        dump_graph(ctx, osp.join(config.log_dir, "compressed_graph.dot"))

    model = compression_algo.model
    return compression_algo, model
def test_quantization_configs__custom():
    model = BasicConvTestModel()

    config = get_basic_quantization_config()
    config['compression'].update({
        "weights": {
            "mode": "asymmetric",
            "per_channel": True,
            "signed": False,
            "bits": 4
        },
        "activations": {
            "mode": "asymmetric",
            "bits": 4,
            "signed": True,
        },
    })
    reset_context('orig')
    reset_context('quantized_graphs')
    compression_algo = create_compression_algorithm(model, config)

    weight_quantizers, activation_quantizers = split_quantizers(
        compression_algo.model)

    ref_weight_qconfig = QuantizerConfig(4, QuantizationMode.ASYMMETRIC, None,
                                         True, None, True)
    for wq in weight_quantizers:
        compare_qconfigs(ref_weight_qconfig, wq.config)

    ref_activation_qconfig = QuantizerConfig(4, QuantizationMode.ASYMMETRIC,
                                             True, False, None, False)
    for aq in activation_quantizers:
        compare_qconfigs(ref_activation_qconfig, aq.config)
def test_can_quantize_inputs_for_sparsity_plus_quantization():
    reset_context('orig')
    reset_context('quantized_graphs')
    reset_context('test')
    model = BasicConvTestModel()
    config = get_basic_sparsity_plus_quantization_config()
    compression_algo = create_compression_algorithm(model, config)
    assert isinstance(compression_algo, CompositeCompressionAlgorithm)
    sparse_quantized_model = compression_algo.model

    sparse_quantized_model_conv = get_all_modules_by_type(
        sparse_quantized_model, 'NNCFConv2d')

    nncf_module = next(iter(sparse_quantized_model_conv.values()))
    # 1x weight sparsifier + 1x weight quantizer + 1x input quantizer
    assert len(nncf_module.pre_ops) == 3
    assert isinstance(nncf_module.pre_ops['0'], UpdateWeight)
    assert isinstance(nncf_module.pre_ops['0'].op, RBSparsifyingWeight)

    assert isinstance(nncf_module.pre_ops['1'], UpdateWeight)
    assert isinstance(nncf_module.pre_ops['1'].op, SymmetricQuantizer)

    assert isinstance(nncf_module.pre_ops['2'], UpdateInputs)
    assert isinstance(nncf_module.pre_ops['2'].op, SymmetricQuantizer)
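
The assertion order above documents the composite pre-op pipeline: the weight is
first masked by the RB sparsifier ('0'), the masked weight is then quantized
('1'), and a separate UpdateInputs pre-op ('2') quantizes the activations
entering the convolution.
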
def test_can_load_sparse_algo__with_defaults():
    model = BasicConvTestModel()
    config = get_basic_sparsity_config()
    compression_algo = create_compression_algorithm(deepcopy(model), config)
    assert isinstance(compression_algo, RBSparsity)
    sparse_model = compression_algo.model

    model_conv = get_all_modules_by_type(model, 'Conv2d')
    sparse_model_conv = get_all_modules_by_type(sparse_model, 'NNCFConv2d')
    assert len(model_conv) == len(sparse_model_conv)

    for module_name in model_conv:
        scope = module_name.split('/')
        scope[-1] = scope[-1].replace('Conv2d', 'NNCFConv2d')
        sparse_module_name = '/'.join(scope)
        assert sparse_module_name in sparse_model_conv

        store = []
        sparse_module = sparse_model_conv[sparse_module_name]
        for op in sparse_module.pre_ops.values():
            if isinstance(op, UpdateWeight) and isinstance(
                    op.operand, RBSparsifyingWeight):
                assert torch.allclose(op.operand.binary_mask,
                                      torch.ones_like(sparse_module.weight))
                assert op.operand.sparsify
                assert op.__class__.__name__ not in store
                store.append(op.__class__.__name__)
def test_magnitude_algo_binary_masks_are_applied():
    model = BasicConvTestModel()
    config = get_empty_config()
    config['compression']['algorithm'] = "magnitude_sparsity"
    compression_algo = create_compression_algorithm(model, config)
    compressed_model = compression_algo.model
    minfo_list = compression_algo.sparsified_module_info  # type: List[SparseModuleInfo]
    minfo = minfo_list[0]  # type: SparseModuleInfo

    minfo.operand.binary_mask = torch.ones_like(minfo.module.weight)  # 2x1x2x2
    input_ = torch.ones(size=(1, 1, 5, 5))
    ref_output_1 = -4 * torch.ones(size=(2, 4, 4))
    output_1 = compressed_model(input_)
    assert torch.all(torch.eq(output_1, ref_output_1))

    minfo.operand.binary_mask[0][0][0][1] = 0
    minfo.operand.binary_mask[1][0][1][0] = 0
    ref_output_2 = -3 * torch.ones_like(ref_output_1)
    output_2 = compressed_model(input_)
    assert torch.all(torch.eq(output_2, ref_output_2))

    minfo.operand.binary_mask[1][0][0][1] = 0
    ref_output_3 = ref_output_2.clone()
    ref_output_3[1] = -2 * torch.ones_like(ref_output_1[1])
    output_3 = compressed_model(input_)
    assert torch.all(torch.eq(output_3, ref_output_3))
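
The reference outputs follow from the all-ones input and the (assumed) all -1
weight initialization of BasicConvTestModel: each output element is minus the
number of unmasked weights in the corresponding 1x2x2 filter, giving -4 with a
full mask, -3 after zeroing one weight per filter, and -2 in the second channel
once a second weight there is zeroed.
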
def test_can_set_sparse_layers_to_loss():
    model = BasicConvTestModel()
    config = get_basic_sparsity_config()
    config['compression']['train_phase'] = ''
    compression_algo = create_compression_algorithm(model, config)
    loss = compression_algo.loss
    assert isinstance(loss, SparseLoss)
    for layer in loss.sparse_layers:
        assert isinstance(layer, RBSparsifyingWeight)
def start_evaluation(args):
    """Launches the evaluation process"""

    if args.dataset == 'vgg':
        dataset = VGGFace2(args.val,
                           args.v_list,
                           args.v_land,
                           landmarks_training=True)
    elif args.dataset == 'celeb':
        dataset = CelebA(args.val, args.v_land, test=True)
    else:
        dataset = NDG(args.val, args.v_land)

    if dataset.have_landmarks:
        log.info('Using alignment for the validation data')
        dataset.transform = t.Compose(
            [Rescale((48, 48)), ToTensor(switch_rb=True)])
    else:
        exit()

    val_loader = DataLoader(dataset,
                            batch_size=args.val_batch_size,
                            num_workers=4,
                            shuffle=False,
                            pin_memory=True)

    model = models_landmarks['landnet']()

    assert args.snapshot is not None
    if args.compr_config:
        config = Config.from_json(args.compr_config)
        compression_algo = create_compression_algorithm(model, config)
        model = compression_algo.model

    log.info('Testing snapshot ' + args.snapshot + ' ...')
    model = load_model_state(model,
                             args.snapshot,
                             args.device,
                             eval_state=True)
    model.eval()
    cudnn.benchmark = True
    model = torch.nn.DataParallel(
        model,
        device_ids=[args.device],
    )

    log.info('Face landmarks model:')
    log.info(model)

    avg_err, per_point_avg_err, failures_rate = evaluate(val_loader, model)
    log.info('Avg RMSE error: {}'.format(avg_err))
    log.info('Per landmark RMSE error: {}'.format(per_point_avg_err))
    log.info('Failure rate: {}'.format(failures_rate))
    if args.compr_config and "sparsity_level" in compression_algo.statistics():
        log.info("Sparsity level: {0:.2f}".format(
            compression_algo.statistics()
            ['sparsity_rate_for_sparsified_modules']))
def test_magnitude_sparse_algo_sets_threshold(weight_importance, sparsity_level, threshold):
    model = MagnitudeTestModel()
    config = get_basic_magnitude_sparsity_config()
    config['compression']['weight_importance'] = weight_importance
    config['compression']['params'] = {'schedule': 'multistep'}
    compression_algo = create_compression_algorithm(model, config)
    if sparsity_level:
        compression_algo.set_sparsity_level(sparsity_level)
    assert compression_algo.threshold == pytest.approx(threshold, 0.01)
def test_can_create_sparse_scheduler__with_defaults(self, algo):
    config = get_empty_config()
    config['compression']['algorithm'] = algo
    config['compression']["params"]["schedule"] = 'polynomial'
    compression_algo = create_compression_algorithm(BasicConvTestModel(), config)
    scheduler = compression_algo.scheduler
    assert scheduler.initial_sparsity == 0
    assert scheduler.max_sparsity == 0.5
    assert scheduler.max_step == 90
    assert scheduler.sparsity_training_steps == 100
def test_sparse_algo_can_calc_sparsity_rate__for_2_conv_model():
    model = TwoConvTestModel()

    config = get_basic_sparsity_config()
    compression_algo = create_compression_algorithm(model, config)

    assert compression_algo.sparsified_weights_count == model.weights_num
    assert compression_algo.sparsity_rate_for_model == (
        1 - (model.nz_weights_num + model.nz_bias_num) /
        (model.weights_num + model.bias_num))
    assert compression_algo.sparsity_rate_for_sparsified_modules == 1 - model.nz_weights_num / model.weights_num
def test_scale_and_sign_init_for_quant_algo():
    model = TwoConvTestModel()

    config = get_empty_config()
    config['compression'] = {
        'algorithm': 'quantization',
        'initializer': {
            'num_init_steps': 1
        }
    }

    reset_context('orig')
    reset_context('quantized_graphs')
    compression_algo = create_compression_algorithm(model, config)
    model = compression_algo.model

    input_sample_size = config.input_sample_size

    class OnesDatasetMock:
        def __init__(self, input_size):
            self.input_size = input_size
            super().__init__()

        def __getitem__(self, index):
            return torch.ones(self.input_size), torch.ones(1)

        def __len__(self):
            return 1

    data_loader = torch.utils.data.DataLoader(OnesDatasetMock(input_sample_size[1:]),
                                              batch_size=1,
                                              num_workers=1,
                                              shuffle=False)
    compression_algo.initialize(data_loader)

    model_conv = get_all_modules_by_type(model, 'Quantize')
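    # Each regex below matches a quantizer module by its scope name; the tuple
    # holds the (signed, scale) pair the initializer is expected to produce.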
    ref_table = {
        '.*Sequential\\[0\\].*UpdateWeight.*': (True, 1),
        '.*Sequential\\[1\\].*UpdateWeight.*': (False, 1),
        '.*activation_quantizers.*Sequential\\[0\\].*': (True, 4),
        '.*activation_quantizers.*Sequential\\[1\\].*': (True, 24)
    }
    for name, module in model_conv.items():
        for pattern, ref_values in ref_table.items():
            match = re.search(pattern, name)
            if match:
                assert isinstance(module, Quantize)
                assert module.signed == ref_values[
                    0], 'sign is not matched for {}'.format(name)
                assert module.scale == ref_values[
                    1], 'scale is not matched for {}'.format(name)
def activation_quantizers_dumping_worker(current_gpu, config, tmp_path):
    model = resnet50(pretrained=False)

    reset_context('orig')
    reset_context('quantized_graphs')

    algo = create_compression_algorithm(model, config)
    model = algo.model
    path = get_path_to_keys(tmp_path, current_gpu)
    print(path)
    with open(path, 'w') as f:
        f.writelines("%s\n" % key
                     for key in model.activation_quantizers.keys())
def test_can_create_quant_loss_and_scheduler():
    model = BasicConvTestModel()

    config = get_basic_quantization_config()
    reset_context('orig')
    reset_context('quantized_graphs')
    compression_algo = create_compression_algorithm(model, config)

    loss = compression_algo.loss
    assert isinstance(loss, CompressionLoss)

    scheduler = compression_algo.scheduler
    assert isinstance(scheduler, CompressionScheduler)
def create_model(config):
    ssd_net = build_ssd(config.model, config.ssd_params,
                        config.input_sample_size[-1], config.num_classes,
                        config)
    ssd_net.to(config.device)
    compression_algo = create_compression_algorithm(ssd_net, config)
    ssd_net = compression_algo.model
    weights = config.get('weights')
    if weights:
        sd = torch.load(weights, map_location='cpu')
        load_state(ssd_net, sd)
    ssd_net.train()
    model, _ = prepare_model_for_execution(ssd_net, config)
    return compression_algo, model
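
Note the ordering in create_model: the compression algorithm wraps the network
before load_state is called, so the keys of a checkpoint saved from a compressed
model (including binary masks and quantizer parameters) line up with the wrapped
module names.
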
def test_magnitude_scheduler_can_do_epoch_step__with_last():
    model = MagnitudeTestModel()
    config = get_multistep_normed_abs_config()
    compression_algo = create_compression_algorithm(model, config)
    scheduler = compression_algo.scheduler

    scheduler.epoch_step(3)
    assert scheduler.prev_ind == 2
    assert compression_algo.sparsity_level == 0.9
    assert compression_algo.threshold == pytest.approx(0.371, 0.01)

    scheduler.epoch_step()
    assert scheduler.prev_ind == 2
    assert compression_algo.sparsity_level == 0.9
    assert compression_algo.threshold == pytest.approx(0.371, 0.01)
def test_magnitude_algo_set_binary_mask_on_forward():
    model = MagnitudeTestModel()
    config = get_basic_magnitude_sparsity_config()
    config['compression']['weight_importance'] = 'abs'
    compression_algo = create_compression_algorithm(model, config)
    compression_algo.set_sparsity_level(0.3)
    model = compression_algo.model
    with torch.no_grad():
        model(torch.ones([1, 1, 10, 10]))

    op = model.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)

    op = model.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)
def test_sparse_network(self, model_name, model_builder, input_size, algo, params):
    model = model_builder()
    from nncf.layers import NNCF_MODULES_MAP
    sparsifiable_modules = list(NNCF_MODULES_MAP.values())
    ref_num_sparsed = len(get_all_modules_by_type(model, sparsifiable_modules))
    ctx = reset_context('test')
    config = get_empty_config(input_sample_size=input_size)
    config["compression"] = {"algorithm": algo, "params": params}
    compression_algo = create_compression_algorithm(model, config)
    assert ref_num_sparsed == len(compression_algo.sparsified_module_info)
    model = compression_algo.model
    with context('test') as c:
        _ = model(torch.zeros(input_size))
        c.reset_scope_operator_call_counters()
        _ = model(torch.zeros(input_size))
    check_graph(to_networkx(ctx), model_name, algo)
def test_scheduler_can_do_epoch_step(self, algo, schedule, get_params, ref_levels):
    model = BasicConvTestModel()
    config = get_empty_config()
    config['compression']['algorithm'] = algo
    config['compression']["params"] = get_params()
    config['compression']["params"]["schedule"] = schedule
    compression_algo = create_compression_algorithm(model, config)
    scheduler = compression_algo.scheduler

    assert pytest.approx(scheduler.current_sparsity_level) == ref_levels[0]
    for ref_level in ref_levels[1:]:
        scheduler.epoch_step()
        assert pytest.approx(scheduler.current_sparsity_level) == ref_level

    for m in compression_algo.sparsified_module_info:
        assert not m.operand.sparsify