# The helper fixtures used below (get_empty_config, get_basic_magnitude_sparsity_config,
# create_compression_algorithm, create_compressed_model_and_algo_for_test, load_state,
# check_equal, reset_context, MagnitudeTestModel, BasicConvTestModel, TwoConvTestModel,
# ref_mask_1, ref_mask_2) are assumed to come from the NNCF test utilities.
import torch


def test_can_restore_binary_mask_on_magnitude_algo_resume():
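    """Binary masks computed by the magnitude sparsity algorithm should be restored
    when its state dict is loaded into a const_sparsity model."""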
    config = get_empty_config()
    config['compression'] = {
        "algorithm": "magnitude_sparsity",
        "weight_importance": "abs",
        "params": {
            "schedule": "multistep",
            "sparsity_levels": [0.3, 0.5]
        }
    }
    magnitude_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    sparse_model = magnitude_algo.model
    with torch.no_grad():
        sparse_model(torch.ones([1, 1, 10, 10]))

    config = get_empty_config()
    config["compression"] = {"algorithm": "const_sparsity"}
    const_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    const_sparse_model = const_algo.model

    load_state(const_sparse_model, sparse_model.state_dict())

    op = const_sparse_model.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)

    op = const_sparse_model.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)
def test_can_restore_binary_mask_on_magnitude_quant_algo_resume():
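    """Same resume scenario, with quantization stacked on top of magnitude sparsity;
    the model is wrapped in DataParallel and run on CUDA before its state is saved."""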
    config = get_empty_config()
    config["compression"] = [
        {"algorithm": "magnitude_sparsity", "weight_importance": "abs",
         "params": {"schedule": "multistep", "sparsity_levels": [0.3, 0.5]}},
        {"algorithm": "quantization"}]
    reset_context('orig')
    reset_context('quantized_graphs')
    magnitude_quant_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    # load_state doesn't support CPU + Quantization
    sparse_model = torch.nn.DataParallel(magnitude_quant_algo.model)
    sparse_model.cuda()
    with torch.no_grad():
        sparse_model(torch.ones([1, 1, 10, 10]))

    reset_context('orig')
    reset_context('quantized_graphs')
    config = get_empty_config()
    config["compression"] = [{"algorithm": "const_sparsity"}, {"algorithm": "quantization"}]
    const_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    const_sparse_model = const_algo.model

    load_state(const_sparse_model, sparse_model.state_dict())

    op = const_sparse_model.module.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)

    op = const_sparse_model.module.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)
def test_load_state_skips_not_matched_params__from_smaller_to_larger():
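    """load_state should skip parameters whose shapes do not match: nothing is loaded
    and the target model keeps its initialization values."""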
    ref_weights = torch.tensor([[[[3, 2], [2, 3]]]])
    ref_bias = torch.tensor([2.])
    model_save = BasicConvTestModel(out_channels=2)
    model_load = BasicConvTestModel(out_channels=1, weight_init=2, bias_init=2)

    num_loaded = load_state(model_load, model_save.state_dict())

    assert num_loaded == 0
    act_bias = model_load.conv.bias.data
    act_weights = model_load.conv.weight.data
    check_equal(act_bias, ref_bias)
    check_equal(act_weights, ref_weights)
def test_load_state_skips_not_matched_params__from_larger_to_smaller():
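    """Mirror case: mismatched parameters are skipped and the target model keeps its
    default weights and bias."""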
    ref_weights = BasicConvTestModel.default_weight()
    ref_bias = BasicConvTestModel.default_bias()
    model_save = BasicConvTestModel(out_channels=1, weight_init=2, bias_init=2)
    model_load = BasicConvTestModel(out_channels=2)

    num_loaded = load_state(model_load, model_save.state_dict())

    act_bias = model_load.conv.bias.data
    act_weights = model_load.conv.weight.data
    assert num_loaded == 0
    check_equal(act_bias, ref_bias)
    check_equal(act_weights, ref_weights)
def test_magnitude_algo_set_binary_mask_on_forward():
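    """A forward pass after set_sparsity_level(0.3) should write the expected binary
    masks into the conv pre-ops."""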
    config = get_basic_magnitude_sparsity_config()
    config['compression']['weight_importance'] = 'abs'
    sparse_model, compression_ctrl = create_compressed_model_and_algo_for_test(MagnitudeTestModel(), config)
    compression_ctrl.set_sparsity_level(0.3)
    with torch.no_grad():
        sparse_model(torch.ones([1, 1, 10, 10]))

    op = sparse_model.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)

    op = sparse_model.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)
def test_basic_model_has_expected_params():
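    """BasicConvTestModel should expose the expected default parameters and
    consistent element / non-zero counters."""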
    model = BasicConvTestModel()
    act_weights = model.conv.weight.data
    ref_weights = BasicConvTestModel.default_weight()
    act_bias = model.conv.bias.data
    ref_bias = BasicConvTestModel.default_bias()

    check_equal(act_bias, ref_bias)
    check_equal(act_weights, ref_weights)

    assert act_weights.nonzero().size(0) == model.nz_weights_num
    assert act_bias.nonzero().size(0) == model.nz_bias_num
    assert act_weights.numel() == model.weights_num
    assert act_bias.numel() == model.bias_num
def test_magnitude_algo_set_binary_mask_on_forward():
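    """Same mask-on-forward check, built through create_compression_algorithm instead
    of create_compressed_model_and_algo_for_test."""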
    model = MagnitudeTestModel()
    config = get_basic_magnitude_sparsity_config()
    config['compression']['weight_importance'] = 'abs'
    compression_algo = create_compression_algorithm(model, config)
    compression_algo.set_sparsity_level(0.3)
    model = compression_algo.model
    with torch.no_grad():
        model(torch.ones([1, 1, 10, 10]))

    op = model.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)

    op = model.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)
def test_can_restore_binary_mask_on_magnitude_quant_algo_resume(tmp_path):
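    """Resume of a magnitude sparsity + quantization model into a const_sparsity +
    quantization model; masks are read from the NNCF-wrapped model."""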
    config = get_empty_config()
    config["compression"] = [{
        "algorithm": "magnitude_sparsity",
        "params": {
            "schedule": "multistep",
            "multistep_sparsity_levels": [0.3, 0.5],
            "weight_importance": "abs"
        }
    }, {
        "algorithm": "quantization"
    }]

    sparse_model, _ = create_compressed_model_and_algo_for_test(
        MagnitudeTestModel(), config)

    # load_state doesn't support CPU + Quantization
    sparse_model = torch.nn.DataParallel(sparse_model)
    sparse_model.cuda()
    with torch.no_grad():
        sparse_model(torch.ones([1, 1, 10, 10]))

    config = get_empty_config()
    config["compression"] = [{
        "algorithm": "const_sparsity"
    }, {
        "algorithm": "quantization"
    }]
    const_sparse_model, _ = create_compressed_model_and_algo_for_test(
        MagnitudeTestModel(), config)

    load_state(const_sparse_model, sparse_model.state_dict())

    op = const_sparse_model.get_nncf_wrapped_model().conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)

    op = const_sparse_model.get_nncf_wrapped_model().conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)
def test_magnitude_model_has_expected_params():
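    """MagnitudeTestModel's convolutions should be initialized with the expected
    hard-coded weights and biases."""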
    model = MagnitudeTestModel()
    act_weights_1 = model.conv1.weight.data
    act_weights_2 = model.conv2.weight.data
    act_bias_1 = model.conv1.bias.data
    act_bias_2 = model.conv2.bias.data

    sub_tensor = torch.tensor([[[[10., 9.], [9., 10.]]]])
    ref_weights_1 = torch.cat((sub_tensor, sub_tensor), 0)
    sub_tensor = torch.tensor([[[[-9., -10., -10.], [-10., -9., -10.],
                                 [-10., -10., -9.]]]])
    ref_weights_2 = torch.cat((sub_tensor, sub_tensor), 1)

    check_equal(act_weights_1, ref_weights_1)
    check_equal(act_weights_2, ref_weights_2)

    check_equal(act_bias_1, torch.tensor([-2., -2]))
    check_equal(act_bias_2, torch.tensor([0]))
def test_two_conv_model_has_expected_params():
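    """TwoConvTestModel should expose the expected weights and biases for both
    convolutions and consistent element / non-zero counters."""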
    model = TwoConvTestModel()
    act_weights_1 = model.features[0][0].weight.data
    act_weights_2 = model.features[1][0].weight.data
    act_bias_1 = model.features[0][0].bias.data
    act_bias_2 = model.features[1][0].bias.data

    ref_weights_1 = BasicConvTestModel.default_weight()
    channel = torch.eye(3, 3).reshape([1, 1, 3, 3])
    ref_weights_2 = torch.cat((channel, channel), 1)

    check_equal(act_weights_1, ref_weights_1)
    check_equal(act_weights_2, ref_weights_2)

    check_equal(act_bias_1, BasicConvTestModel.default_bias())
    check_equal(act_bias_2, torch.tensor([0]))

    assert act_weights_1.nonzero().size(0) + act_weights_2.nonzero().size(
        0) == model.nz_weights_num
    assert act_bias_1.nonzero().size(0) + act_bias_2.nonzero().size(
        0) == model.nz_bias_num
    assert act_weights_1.numel() + act_weights_2.numel() == model.weights_num
    assert act_bias_1.numel() + act_bias_2.numel() == model.bias_num
def test_two_conv_model_is_valid():
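    """TwoConvTestModel should produce the reference output on an all-ones input."""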
    model = TwoConvTestModel()
    input_ = torch.ones([1, 1, 4, 4])
    ref_output = torch.tensor([-24])
    act_output = model(input_)
    check_equal(ref_output, act_output)
def test_basic_model_is_valid():
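    """BasicConvTestModel should produce the reference output on an all-ones input."""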
    model = BasicConvTestModel()
    input_ = torch.ones([1, 1, 4, 4])
    ref_output = torch.ones((1, 2, 3, 3)) * (-4)
    act_output = model(input_)
    check_equal(ref_output, act_output)