def test_checkpoint_callback_make_checkpoints(mocker, tmp_path):
    save_freq = 2
    config = get_basic_quantization_config()
    gen_setup_spy = mocker.spy(QuantizationBuilder, '_get_quantizer_setup')

    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        get_basic_conv_test_model(), config, force_no_init=True)
    assert isinstance(compression_ctrl, QuantizationController)

    quantizer_setup = gen_setup_spy.spy_return
    compression_callbacks = create_compression_callbacks(compression_ctrl,
                                                         log_tensorboard=False)
    dataset_len = 8

    dummy_x = tf.random.normal((dataset_len, ) + model.input_shape[1:])
    dummy_y = tf.random.normal((dataset_len, ) + model.output_shape[1:])

    model.compile(loss=tf.losses.CategoricalCrossentropy())

    ckpt_path = tmp_path / 'checkpoint'
    ckpt = tf.train.Checkpoint(
        model=model, compression_state=TFCompressionState(compression_ctrl))
    model.fit(dummy_x,
              dummy_y,
              epochs=5,
              batch_size=2,
              callbacks=[
                  CheckpointManagerCallback(ckpt, str(ckpt_path), save_freq),
                  *compression_callbacks
              ])

    assert sorted(os.listdir(ckpt_path)) == REF_CKPT_DIR[save_freq]

    new_compression_state = load_compression_state(ckpt_path)

    new_model, new_compression_ctrl = create_compressed_model_and_algo_for_test(
        get_basic_conv_test_model(), config, new_compression_state)
    new_model.compile(loss=tf.losses.CategoricalCrossentropy())
    new_ckpt = tf.train.Checkpoint(
        model=new_model,
        compression_state=TFCompressionState(new_compression_ctrl))
    load_checkpoint(new_ckpt, ckpt_path)

    builder = QuantizationBuilder(config)
    builder.load_state(new_compression_state['builder_state'])
    # pylint:disable=protected-access
    new_quantizer_setup = builder._quantizer_setup

    assert _quantization_setup_cmp(quantizer_setup, new_quantizer_setup)
    assert new_compression_ctrl.get_state() == compression_ctrl.get_state()
    assert tf.reduce_all([
        tf.reduce_all(w_new == w)
        for w_new, w in zip(new_model.weights, model.weights)
    ])
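
For context, a minimal plain-Keras sketch (no NNCF objects; the helper names are illustrative only) of the save/restore round trip the test above drives through CheckpointManagerCallback: track the model in a tf.train.Checkpoint, save it with tf.train.CheckpointManager, then restore into a freshly built model and compare weights.

import tensorflow as tf


def checkpoint_roundtrip_sketch(tmp_dir: str) -> None:
    def build_model() -> tf.keras.Model:
        # Tiny stand-in for get_basic_conv_test_model().
        return tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])

    model = build_model()
    ckpt = tf.train.Checkpoint(model=model)
    manager = tf.train.CheckpointManager(ckpt, tmp_dir, max_to_keep=3)
    manager.save()  # writes ckpt-1.index / ckpt-1.data-* under tmp_dir

    # Restore into a freshly initialized model of the same architecture.
    restored_model = build_model()
    tf.train.Checkpoint(model=restored_model).restore(
        manager.latest_checkpoint).expect_partial()

    assert all(
        bool(tf.reduce_all(w_new == w))
        for w_new, w in zip(restored_model.weights, model.weights))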

Example #2

def test_magnitude_algo_binary_masks_are_applied():
    input_shape = (1, 5, 5, 1)
    model = get_basic_conv_test_model(input_shape=input_shape[1:])
    config = get_empty_config(input_sample_sizes=input_shape)
    config.update(Dict({'compression': {'algorithm': "magnitude_sparsity"}}))
    compressed_model, _ = create_compressed_model_and_algo_for_test(
        model, config)
    conv = compressed_model.layers[1]
    op_name = list(conv.ops_weights.keys())[0]
    conv.ops_weights[op_name] = {'mask': tf.ones_like(conv.weights[0])}
    input_ = tf.ones(input_shape)
    ref_output_1 = -4 * tf.ones((1, 4, 4, 2))
    output_1 = compressed_model(input_)
    tf.assert_equal(output_1, ref_output_1)

    np_mask = conv.ops_weights[op_name]['mask'].numpy()
    np_mask[0, 1, 0, 0] = 0
    np_mask[1, 0, 0, 1] = 0
    conv.ops_weights[op_name] = {'mask': tf.constant(np_mask)}
    ref_output_2 = -3 * tf.ones_like(ref_output_1)
    output_2 = compressed_model(input_)
    tf.assert_equal(output_2, ref_output_2)

    np_mask[0, 1, 0, 1] = 0
    conv.ops_weights[op_name] = {'mask': tf.constant(np_mask)}
    ref_output_3 = ref_output_2.numpy()
    ref_output_3[..., 1] = -2 * np.ones_like(ref_output_1[..., 1])
    ref_output_3 = tf.constant(ref_output_3)
    output_3 = compressed_model(input_)
    tf.assert_equal(output_3, ref_output_3)
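
The reference outputs above follow from simple kernel arithmetic. A minimal plain-TF sketch (assuming the same layout as the basic test model: 2x2 kernels with two -1 taps each and bias -2) showing that zeroing one mask element removes one -1 contribution at every output position:

import tensorflow as tf


def masked_conv_arithmetic_sketch() -> None:
    # Kernel laid out as (kh, kw, in, out): each of the 2 output channels
    # sees the 2x2 pattern [[0, -1], [-1, 0]], as in the basic test model.
    kernel = tf.constant([[[[0., 0.]], [[-1., -1.]]],
                          [[[-1., -1.]], [[0., 0.]]]])
    bias = tf.constant([-2., -2.])
    x = tf.ones((1, 5, 5, 1))

    def conv(mask):
        return tf.nn.conv2d(x, kernel * mask, strides=1, padding='VALID') + bias

    full = conv(tf.ones_like(kernel))   # every position: 2 * (-1) - 2 = -4
    mask = tf.ones_like(kernel).numpy()
    mask[0, 1, 0, 0] = 0.               # drop one -1 tap of output channel 0
    pruned = conv(tf.constant(mask))

    assert float(full[0, 0, 0, 0]) == -4.0
    assert float(pruned[0, 0, 0, 0]) == -3.0   # channel 0: -1 - 2
    assert float(pruned[0, 0, 0, 1]) == -4.0   # channel 1 is untouched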

Example #3

    def test_scheduler_can_do_epoch_step(self, algo, schedule, get_params,
                                         ref_levels):
        model = get_basic_conv_test_model()
        config = get_empty_config()
        config['compression'] = Dict({
            'algorithm': algo,
            'sparsity_init': 0.2,
            "params": {
                **get_params(), "schedule": schedule
            }
        })

        _, compression_ctrl = create_compressed_model_and_algo_for_test(
            model, config)

        scheduler = compression_ctrl.scheduler

        assert pytest.approx(scheduler.current_sparsity_level) == ref_levels[0]
        for ref_level in ref_levels[1:]:
            scheduler.epoch_step()
            assert pytest.approx(scheduler.current_sparsity_level) == ref_level

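        # Second pass: rebuild the model and verify that passing an explicit
        # epoch index to epoch_step(i) yields the same reference levels as the
        # implicit epoch_step() calls above.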
        _, compression_ctrl = create_compressed_model_and_algo_for_test(
            model, config)
        scheduler = compression_ctrl.scheduler

        assert pytest.approx(scheduler.current_sparsity_level) == ref_levels[0]
        for i, ref_level in enumerate(ref_levels[1:]):
            scheduler.epoch_step(i)
            assert pytest.approx(scheduler.current_sparsity_level) == ref_level

Example #4

def test_quantization_configs__with_defaults():
    model = get_basic_conv_test_model()
    config = get_basic_quantization_config()

    compression_model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config, force_no_init=True)

    assert isinstance(compression_ctrl, QuantizationController)
    check_default_qspecs(compression_model)

def test_can_set_sparse_layers_to_loss():
    model = get_basic_conv_test_model()
    config = get_basic_sparsity_config()
    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)
    loss = compression_ctrl.loss
    assert isinstance(loss, SparseLoss)
    # pylint: disable=protected-access
    for op, _ in loss._target_ops:
        assert isinstance(op, RBSparsifyingWeight)

Example #6

def test_quantization_configs__disable_overflow_fix():
    model = get_basic_conv_test_model()

    config = get_basic_quantization_config()
    config['compression'].update({'overflow_fix': 'disable'})
    compression_model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config, force_no_init=True)

    assert isinstance(compression_ctrl, QuantizationController)
    check_specs_for_disabled_overflow_fix(compression_model)

Example #7

def test_basic_model_has_expected_params():
    default_weight = tf.constant([[[[0., -1.], [-1., 0.]]],
                                  [[[0., -1.], [-1., 0.]]]])
    default_weight = tf.transpose(default_weight, (2, 3, 1, 0))
    default_bias = tf.constant([-2., -2.])
    model = get_basic_conv_test_model()
    act_weights = model.layers[1].weights[0]
    ref_weights = default_weight
    act_bias = model.layers[1].weights[1]
    ref_bias = default_bias

    TFTensorListComparator.check_equal(act_bias, ref_bias)
    TFTensorListComparator.check_equal(act_weights, ref_weights)

def test_scheduler_can_do_epoch_step__with_rb_algo():
    config = NNCFConfig()
    config['input_info'] = [{"sample_size": [1, 4, 4, 1]}]
    config['compression'] = {
        'algorithm': 'rb_sparsity',
        'sparsity_init': 0.2,
        "params": {
            'schedule': 'polynomial',
            'power': 1,
            'sparsity_target_epoch': 2,
            'sparsity_target': 0.6,
            'sparsity_freeze_epoch': 3
        }
    }

    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        get_basic_conv_test_model(), config)
    scheduler = compression_ctrl.scheduler
    loss = compression_ctrl.loss

    assert not loss.disabled

    # pylint: disable=protected-access
    for op, op_weights in loss._target_ops:
        assert op.get_trainable_weight(op_weights)

    scheduler.epoch_step()
    assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.2
    assert pytest.approx(loss(), abs=1e-3) == 16
    assert not loss.disabled

    scheduler.epoch_step()
    assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.4
    assert pytest.approx(loss(), abs=1e-3) == 64
    assert not loss.disabled

    scheduler.epoch_step()
    assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.6
    assert pytest.approx(loss(), abs=1e-3) == 144
    assert not loss.disabled

    scheduler.epoch_step()
    assert loss.disabled
    assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.6
    assert loss() == 0

    for op, op_weights in loss._target_ops:
        assert not op.get_trainable_weight(op_weights)
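
For reference, a minimal sketch of the polynomial schedule implied by the numbers above (an assumed formula, not NNCF's implementation): with power=1, sparsity_init=0.2, sparsity_target=0.6 and sparsity_target_epoch=2 it reproduces the target rates 0.2, 0.4, 0.6 and then stays at the target.

def polynomial_sparsity_level(epoch: int,
                              init: float = 0.2,
                              target: float = 0.6,
                              target_epoch: int = 2,
                              power: float = 1.0) -> float:
    # Progress saturates at 1.0 once the target epoch is reached.
    progress = min(epoch / target_epoch, 1.0)
    return init + (target - init) * progress ** power


assert [round(polynomial_sparsity_level(e), 3) for e in range(4)] == [0.2, 0.4, 0.6, 0.6]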

Example #9

def test_struct_auxiliary_nodes_nncf_graph():
    model = get_basic_conv_test_model()
    config = get_basic_quantization_config()
    compressed_model, _ = create_compressed_model_and_algo_for_test(
        model, config, force_no_init=True)

    nncf_graph = convert_keras_model_to_nncf_graph(compressed_model)

    input_nodes = nncf_graph.get_input_nodes()
    output_nodes = nncf_graph.get_output_nodes()

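    # A single-input, single-output model should yield exactly one auxiliary
    # input node and one auxiliary output node in the converted NNCF graph.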
    assert len(input_nodes) == 1
    assert len(output_nodes) == 1

    assert input_nodes[0].metatype in INPUT_NOOP_METATYPES
    assert output_nodes[0].metatype in OUTPUT_NOOP_METATYPES

Example #10

def test_quantization_preset(data):
    model = get_basic_conv_test_model()

    config = get_basic_quantization_config()
    config['target_device'] = data['target_device']
    config['compression'] = {
        'algorithm': 'quantization',
        'preset': data['preset']
    }
    config['compression'].update(data['overrided_param'])
    compression_model, _ = create_compressed_model_and_algo_for_test(
        model, config, force_no_init=True)

    activation_quantizers, weight_quantizers = get_quantizers(
        compression_model)
    for aq in activation_quantizers:
        assert aq.mode == data['expected_activations_q']
    for wq in weight_quantizers:
        assert wq.mode == data['expected_weights_q']
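
A hypothetical example of one `data` parametrization for the test above, shown only to illustrate the expected keys; the mode values follow NNCF's documented 'performance' preset (symmetric weights and activations), but the fixtures in the original suite may differ.

PRESET_CASE = {
    'target_device': 'TRIAL',
    'preset': 'performance',
    'overrided_param': {},
    'expected_activations_q': QuantizationMode.SYMMETRIC,
    'expected_weights_q': QuantizationMode.SYMMETRIC,
}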

def test_can_create_sparse_loss_and_scheduler():
    model = get_basic_conv_test_model()

    config = get_basic_sparsity_config()
    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)

    scheduler = compression_ctrl.scheduler
    scheduler.epoch_step()
    loss = compression_ctrl.loss
    assert isinstance(loss, SparseLoss)
    assert not loss.disabled
    assert loss.target_sparsity_rate == approx(0.02)
    assert loss.p == approx(0.05)

    assert isinstance(scheduler, PolynomialSparsityScheduler)
    assert scheduler.current_sparsity_level == approx(0.02)
    assert scheduler.target_level == approx(0.5)
    assert scheduler.target_epoch == 2
    assert scheduler.freeze_epoch == 3

Example #12

def test_quantization_configs__custom():
    model = get_basic_conv_test_model()

    config = get_basic_quantization_config()
    config['target_device'] = 'TRIAL'
    config['compression'].update({
        "weights": {
            "mode": "asymmetric",
            "per_channel": True,
            "bits": 4
        },
        "activations": {
            "mode": "asymmetric",
            "bits": 4,
            "signed": True,
        },
    })
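    # The overrides above should surface directly in the quantizer specs
    # checked below: 4-bit asymmetric per-channel weights and 4-bit asymmetric
    # activations with signedness forced to True.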
    compression_model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config, force_no_init=True)

    assert isinstance(compression_ctrl, QuantizationController)
    activation_quantizers, weight_quantizers = get_quantizers(
        compression_model)

    ref_weight_qspec = TFQuantizerSpec(mode=QuantizationMode.ASYMMETRIC,
                                       num_bits=4,
                                       signedness_to_force=None,
                                       per_channel=True,
                                       narrow_range=True,
                                       half_range=False)
    for wq in weight_quantizers:
        compare_qspecs(ref_weight_qspec, wq)

    ref_activation_qspec = TFQuantizerSpec(mode=QuantizationMode.ASYMMETRIC,
                                           num_bits=4,
                                           signedness_to_force=True,
                                           per_channel=False,
                                           narrow_range=False,
                                           half_range=False)
    for aq in activation_quantizers:
        compare_qspecs(ref_activation_qspec, aq)

def test_quantization_configs__on_resume_with_compression_state(
        tmp_path, mocker):
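    # Round trip: run initialization once, serialize the compression state,
    # then rebuild from that state and check that neither initialize() nor
    # _get_quantizer_setup() runs again and the quantizer setup is identical.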
    model = get_basic_conv_test_model()
    config = get_basic_quantization_config()
    init_spy = mocker.spy(QuantizationBuilder, 'initialize')
    gen_setup_spy = mocker.spy(QuantizationBuilder, '_get_quantizer_setup')
    dataset = get_dataset_for_test(shape=[4, 4, 1])
    config = register_default_init_args(config, dataset, 10)

    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)
    assert isinstance(compression_ctrl, QuantizationController)
    init_spy.assert_called()
    gen_setup_spy.assert_called()
    saved_quantizer_setup = gen_setup_spy.spy_return
    check_serialization(saved_quantizer_setup, _quantization_setup_cmp)

    compression_state_to_load = _save_and_load_compression_state(
        compression_ctrl, tmp_path)

    init_spy.reset_mock()
    gen_setup_spy.reset_mock()

    compression_model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config, compression_state_to_load)
    assert isinstance(compression_ctrl, QuantizationController)

    init_spy.assert_not_called()
    gen_setup_spy.assert_not_called()
    check_default_qspecs(compression_model)

    builder = QuantizationBuilder(config)
    builder.load_state(compression_state_to_load['builder_state'])
    # pylint:disable=protected-access
    loaded_quantizer_setup = builder._quantizer_setup
    assert _quantization_setup_cmp(loaded_quantizer_setup,
                                   saved_quantizer_setup)

from tests.tensorflow.helpers import get_empty_config
from tests.tensorflow.helpers import get_op_by_cls
from tests.tensorflow.helpers import get_weight_by_name

CONF = Path(
    __file__
).parent.parent.parent / 'data' / 'configs' / 'sequential_model_cifar10_rb_sparsity.json'
MASKS_SEEDS_PATH = Path(__file__).parent / 'output_seeds.txt'

TEST_MODELS = {
    'Dense':
    lambda: get_basic_fc_test_model(input_shape=(4, ), out_shape=10),
    'Conv2D':
    lambda: get_basic_conv_test_model(input_shape=(4, 4, 1),
                                      out_channels=2,
                                      kernel_size=2,
                                      weight_init=-1.,
                                      bias_init=-2.,
                                      transpose=False),
    'Conv2DTranspose':
    lambda: get_basic_conv_test_model(input_shape=(4, 4, 1),
                                      out_channels=1,
                                      kernel_size=2,
                                      weight_init=-1.,
                                      bias_init=-2.,
                                      transpose=True),
}
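
A hypothetical way to consume TEST_MODELS in a parametrized test (the test name and ids are illustrative, not from the original file):

import pytest
import tensorflow as tf


@pytest.mark.parametrize('model_fn', list(TEST_MODELS.values()),
                         ids=list(TEST_MODELS.keys()))
def test_models_are_buildable(model_fn):
    model = model_fn()
    assert isinstance(model, tf.keras.Model)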


def get_basic_rb_sparse_model(model_name,
                              local=False,
                              config=CONF,

Example #15

def test_basic_model_is_valid():
    model = get_basic_conv_test_model()
    input_ = tf.ones([1, 4, 4, 1])
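    # Each 2x2 kernel holds two -1 taps (see test_basic_model_has_expected_params),
    # so every output position equals 2 * (-1) + bias (-2) = -4.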
    ref_output = tf.ones((1, 3, 3, 2)) * (-4)
    act_output = model(input_)
    TFTensorListComparator.check_equal(ref_output, act_output)

def get_simple_compressed_model(compression_state=None):
    model = get_basic_conv_test_model()
    config = get_empty_config()
    config.update({'compression': {'algorithm': 'magnitude_sparsity'}})
    compression_ctrl, model = create_compressed_model(model, config, compression_state=compression_state)
    return compression_ctrl, model
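
A short, hypothetical usage of the helper above: build the compressed model once, capture the compression state from the controller (assuming the standard get_compression_state() controller method), and rebuild from that state with the same config.

compression_ctrl, model = get_simple_compressed_model()
compression_state = compression_ctrl.get_compression_state()

# Rebuilding from the captured state should reproduce the same sparsity setup
# without re-running the algorithm's initialization.
restored_ctrl, restored_model = get_simple_compressed_model(
    compression_state=compression_state)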