def test_create_rb_algo_with_per_layer_loss():
    """RB sparsity with local sparsity-level setting mode uses the per-layer loss."""
    config = get_empty_config()
    config['compression'] = {'algorithm': 'rb_sparsity',
                             "params": {"sparsity_level_setting_mode": 'local'}}
    _, ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
    # pylint: disable=protected-access
    assert isinstance(ctrl._loss, SparseLossForPerLayerSparsity)
def test_can_not_create_magnitude_algo__without_steps():
    """A multistep schedule given levels but no steps must fail with AttributeError."""
    config = get_basic_magnitude_sparsity_config()
    config['compression']['params'] = {
        'schedule': 'multistep',
        'multistep_sparsity_levels': [0.1],
    }
    with pytest.raises(AttributeError):
        _, _ = create_compressed_model_and_algo_for_test(MockModel(), config)
def test_can_create_quant_loss_and_scheduler():
    """The quantization controller exposes both a loss and a scheduler object."""
    config = get_quantization_config_without_range_init()
    _, ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
    assert isinstance(ctrl.loss, CompressionLoss)
    assert isinstance(ctrl.scheduler, CompressionScheduler)
def test_can_create_magnitude_algo__without_levels():
    """A multistep schedule without explicit levels yields a 0.1 sparsity level."""
    config = get_basic_magnitude_sparsity_config()
    config['compression']['params'] = {'schedule': 'multistep',
                                       'multistep_steps': [1]}
    _, ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
    assert ctrl.scheduler.sparsity_level == approx(0.1)
def test_can_not_create_magnitude_algo__with_adaptive_scheduler():
    """The adaptive schedule is rejected (TypeError) for magnitude sparsity."""
    config = get_empty_config()
    config['compression'] = {'algorithm': 'magnitude_sparsity',
                             "params": {"schedule": 'adaptive'}}
    with pytest.raises(TypeError):
        _, _ = create_compressed_model_and_algo_for_test(MockModel(), config)
def test_rb_sparsity__can_set_sparsity_level_for_module():
    """Setting a per-module sparsity level of 0.7 flips the loss target to 0.3."""
    config = get_empty_config()
    config['compression'] = {'algorithm': 'rb_sparsity',
                             "params": {"sparsity_level_setting_mode": 'local'}}
    _, ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
    # pylint: disable=protected-access
    # Initial per-layer target is 1 (no sparsity requested yet).
    assert list(ctrl._loss.per_layer_target.values())[0] == 1
    first_module = ctrl.sparsified_module_info[0]
    ctrl.set_sparsity_level(0.7, first_module)
    # Target stored in the loss is the complement of the requested level.
    assert list(ctrl._loss.per_layer_target.values())[0] == pytest.approx(0.3)
def test_can_create_rb_algo__with_adaptive_scheduler():
    """RB sparsity supports the adaptive schedule."""
    config = get_empty_config()
    config['compression'] = {'algorithm': 'rb_sparsity',
                             "params": {"schedule": 'adaptive'}}
    _, ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
    assert isinstance(ctrl.scheduler, AdaptiveSparsityScheduler)
def test_can_choose_scheduler(algo, schedule_type, scheduler_class):
    """Each (algorithm, schedule) pair produces the expected scheduler class."""
    config = get_empty_config()
    config['compression'] = {'algorithm': algo,
                             "params": {"schedule": schedule_type}}
    _, ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
    assert isinstance(ctrl.scheduler, scheduler_class)
def test_hawq_raises_error_if_method_returns_none(mocker, method_name):
    """HAWQ init must raise RuntimeError when the patched step returns None."""
    config = create_hawq_test_config()
    model = MockModel()
    config = register_default_init_args(config, mocker.stub(), mocker.stub())
    # Skip range init and trace computation; only the patched method matters.
    mocker.patch('nncf.quantization.algo.QuantizationController._do_range_init')
    mocker.patch('nncf.quantization.init_precision.HAWQPrecisionInitializer._calc_traces')
    patched_method = mocker.patch(
        'nncf.quantization.init_precision.HAWQPrecisionInitializer.' + method_name)
    patched_method.return_value = None
    with pytest.raises(RuntimeError):
        create_compressed_model_and_algo_for_test(model, config)
def test_create_rb_algo_with_stub_scheduler():
    """Local sparsity-level setting mode installs the stub scheduler."""
    config = get_empty_config()
    config['compression'] = {'algorithm': 'rb_sparsity',
                             "params": {"sparsity_level_setting_mode": 'local'}}
    _, ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
    # pylint: disable=protected-access
    assert isinstance(ctrl.scheduler, StubCompressionScheduler)
def test_hawq_behaviour__if_method_returns_none(mocker, method_name, expected_behavior):
    """When the patched HAWQ step returns None, behave as the fixture expects."""
    config = HAWQConfigBuilder().build()
    config['quantizer_setup_type'] = 'pattern_based'
    model = MockModel()
    config = register_default_init_args(config, mocker.stub(), mocker.stub())
    # Skip range init and trace computation; only the patched method matters.
    mocker.patch('nncf.quantization.algo.QuantizationController._do_range_init')
    mocker.patch('nncf.quantization.precision_init.hawq_init.HAWQPrecisionInitializer._calc_traces')
    patched_method = mocker.patch(
        'nncf.quantization.precision_init.hawq_init.HAWQPrecisionInitializer.' + method_name)
    patched_method.return_value = None
    # expected_behavior is a context manager (e.g. pytest.raises or nullcontext).
    with expected_behavior:
        create_compressed_model_and_algo_for_test(model, config)
def test_can_create_sparse_scheduler__with_defaults(self, algo): config = get_empty_config() config['compression'] = { 'algorithm': algo, "params": { "schedule": 'polynomial' } } _, compression_ctrl = create_compressed_model_and_algo_for_test( MockModel(), config) scheduler = compression_ctrl.scheduler assert scheduler.initial_sparsity == 0 assert scheduler.sparsity_target == 0.5 assert scheduler.sparsity_target_epoch == 90 assert scheduler.sparsity_freeze_epoch == 100
def test_input_wrapper_wrap_inputs(mocker, inputs_test_struct: InputWrappingTestStruct):
    """InputInfoWrapManager wraps model args/kwargs in the expected order."""
    input_infos = inputs_test_struct.input_infos
    model_args = inputs_test_struct.model_args
    model_kwargs = inputs_test_struct.model_kwargs
    ref_wrapping_sequence = inputs_test_struct.ref_wrapping_sequence
    stub_cpu_model = MockModel()

    # Patch first, then import, so the local name is the mock object.
    mocker.patch('nncf.dynamic_graph.input_wrapping.nncf_model_input')
    from nncf.dynamic_graph.input_wrapping import nncf_model_input

    mgr = InputInfoWrapManager(input_infos, inspect.signature(forward), stub_cpu_model)
    mgr.wrap_inputs(model_args, model_kwargs)

    # First positional argument of each recorded call is the wrapped tensor.
    test_wrapping_sequence = [cl[0][0] for cl in nncf_model_input.call_args_list]
    assert all(map(torch.equal, ref_wrapping_sequence, test_wrapping_sequence))
def test_range_init_is_called(config_cutter, range_init_call_count,
                              precision_init_call_count,
                              bn_adaptation_call_count, mocker):
    """Cutting parts of the config changes how many init stages actually run."""
    config = create_hawq_test_config()
    config['compression']['initializer'].update(
        {'batchnorm_adaptation': {'num_bn_adaptation_steps': 5}})
    model = MockModel()
    config = register_default_init_args(config, mocker.stub(), mocker.stub())

    # Spy on each initialization stage so call counts can be asserted.
    range_init_spy = mocker.patch(
        'nncf.quantization.algo.QuantizationController._do_range_init')
    precision_init_spy = mocker.patch(
        'nncf.quantization.init_precision.HAWQPrecisionInitializer.apply_init')
    bn_adaptation_spy = mocker.patch(
        'nncf.initialization.DataLoaderBNAdaptationRunner.run')

    # Remove the config section under test before building the model.
    config_cutter(config['compression'])
    create_compressed_model_and_algo_for_test(model, config)

    assert range_init_spy.call_count == range_init_call_count
    assert precision_init_spy.call_count == precision_init_call_count
    assert bn_adaptation_spy.call_count == bn_adaptation_call_count