def test_evolution_env_setting_params():
    steps_ref = 100
    prune_target_ref = 0.1
    train_optimizer = partial(optim.Adam)

    model = PruningTestModel()
    config = create_default_legr_config()
    config['compression']['params']['legr_params'] = {}
    config['compression']['params']['legr_params']['train_steps'] = steps_ref
    config['compression']['params']['legr_params']['max_pruning'] = prune_target_ref
    train_loader = create_ones_mock_dataloader(config)
    val_loader = create_ones_mock_dataloader(config)
    train_steps_fn = lambda *x: None
    validate_fn = lambda *x: (0, 0)
    nncf_config = register_default_init_args(
        config,
        train_loader=train_loader,
        train_steps_fn=train_steps_fn,
        val_loader=val_loader,
        validate_fn=validate_fn,
        legr_train_optimizer=train_optimizer)
    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, nncf_config)
    evolution_env = compression_ctrl.legr.env

    assert evolution_env.prune_target == prune_target_ref
    assert evolution_env.steps == steps_ref
    assert evolution_env.train_optimizer == train_optimizer

def test_evolution_env_default_params():
    model = PruningTestModel()
    config = create_default_legr_config()
    train_loader = create_ones_mock_dataloader(config)
    val_loader = create_ones_mock_dataloader(config)
    train_steps_fn = lambda *x: None
    validate_fn = lambda *x: (0, 0)
    nncf_config = register_default_init_args(config,
                                             train_loader=train_loader,
                                             train_steps_fn=train_steps_fn,
                                             val_loader=val_loader,
                                             validate_fn=validate_fn)
    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, nncf_config)
    evolution_env = compression_ctrl.legr.env

    assert evolution_env.loss_as_reward is True
    assert evolution_env.prune_target == 0.5
    assert evolution_env.steps == 200

    assert evolution_env.train_loader == train_loader
    assert evolution_env.val_loader == val_loader
    assert evolution_env.train_fn == train_steps_fn
    assert evolution_env.validate_fn == validate_fn
    assert evolution_env.config == nncf_config
def test_legr_class_setting_params(tmp_path):
    generations_ref = 150
    train_steps_ref = 50
    max_pruning_ref = 0.1

    model = PruningTestModel()
    config = create_default_legr_config()
    config['compression']['params']['legr_params'] = {}
    config['compression']['params']['legr_params']['generations'] = generations_ref
    config['compression']['params']['legr_params']['train_steps'] = train_steps_ref
    config['compression']['params']['legr_params']['max_pruning'] = max_pruning_ref
    config['compression']['params']['legr_params']['random_seed'] = 1

    train_loader = create_ones_mock_dataloader(config)
    val_loader = create_ones_mock_dataloader(config)
    train_steps_fn = lambda *x: None
    validate_fn = lambda *x: (0, 0)
    nncf_config = register_default_init_args(config, train_loader=train_loader, train_steps_fn=train_steps_fn,
                                             val_loader=val_loader, validate_fn=validate_fn)
    _, compression_ctrl = create_compressed_model_and_algo_for_test(model, nncf_config)

    assert compression_ctrl.legr.num_generations == generations_ref
    assert compression_ctrl.legr.max_pruning == max_pruning_ref
    assert compression_ctrl.legr._train_steps == train_steps_ref
    assert compression_ctrl.legr.random_seed == 1
def test_legr_class_default_params(tmp_path):
    model = PruningTestModel()
    config = create_default_legr_config()
    train_loader = create_ones_mock_dataloader(config)
    val_loader = create_ones_mock_dataloader(config)
    train_steps_fn = lambda *x: None
    validate_fn = lambda *x: (0, 0)
    nncf_config = register_default_init_args(config, train_loader=train_loader, train_steps_fn=train_steps_fn,
                                             val_loader=val_loader, validate_fn=validate_fn)
    _, compression_ctrl = create_compressed_model_and_algo_for_test(model, nncf_config)

    assert compression_ctrl.legr.num_generations == 400
    assert compression_ctrl.legr.max_pruning == 0.8
    assert compression_ctrl.legr._train_steps == 200
    assert compression_ctrl.legr.random_seed == 42

def get_model_and_controller_for_legr_test():
    model = PruningTestModel()
    config = create_default_legr_config()
    train_loader = create_ones_mock_dataloader(config)
    val_loader = create_ones_mock_dataloader(config)
    train_steps_fn = lambda *x: None
    validate_fn = lambda *x: (0, 0)
    nncf_config = register_default_init_args(config,
                                             train_loader=train_loader,
                                             train_steps_fn=train_steps_fn,
                                             val_loader=val_loader,
                                             validate_fn=validate_fn)
    compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, nncf_config)
    return nncf_config, compressed_model, compression_ctrl
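
A hypothetical test built on the helper above, shown only to illustrate how the returned triple might be consumed; the legr.env attribute and its config field follow the assertions made in the earlier examples:

def test_legr_env_is_created():
    # Hypothetical usage sketch of get_model_and_controller_for_legr_test();
    # exercises the same compression_ctrl.legr.env attribute as the tests above.
    nncf_config, _, compression_ctrl = get_model_and_controller_for_legr_test()
    evolution_env = compression_ctrl.legr.env
    assert evolution_env is not None
    assert evolution_env.config == nncf_config
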
def test_knowledge_distillation_outputs_containers_parsing():
    mse = torch.nn.MSELoss()
    input_size = [1, 1, 8, 8]
    model = ContainersOutputsModel(input_size)
    fill_params_of_model_by_normal(model)
    dumped_orig_model = deepcopy(model)
    sparsity_level = 0.3
    batch_size = 1 if torch.cuda.device_count() == 0 else torch.cuda.device_count()
    config = get_kd_config(
        get_sparsity_config_with_sparsity_init(
            get_basic_magnitude_sparsity_config(input_sample_size=input_size),
            sparsity_level))
    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)
    model.train()
    mock_dataloader = create_ones_mock_dataloader(
        config, num_samples=batch_size, batch_size=batch_size)
    compression_ctrl.scheduler.epoch_step()
    for _, (input_, __) in enumerate(mock_dataloader):
        input_ = input_.to(next(model.parameters()).device)
        outputs = model(input_)
        kd_outputs = dumped_orig_model(input_)

        reference_kd_loss = mse(outputs['xa'], kd_outputs['xa']) + \
                            mse(outputs['xb_and_xc'][0], kd_outputs['xb_and_xc'][0]) + \
                            mse(outputs['xb_and_xc'][1], kd_outputs['xb_and_xc'][1])
        actual_kd_loss = compression_ctrl.loss()
        assert torch.allclose(reference_kd_loss, actual_kd_loss)
def create_dataloader(wrap_dataloader,
                      config,
                      num_samples=1) -> DataLoader:
    data_loader = create_ones_mock_dataloader(config, num_samples)
    if wrap_dataloader:
        data_loader = DefaultInitializingDataLoader(data_loader)
    return data_loader
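
A minimal usage sketch for the helper above; get_empty_config() is borrowed from the other examples in this listing, and wrap_dataloader toggles the DefaultInitializingDataLoader wrapper:

# Hypothetical usage of create_dataloader(); get_empty_config() comes from
# the surrounding test utilities.
config = get_empty_config()
plain_loader = create_dataloader(wrap_dataloader=False, config=config)
wrapped_loader = create_dataloader(wrap_dataloader=True, config=config)
assert isinstance(wrapped_loader, DefaultInitializingDataLoader)
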
def create_finetuned_lenet_model_and_dataloader(config,
                                                eval_fn,
                                                finetuning_steps,
                                                learning_rate=1e-3):
    with set_torch_seed():
        train_loader = create_ones_mock_dataloader(config, num_samples=10)
        model = LeNet()
        for param in model.parameters():
            nn.init.uniform_(param, a=0.0, b=0.01)

        data_loader = iter(train_loader)
        optimizer = SGD(model.parameters(), lr=learning_rate)
        for _ in range(finetuning_steps):
            optimizer.zero_grad()
            x, y_gt = next(data_loader)
            y = model(x)
            loss = F.mse_loss(y.sum(), y_gt)
            loss.backward()
            optimizer.step()

    config = register_default_init_args(
        config,
        train_loader=train_loader,
        model_eval_fn=partial(eval_fn, train_loader=train_loader))
    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)
    return model, train_loader, compression_ctrl
def test_default_legr_init_struct():
    config = get_basic_pruning_config()
    init_loader = create_ones_mock_dataloader(config)
    nncf_config = register_default_init_args(config, init_loader)

    with pytest.raises(KeyError):
        nncf_config.get_extra_struct(LeGRInitArgs)

def test_loss_outputs_parsing():
    mse = torch.nn.MSELoss()
    input_size = [1, 1, 8, 8]
    model = PartlyNonDifferentialOutputsModel(input_size)
    fill_params_of_model_by_normal(model)
    dumped_orig_model = deepcopy(model)
    sparsity_level = 0.3
    batch_size = 1 if torch.cuda.device_count() == 0 else torch.cuda.device_count()
    config = get_kd_config(
        get_sparsity_config_with_sparsity_init(
            get_basic_magnitude_sparsity_config(input_sample_size=input_size),
            sparsity_level))
    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)
    model.train()
    mock_dataloader = create_ones_mock_dataloader(
        config, num_samples=batch_size, batch_size=batch_size)
    compression_ctrl.scheduler.epoch_step()
    for _, (input_, __) in enumerate(mock_dataloader):
        input_ = input_.to(next(model.parameters()).device)
        outputs = model(input_)
        kd_outputs = dumped_orig_model(input_)
        loss_outputs = []
        for tensor1, tensor2 in zip(outputs, kd_outputs):
            if tensor1.requires_grad:
                loss_outputs.append((tensor1, tensor2))

        reference_kd_loss = sum(
            [mse(item[0], item[1]) for item in loss_outputs])
        actual_kd_loss = compression_ctrl.loss()
        assert torch.allclose(reference_kd_loss, actual_kd_loss)

def test_can_resume_with_algo_mixing(mocker, is_strict):
    desc = TestPrecisionInitDesc().config_with_all_inits()
    all_quantization_init_spies = desc.setup_init_spies(mocker)
    sparsity_config = get_basic_sparsity_config()
    sparsity_config['target_device'] = 'TRIAL'
    config = desc.config
    quantization_section = config['compression']
    config['compression'] = [{
        'algorithm': 'const_sparsity'
    }, quantization_section]

    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        desc.model_creator(), sparsity_config)
    compression_state = compression_ctrl.get_compression_state()

    config = register_default_init_args(
        config, train_loader=create_ones_mock_dataloader(config))
    fn = partial(create_compressed_model_and_algo_for_test,
                 desc.model_creator(),
                 config,
                 compression_state=compression_state)
    if is_strict:
        with pytest.raises(RuntimeError):
            fn()
    else:
        _, compression_ctrl = fn()
        for m in all_quantization_init_spies:
            m.assert_called()
        desc.check_precision_init(compression_ctrl.child_ctrls[1])
def test_default_distributed_init_struct():
    config = get_basic_pruning_config()
    init_loader = create_ones_mock_dataloader(config)
    register_default_init_args(config, init_loader)

    dist_callbacks = config.get_extra_struct(DistributedCallbacksArgs)
    assert callable(dist_callbacks.wrap_model)
    assert callable(dist_callbacks.unwrap_model)
def test_valid_legr_init_struct():
    config = get_basic_pruning_config()
    train_loader = create_ones_mock_dataloader(config)
    val_loader = create_ones_mock_dataloader(config)
    train_steps_fn = lambda *x: None
    validate_fn = lambda *x: (0, 0, 0)
    nncf_config = register_default_init_args(config,
                                             train_loader=train_loader,
                                             train_steps_fn=train_steps_fn,
                                             val_loader=val_loader,
                                             validate_fn=validate_fn)

    legr_init_args = config.get_extra_struct(LeGRInitArgs)
    assert legr_init_args.config == nncf_config
    assert legr_init_args.train_loader == train_loader
    assert legr_init_args.val_loader == val_loader
    assert legr_init_args.train_steps_fn == train_steps_fn
def test_legr_reproducibility():
    np.random.seed(42)
    config = create_default_legr_config()

    train_loader = create_ones_mock_dataloader(config)
    val_loader = create_ones_mock_dataloader(config)
    train_steps_fn = lambda *x: None
    validate_fn = lambda *x: (0, np.random.random())
    nncf_config = register_default_init_args(config, train_loader=train_loader, train_steps_fn=train_steps_fn,
                                             val_loader=val_loader, validate_fn=validate_fn)
    model_1 = PruningTestModel()
    _, compression_ctrl_1 = create_compressed_model_and_algo_for_test(model_1, nncf_config)

    model_2 = PruningTestModel()
    _, compression_ctrl_2 = create_compressed_model_and_algo_for_test(model_2, nncf_config)

    assert compression_ctrl_1.ranking_coeffs == compression_ctrl_2.ranking_coeffs
def test_mock_dump_checkpoint(aa_config):
    is_called_dump_checkpoint_fn = False

    def mock_dump_checkpoint_fn(model, compression_controller,
                                accuracy_aware_runner, aa_log_dir):
        from nncf.api.compression import CompressionAlgorithmController
        from nncf.common.accuracy_aware_training.runner import TrainingRunner
        assert isinstance(model, torch.nn.Module)
        assert isinstance(compression_controller,
                          CompressionAlgorithmController)
        assert isinstance(accuracy_aware_runner, TrainingRunner)
        assert isinstance(aa_log_dir, str)
        nonlocal is_called_dump_checkpoint_fn
        is_called_dump_checkpoint_fn = True

    config = get_quantization_config_without_range_init(LeNet.INPUT_SIZE[-1])
    train_loader = create_ones_mock_dataloader(aa_config, num_samples=10)
    model = LeNet()
    config.update(aa_config)

    def train_fn(compression_ctrl,
                 model,
                 epoch,
                 optimizer,
                 lr_scheduler,
                 train_loader=train_loader):
        pass

    def mock_validate_fn(model, init_step=False, epoch=0):
        return 80

    def configure_optimizers_fn():
        optimizer = SGD(model.parameters(), lr=0.001)
        return optimizer, None

    config = register_default_init_args(config,
                                        train_loader=train_loader,
                                        model_eval_fn=partial(mock_validate_fn,
                                                              init_step=True))

    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)

    early_stopping_training_loop = EarlyExitCompressionTrainingLoop(
        config, compression_ctrl, dump_checkpoints=True)
    model = early_stopping_training_loop.run(
        model,
        train_epoch_fn=train_fn,
        validate_fn=partial(mock_validate_fn),
        configure_optimizers_fn=configure_optimizers_fn,
        dump_checkpoint_fn=mock_dump_checkpoint_fn)
    assert is_called_dump_checkpoint_fn
def create_test_quantization_env(model_creator=BasicConvTestModel,
                                 input_info_cfg=None) -> QuantizationEnv:
    if input_info_cfg is None:
        input_info_cfg = {"input_info": {"sample_size": [1, 1, 4, 4]}}

    model = model_creator()
    nncf_network = NNCFNetwork(model,
                               input_infos=create_input_infos(input_info_cfg))
    hw_config_type = HWConfigType.VPU
    hw_config_path = HWConfig.get_path_to_hw_config(hw_config_type)
    hw_config = PTHWConfig.from_json(hw_config_path)
    setup = PropagationBasedQuantizerSetupGenerator(
        NNCFConfig(), nncf_network, hw_config=hw_config).generate_setup()
    dummy_multi_setup = MultiConfigQuantizerSetup.from_single_config_setup(
        setup)
    for qp in dummy_multi_setup.quantization_points.values():
        qconf_constraint_list = []
        qconf = qp.possible_qconfigs[0]
        bit_set = [8, 4, 2] if 'conv' in str(qp.insertion_point) else [8, 4]
        for bits in bit_set:
            adj_qconf = deepcopy(qconf)
            adj_qconf.num_bits = bits
            qconf_constraint_list.append(adj_qconf)
        qp.possible_qconfigs = qconf_constraint_list
    experimental_builder = ExperimentalQuantizationBuilder(
        dummy_multi_setup, setup, {}, hw_config)
    experimental_builder.apply_to(nncf_network)
    # pylint:disable=line-too-long
    experimental_ctrl = experimental_builder.build_controller(nncf_network)
    data_loader = create_ones_mock_dataloader(input_info_cfg)
    constraints = HardwareQuantizationConstraints()
    for qid, qp_id_set in experimental_ctrl.module_id_to_qp_id_translation_dict.items():
        first_qp_id_for_this_quantizer_module = next(iter(qp_id_set))
        qconfigs = dummy_multi_setup.quantization_points[
            first_qp_id_for_this_quantizer_module].possible_qconfigs
        constraints.add(qid, qconfigs)

    return QuantizationEnv(nncf_network,
                           experimental_ctrl,
                           constraints,
                           data_loader,
                           lambda *x: 0,
                           hw_config_type=HWConfigType.VPU,
                           params=QuantizationEnvParams(
                               compression_ratio=0.15,
                               eval_subset_ratio=1.0,
                               skip_constraint=False,
                               performant_bw=False,
                               finetune=False,
                               bits=[2, 4, 8],
                               dump_init_precision_data=False))
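
A minimal usage sketch for the factory above; only its own signature is exercised, and the custom input shape is illustrative:

# Hypothetical call into create_test_quantization_env() with an overridden
# input shape; the dict layout mirrors the factory's internal default.
custom_cfg = {"input_info": {"sample_size": [1, 1, 8, 8]}}
qenv = create_test_quantization_env(model_creator=BasicConvTestModel,
                                    input_info_cfg=custom_cfg)
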
def run_training_for_device_testing(gpu, config: NNCFConfig,
                                    inference_type: str, ngpus_per_node: int,
                                    device_placing: str):
    number_of_iters = 1
    batch_size = 1 if torch.cuda.device_count() == 0 else torch.cuda.device_count()
    config['input_info']['sample_size'] = [1, 1, 8, 8]
    if inference_type == 'DDP':
        distributed_init_test_default(gpu, ngpus_per_node, config)
        mock_dataloader = create_rank_dataloader(config,
                                                 gpu,
                                                 batch_size * number_of_iters,
                                                 batch_size=batch_size)
    else:
        mock_dataloader = create_ones_mock_dataloader(config,
                                                      num_samples=batch_size * number_of_iters,
                                                      batch_size=batch_size)
    model_device = get_model_device(inference_type, gpu)
    model = TwoConvTestModel()
    fill_params_of_model_by_normal(model, std=0.5)

    if device_placing == 'before':
        model.to(model_device)

    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)

    if inference_type == 'DDP':
        model = post_compression_test_distr_init(compression_ctrl, config,
                                                 ngpus_per_node, model)
    elif inference_type == 'DP':
        model = torch.nn.DataParallel(model)

    optimizer = SGD(model.parameters(), lr=1e-02)
    model.train()
    output_storage = []

    if device_placing == 'after':
        model.to(model_device)

    for _, (input_, __) in enumerate(mock_dataloader):
        input_ = input_.to(next(model.parameters()).device)
        output = model(input_)
        output_storage.append(output)
        loss = compression_ctrl.loss()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def test_knowledge_distillation_loss_types(kd_loss_type: str, scale,
                                           temperature):
    torch.manual_seed(2)
    if kd_loss_type == 'softmax':
        def kd_loss_fn(ref_outputs, compressed_model_outputs) -> torch.Tensor:
            return scale * -(nn.functional.log_softmax(compressed_model_outputs / temperature, dim=1)
                             * nn.functional.softmax(ref_outputs / temperature, dim=1)).mean() \
                   * (temperature * temperature * compressed_model_outputs.shape[1])
    else:
        def kd_loss_fn(ref_outputs, compressed_model_outputs) -> torch.Tensor:
            return scale * F.mse_loss(ref_outputs, compressed_model_outputs)
    input_size = [1, 100]
    batch_size = 1 if torch.cuda.device_count() == 0 else torch.cuda.device_count()

    model = nn.Sequential(
        nn.Linear(in_features=input_size[-1], out_features=10), nn.Sigmoid())

    fill_params_of_model_by_normal(model)
    dumped_orig_model = deepcopy(model)
    sparsity_level = 0.5
    config = get_kd_config(
        get_sparsity_config_with_sparsity_init(
            get_basic_magnitude_sparsity_config(input_sample_size=input_size),
            sparsity_level),
        kd_type=kd_loss_type,
        scale=scale,
        temperature=temperature)
    config['compression'][-1]['type'] = kd_loss_type
    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)
    model.train()
    mock_dataloader = create_ones_mock_dataloader(
        config, num_samples=batch_size, batch_size=batch_size)
    compression_ctrl.scheduler.epoch_step()
    for _, (input_, __) in enumerate(mock_dataloader):
        input_ = input_.to(next(model.parameters()).device)
        outputs = model(input_)
        kd_outputs = dumped_orig_model(input_)
        reference_kd_loss = kd_loss_fn(kd_outputs, outputs)
        actual_kd_loss = compression_ctrl.loss()
        assert torch.allclose(reference_kd_loss, actual_kd_loss)

def add_range_init(config):
    for compression in config['compression']:
        if compression['algorithm'] == 'quantization':
            if 'initializer' not in compression:
                compression['initializer'] = {}
            compression['initializer'].update(
                {'range': {'num_init_samples': 1}})
            data_loader = create_ones_mock_dataloader(config)
            config = NNCFConfig.from_dict(config)
            config.register_extra_structs([
                QuantizationRangeInitArgs(wrap_dataloader_for_init(data_loader))
            ])
    return config
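
A sketch of how add_range_init might be applied; the dict below is a hypothetical input whose list-valued 'compression' section mirrors the mixed-algorithm config used in test_can_resume_with_algo_mixing:

# Hypothetical raw config for add_range_init(); only the 'quantization'
# entry receives the range-init settings and extra structs.
raw_config = {
    'input_info': {'sample_size': [1, 1, 8, 8]},
    'compression': [{'algorithm': 'const_sparsity'},
                    {'algorithm': 'quantization'}],
}
nncf_config = add_range_init(raw_config)
assert isinstance(nncf_config, NNCFConfig)
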
def test_hawq_manual_configs(manual_config_params):
    # Tip: check and correct configs with hardcoded layer names (bitwidth_per_scope attribute)
    # in case you changed quantized NNCFGraph and this test failed
    # with error like `Could not find a quantization point at scope name...`
    config = manual_config_params.create_nncf_config()
    config = register_default_init_args(config,
                                        create_ones_mock_dataloader(config),
                                        criterion=None)
    model = manual_config_params.create_model(config['model'])

    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)
    nncf_stats = compression_ctrl.statistics()

    expected = manual_config_params.bit_stats
    actual = nncf_stats.quantization

    assert expected.num_wq_per_bitwidth == actual.num_wq_per_bitwidth
    assert expected.num_aq_per_bitwidth == actual.num_aq_per_bitwidth

def run_test_training(gpu, config: NNCFConfig, inference_type: str,
                      ngpus_per_node: int):
    torch.manual_seed(2)
    number_of_iters = 10
    batch_size = 1 if torch.cuda.device_count() == 0 else torch.cuda.device_count()
    config['input_info']['sample_size'] = [1, 1, 8, 8]
    if inference_type == 'DDP':
        distributed_init_test_default(gpu, ngpus_per_node, config)
        mock_dataloader = create_rank_dataloader(config,
                                                 gpu,
                                                 batch_size * number_of_iters,
                                                 batch_size=batch_size)
    else:
        mock_dataloader = create_ones_mock_dataloader(config,
                                                      num_samples=batch_size * number_of_iters,
                                                      batch_size=batch_size)
    model_device = get_model_device(inference_type, gpu)
    model = TwoConvTestModel()
    fill_params_of_model_by_normal(model, std=0.5)
    model.to(model_device)
    dumped_orig_model = deepcopy(model)

    actual_outputs, actual_model = run_actual(deepcopy(model), config,
                                              inference_type, mock_dataloader,
                                              ngpus_per_node)
    reference_outputs = run_reference(model, config, inference_type,
                                      mock_dataloader, ngpus_per_node)
    assert reduce(lambda a, b: a and torch.allclose(b[0], b[1]), zip(actual_outputs, reference_outputs), True), \
        "Outputs of the model with the actual KD implementation do not match outputs of the model with the " \
        "reference Knowledge Distillation implementation"

    kd_params = [param for name, param in actual_model.named_parameters()
                 if KEY_TO_KD_PARAMETERS in name]
    for param1, param2 in zip(kd_params, dumped_orig_model.parameters()):
        assert torch.allclose(param1, param2), \
            "Weights of the dumped original model do not match weights of the original model used for " \
            "distillation (most likely the original model's weights are being corrupted during training)"
def test_model_can_be_loaded_with_resume(_params):
    p = _params
    sample_config_path = p['sample_config_path']
    checkpoint_path = p['checkpoint_path']

    config = SampleConfig.from_json(str(sample_config_path))
    nncf_config = NNCFConfig.from_json(str(sample_config_path))

    config.execution_mode = p['execution_mode']

    config.current_gpu = 0
    config.device = get_device(config)
    config.distributed = config.execution_mode in (
        ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    if config.distributed:
        config.dist_url = "tcp://127.0.0.1:9898"
        config.dist_backend = "nccl"
        config.rank = 0
        config.world_size = 1
        configure_distributed(config)

    model_name = config['model']
    model = load_model(model_name,
                       pretrained=False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params'))
    nncf_config = register_default_init_args(
        nncf_config, train_loader=create_ones_mock_dataloader(nncf_config))

    model.to(config.device)
    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, nncf_config)
    model, _ = prepare_model_for_execution(model, config)

    if config.distributed:
        compression_ctrl.distributed()

    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    load_state(model, checkpoint['state_dict'], is_resume=True)
def test_distributed_init_struct():
    class FakeModelClass:
        def __init__(self, model_: nn.Module):
            self.model = model_

        def unwrap(self):
            return self.model

    config = get_basic_pruning_config()
    init_loader = create_ones_mock_dataloader(config)
    wrapper_callback = FakeModelClass
    unwrapper_callback = lambda x: x.unwrap()
    nncf_config = register_default_init_args(
        config,
        init_loader,
        distributed_callbacks=(wrapper_callback, unwrapper_callback))

    dist_callbacks = nncf_config.get_extra_struct(DistributedCallbacksArgs)
    model = PruningTestModel()
    wrapped_model = dist_callbacks.wrap_model(model)
    assert isinstance(wrapped_model, FakeModelClass)
    unwrapped_model = dist_callbacks.unwrap_model(wrapped_model)
    assert unwrapped_model == model

def test_accuracy_aware_config(aa_config, must_raise):
    def mock_validate_fn(model):
        pass

    config = get_quantization_config_without_range_init(LeNet.INPUT_SIZE[-1])

    config.update({
        "accuracy_aware_training": {
            "mode": "adaptive_compression_level",
            "params": {
                "maximal_relative_accuracy_degradation": 1,
                "initial_training_phase_epochs": 1,
                "patience_epochs": 10
            }
        }
    })

    config.update(aa_config)

    train_loader = create_ones_mock_dataloader(config, num_samples=10)
    model = LeNet()

    config = register_default_init_args(config,
                                        train_loader=train_loader,
                                        model_eval_fn=mock_validate_fn)
    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)

    if must_raise:
        with pytest.raises(RuntimeError):
            _ = create_accuracy_aware_training_loop(config,
                                                    compression_ctrl,
                                                    dump_checkpoints=False)
    else:
        _ = create_accuracy_aware_training_loop(config,
                                                compression_ctrl,
                                                dump_checkpoints=False)

def test_can_resume_with_manual_init(mocker, desc, _nncf_caplog):
    config = desc.config
    config_to_resume = desc.config_to_resume

    config = register_default_init_args(
        config, train_loader=create_ones_mock_dataloader(config))
    all_spies = desc.setup_init_spies(mocker)
    init_spy = mocker.spy(PTCompressionAlgorithmBuilder, '__init__')
    get_setup_spy = mocker.spy(QuantizationBuilder, '_get_quantizer_setup')

    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        desc.model_creator(), config)
    desc.check_precision_init(compression_ctrl)

    for m in all_spies:
        m.assert_called()
        m.reset_mock()
    get_setup_spy.assert_called()
    get_setup_spy.reset_mock()

    compression_state = compression_ctrl.get_compression_state()
    register_bn_adaptation_init_args(config_to_resume)
    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        desc.model_creator(),
        config_to_resume,
        compression_state=compression_state)

    if config_to_resume is not None and config_to_resume['compression']['initializer']:
        assert not init_spy.call_args[0][2]

    for m in all_spies:
        m.assert_not_called()
    get_setup_spy.assert_not_called()

    desc.check_precision_init(compression_ctrl)
def create_regular_dataloader():
    return create_ones_mock_dataloader(config=get_empty_config(),
                                       num_samples=N_SAMPLE)
def test_early_exit_with_mock_validation(max_accuracy_degradation,
                                         exit_epoch_number,
                                         maximal_total_epochs=100):
    epoch_counter = 0

    def mock_validate_fn(model, init_step=False, epoch=0):
        original_metric = 0.85
        if init_step:
            return original_metric
        nonlocal epoch_counter
        epoch_counter = epoch
        if "maximal_relative_accuracy_degradation" in max_accuracy_degradation:
            return original_metric * (1 - 0.01 * max_accuracy_degradation[
                'maximal_relative_accuracy_degradation']) * (epoch /
                                                             exit_epoch_number)
        return (original_metric - max_accuracy_degradation['maximal_absolute_accuracy_degradation']) * \
               epoch / exit_epoch_number

    config = get_quantization_config_without_range_init(LeNet.INPUT_SIZE[-1])

    params = {"maximal_total_epochs": maximal_total_epochs}
    params.update(max_accuracy_degradation)
    accuracy_aware_config = {
        "accuracy_aware_training": {
            "mode": "early_exit",
            "params": params
        }
    }

    config.update(accuracy_aware_config)

    train_loader = create_ones_mock_dataloader(config, num_samples=10)
    model = LeNet()

    config = register_default_init_args(config,
                                        train_loader=train_loader,
                                        model_eval_fn=partial(mock_validate_fn,
                                                              init_step=True))

    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)

    def train_fn(compression_ctrl,
                 model,
                 epoch,
                 optimizer,
                 lr_scheduler,
                 train_loader=train_loader):
        pass

    def configure_optimizers_fn():
        return None, None

    early_stopping_training_loop = EarlyExitCompressionTrainingLoop(
        config, compression_ctrl, dump_checkpoints=False)
    model = early_stopping_training_loop.run(
        model,
        train_epoch_fn=train_fn,
        validate_fn=partial(mock_validate_fn),
        configure_optimizers_fn=configure_optimizers_fn)
    # Epoch number starts from 0
    assert epoch_counter == exit_epoch_number