Example #1
def create_compression_algorithm_builders(
        config: NNCFConfig,
        should_init: bool = True) -> List[CompressionAlgorithmBuilder]:
    compression_config_json_section = config.get('compression', {})
    compression_config_json_section = deepcopy(compression_config_json_section)

    hw_config_type = None
    hw_config_type_str = config.get("hw_config_type")
    if hw_config_type_str is not None:
        hw_config_type = HWConfigType.from_str(hw_config_type_str)
    if isinstance(compression_config_json_section, dict):
        compression_config = NNCFConfig(compression_config_json_section)
        compression_config.register_extra_structs(
            config.get_all_extra_structs_for_copy())
        compression_config["hw_config_type"] = hw_config_type
        return [
            get_compression_algorithm(compression_config)(
                compression_config, should_init=should_init),
        ]
    retval = []
    for algo_config in compression_config_json_section:
        algo_config = NNCFConfig(algo_config)
        algo_config.register_extra_structs(
            config.get_all_extra_structs_for_copy())
        algo_config["hw_config_type"] = hw_config_type
        retval.append(
            get_compression_algorithm(algo_config)(algo_config,
                                                   should_init=should_init))
    return retval
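The builder above accepts the "compression" section either as a single dict or as a list of per-algorithm dicts. A minimal sketch of the two shapes, assuming the usual NNCF JSON schema (the algorithm names and sample sizes below are illustrative):

# Single algorithm: "compression" is a dict, so a single builder is returned.
single_algo_config = NNCFConfig({
    "input_info": {"sample_size": [1, 3, 224, 224]},
    "compression": {"algorithm": "quantization"}
})

# Several algorithms: "compression" is a list, so one builder is created per entry.
multi_algo_config = NNCFConfig({
    "input_info": {"sample_size": [1, 3, 224, 224]},
    "compression": [
        {"algorithm": "quantization"},
        {"algorithm": "magnitude_sparsity"}
    ]
})

builders = create_compression_algorithm_builders(multi_algo_config, should_init=False)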
Example #2
def register_default_init_args(nncf_config: NNCFConfig,
                               data_loader: tf.data.Dataset,
                               batch_size: int,
                               device: str = None) -> NNCFConfig:
    """
    Register extra structures in the NNCFConfig. Initialization of some
    compression algorithms requires certain extra structures.

    :param nncf_config: An instance of the NNCFConfig class without extra structures.
    :param data_loader: Dataset used for initialization.
    :param batch_size: Batch size used for initialization.
    :param device: Device to perform initialization. If `device` is `None` then the device
        of the model parameters will be used.
    :return: An instance of the NNCFConfig class with extra structures.
    """
    nncf_config.register_extra_structs([
        QuantizationRangeInitArgs(data_loader=TFInitializingDataLoader(data_loader, batch_size),
                                  device=device),
        BNAdaptationInitArgs(data_loader=TFInitializingDataLoader(data_loader, batch_size),
                             device=device)
    ])
    return nncf_config
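A minimal usage sketch for the helper above, assuming a small tf.data.Dataset built from random tensors (the input shape and config values are placeholders):

import numpy as np
import tensorflow as tf

nncf_config = NNCFConfig({
    "input_info": {"sample_size": [1, 224, 224, 3]},
    "compression": {"algorithm": "quantization"}
})

# Placeholder calibration data; any tf.data.Dataset yielding model inputs will do.
calibration_dataset = tf.data.Dataset.from_tensor_slices(
    np.random.rand(8, 224, 224, 3).astype(np.float32)).batch(1)

nncf_config = register_default_init_args(nncf_config,
                                         data_loader=calibration_dataset,
                                         batch_size=1)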
Example #3
def create_compression_algorithm_builders(
        config: NNCFConfig,
        should_init: bool = True) -> List[CompressionAlgorithmBuilder]:
    compression_config_json_section = config.get('compression', {})
    compression_config_json_section = deepcopy(compression_config_json_section)

    hw_config_type = None
    quantizer_setup_type_str = config.get("quantizer_setup_type",
                                          "propagation_based")
    quantizer_setup_type = QuantizerSetupType.from_str(
        quantizer_setup_type_str)
    if quantizer_setup_type == QuantizerSetupType.PROPAGATION_BASED:
        target_device = config.get("target_device", "ANY")
        if target_device != 'TRIAL':
            hw_config_type = HWConfigType.from_str(
                HW_CONFIG_TYPE_TARGET_DEVICE_MAP[target_device])

    if isinstance(compression_config_json_section, dict):
        compression_config = NNCFConfig(compression_config_json_section)
        compression_config.register_extra_structs(
            config.get_all_extra_structs_for_copy())
        compression_config["hw_config_type"] = hw_config_type
        compression_config['quantizer_setup_type'] = quantizer_setup_type
        return [
            get_compression_algorithm(compression_config)(
                compression_config, should_init=should_init),
        ]
    retval = []
    for algo_config in compression_config_json_section:
        algo_config = NNCFConfig(algo_config)
        algo_config.register_extra_structs(
            config.get_all_extra_structs_for_copy())
        algo_config["hw_config_type"] = hw_config_type
        algo_config['quantizer_setup_type'] = quantizer_setup_type
        retval.append(
            get_compression_algorithm(algo_config)(algo_config,
                                                   should_init=should_init))
    return retval
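Compared with Example #1, this variant also resolves the quantizer setup type and maps target_device to a hardware config. A short sketch of the relevant keys, assuming the standard NNCF schema (the device name "CPU" is illustrative and must be a key of HW_CONFIG_TYPE_TARGET_DEVICE_MAP):

# A concrete device: hw_config_type is looked up in HW_CONFIG_TYPE_TARGET_DEVICE_MAP.
hw_aware_config = NNCFConfig({
    "input_info": {"sample_size": [1, 3, 224, 224]},
    "target_device": "CPU",
    "compression": {"algorithm": "quantization"}
})

# "TRIAL" keeps hw_config_type as None, so quantization parameters come purely
# from the config rather than from a hardware definition.
trial_config = NNCFConfig({
    "input_info": {"sample_size": [1, 3, 224, 224]},
    "target_device": "TRIAL",
    "compression": {"algorithm": "quantization"}
})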
Example #4
def test_init_ranges_are_set(quantization_mode: str, per_channel: bool,
                             range_init_type_vs_ref_vals: Tuple[str, float,
                                                                float, float]):
    class SyntheticDataset(torch.utils.data.Dataset):
        def __init__(self):
            super().__init__()
            self._length = 1

        def __getitem__(self, idx):
            if idx >= self._length:
                raise StopIteration
            test_input_sample = torch.zeros([3, 100, 100])
            for i in range(0, 100):
                for j in range(0, 100):
                    test_input_sample[0][i][j] = i * 100 + j
            test_input_sample[1] = test_input_sample[0]
            test_input_sample[2] = test_input_sample[0]
            return test_input_sample, test_input_sample

        def __len__(self):
            return self._length

    data_loader = torch.utils.data.DataLoader(SyntheticDataset(),
                                              batch_size=1,
                                              drop_last=True)

    range_init_type = range_init_type_vs_ref_vals[0]
    config_with_init = NNCFConfig()
    config_with_init.update({
        "input_info": {
            "sample_size": [1, 3, 100, 100]
        },
        "target_device": "TRIAL",
        "compression": {
            "algorithm": "quantization",
            "activations": {
                "mode": quantization_mode,
                "per_channel": per_channel
            },
            "weights": {
                "mode": quantization_mode,
                "per_channel": per_channel
            },
            "initializer": {
                "range": {
                    "num_init_samples": 1,
                    "type": range_init_type
                }
            }
        }
    })

    if range_init_type == "percentile":
        config_with_init["compression"]["initializer"]["range"]["params"] = {
            "min_percentile": 32.10,
            "max_percentile": 67.89
        }

    # Activations init check
    id_model = SingleConv2dIdentityModel()
    config_with_init.register_extra_structs(
        [QuantizationRangeInitArgs(wrap_dataloader_for_init(data_loader))])
    register_bn_adaptation_init_args(config_with_init)
    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        id_model, config_with_init)

    act_quantizer_info = next(
        iter(compression_ctrl.non_weight_quantizers.values()))

    ref_scale = range_init_type_vs_ref_vals[1]
    ref_input_low = range_init_type_vs_ref_vals[2]
    ref_input_high = range_init_type_vs_ref_vals[3]

    def check_scales(quantizer: BaseQuantizer, per_channel: bool):
        # Absolute tolerance is 1.0 due to percentile value interpolation
        if quantization_mode == 'symmetric':
            assert torch.allclose(quantizer.scale,
                                  torch.ones_like(quantizer.scale) * ref_scale,
                                  atol=1.0)
            if per_channel:
                assert quantizer.scale.numel() == 3
            else:
                assert quantizer.scale.numel() == 1
        else:
            assert torch.allclose(quantizer.input_low,
                                  torch.ones_like(quantizer.input_low) *
                                  ref_input_low,
                                  atol=1.0)
            assert torch.allclose(quantizer.input_range,
                                  torch.ones_like(quantizer.input_low) *
                                  ref_input_high,
                                  atol=1.0)
            if per_channel:
                assert quantizer.input_low.numel() == 3
                assert quantizer.input_range.numel() == 3
            else:
                assert quantizer.input_low.numel() == 1
                assert quantizer.input_range.numel() == 1

    check_scales(act_quantizer_info.quantizer_module_ref, per_channel)
    # Weight init check
    synth_weight_model = SingleConv2dSyntheticWeightModel()
    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        synth_weight_model, config_with_init)

    weight_quantizer_info = next(
        iter(compression_ctrl.weight_quantizers.values()))
    check_scales(weight_quantizer_info.quantizer_module_ref, per_channel)
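The expected values arrive through the range_init_type_vs_ref_vals tuple. A hedged illustration of plausible tuples for the 0..9999 ramp produced by SyntheticDataset (the percentile numbers echo the references in Example #5); these are not copied from the real test fixture:

# (range init type, ref_scale, ref_input_low, ref_input_high) -- the last value
# is compared against input_range in check_scales.
RANGE_INIT_TYPE_VS_REF_VALS = [
    ("min_max", 9999.0, 0.0, 9999.0),        # full span of the 0..9999 ramp
    ("percentile", 6789.0, 3210.0, 3578.0),  # matches the references in Example #5
]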
Example #5
def test_percentile_init(quantization_mode):
    class SyntheticDataset(torch.utils.data.Dataset):
        def __init__(self):
            self._length = 1

        def __getitem__(self, idx):
            if idx >= self._length:
                raise StopIteration
            test_input_sample = torch.zeros([1, 100, 100])
            for i in range(0, 100):
                for j in range(0, 100):
                    test_input_sample[0][i][j] = i * 100 + j
            return test_input_sample, test_input_sample

        def __len__(self):
            return self._length

    data_loader = torch.utils.data.DataLoader(SyntheticDataset(), batch_size=1)

    config_with_init = NNCFConfig()
    config_with_init.update({
        "input_info": {
            "sample_size": [1, 1, 100, 100]
        },
        "compression": {
            "algorithm": "quantization",
            "activations": {
                "mode": quantization_mode,
            },
            "weights": {
                "mode": quantization_mode,
            },
            "initializer": {
                "range": {
                    "num_init_steps": 1,
                    "type": "percentile",
                    "min_percentile": 32.10,
                    "max_percentile": 67.89
                }
            }
        }
    })

    # Activations init check
    id_model = SingleConv2dIdentityModel()
    config_with_init.register_extra_structs(
        [QuantizationRangeInitArgs(data_loader)])
    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        id_model, config_with_init)

    act_quantizer = next(iter(compression_ctrl.non_weight_quantizers.values()))

    def assert_range(quantizer: BaseQuantizer):
        # Absolute tolerance is 1.0 due to percentile value interpolation
        if quantization_mode == 'symmetric':
            assert quantizer.scale.item() == approx(6789, abs=1.0)
        else:
            assert quantizer.input_low.item() == approx(3210, abs=1.0)
            assert quantizer.input_range.item() == approx(3578, abs=1.0)

    assert_range(act_quantizer)
    # Weight init check
    synth_weight_model = SingleConv2dSyntheticWeightModel()
    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        synth_weight_model, config_with_init)

    weight_quantizer = next(
        iter(compression_ctrl.weight_quantizers.values()))
    assert_range(weight_quantizer)
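The reference numbers in assert_range follow from the synthetic ramp: the 100x100 sample holds the integers 0 through 9999, so the requested percentiles land near 3210 and 6789. A quick standalone check of that arithmetic (NumPy's default linear interpolation is assumed; NNCF's estimator may interpolate slightly differently, which is why the test allows an absolute tolerance of 1.0):

import numpy as np

ramp = np.arange(100 * 100, dtype=np.float32)  # same values as test_input_sample
low = np.percentile(ramp, 32.10)    # ~3209.7 -> input_low reference of 3210
high = np.percentile(ramp, 67.89)   # ~6789.3 -> symmetric scale reference of 6789
print(low, high, high - low)        # high - low is roughly 3580, near the 3578 range reference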
Example #6
def register_bn_adaptation_init_args(config: NNCFConfig):
    config.register_extra_structs(
        [BNAdaptationInitArgs(data_loader=DummyDataLoader(), device=None)])
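DummyDataLoader only satisfies the config schema for tests that never actually run batchnorm adaptation. A sketch of the same registration with real calibration data, reusing the SyntheticDataset from Example #5 and assuming the wrap_dataloader_for_init helper from Example #4 is also applicable to BNAdaptationInitArgs:

import torch

calibration_loader = torch.utils.data.DataLoader(SyntheticDataset(), batch_size=1)
config.register_extra_structs([
    BNAdaptationInitArgs(data_loader=wrap_dataloader_for_init(calibration_loader),
                         device=None)
])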