Example #1
0
def create_sample_config(args, parser) -> SampleConfig:
    """Build a SampleConfig from the JSON file named by ``args.config``.

    The same JSON file is parsed twice: once as an NNCFConfig, which is
    attached to the returned object as ``nncf_config``, and once as the
    SampleConfig itself, which is then overridden by command-line args.
    """
    compression_config = NNCFConfig.from_json(args.config)

    result = SampleConfig.from_json(args.config)
    result.update_from_args(args, parser)
    result.nncf_config = compression_config

    return result
Example #2
0
def test_hawq_manual_configs(manual_config_params):
    """Check that a manually written quantization config yields the
    expected per-layer bit-width table (pattern-based quantizer setup)."""
    name, expected_rows = manual_config_params
    json_path = EXAMPLES_DIR.joinpath('classification', 'configs', 'quantization') / name
    config = NNCFConfig.from_json(str(json_path))
    config['quantizer_setup_type'] = 'pattern_based'
    config = register_default_init_args(config, train_loader=create_mock_dataloader(config), criterion=None)

    model = load_model(config['model'], pretrained=False)
    model.eval()
    _, ctrl = create_compressed_model_and_algo_for_test(model, config)

    stats_table = ctrl.non_stable_metric_collectors[0].get_bits_stat()
    # pylint: disable=protected-access
    assert stats_table._rows == expected_rows
def get_basic_rb_sparse_model(model_name,
                              local=False,
                              config=CONF,
                              freeze=False):
    """Create a test model compressed with RB sparsity.

    :param model_name: key into TEST_MODELS for the model factory.
    :param local: if True, switch sparsity_level_setting_mode to 'local'.
    :param config: NNCFConfig instance or a Path to a JSON config.
    :param freeze: if True, freeze the sparsity algorithm after creation.
    :return: (compressed_model, algo_controller, config) tuple.
    """
    net = TEST_MODELS[model_name]()
    cfg = NNCFConfig.from_json(config) if isinstance(config, Path) else config
    if local:
        # NOTE(review): when `config` is the shared default CONF object this
        # update mutates it in place — presumably intentional; verify callers.
        cfg.update({"params": {"sparsity_level_setting_mode": 'local'}})
    compressed, controller = create_compressed_model_and_algo_for_test(
        net, cfg, force_no_init=True)
    if freeze:
        controller.freeze()
    return compressed, controller, cfg
def test_hawq_manual_configs(manual_config_params, hw_config):
    """Check that a manually written quantization config yields the
    expected bit-width table, optionally under a specific HW config."""
    name, expected_rows = manual_config_params
    json_path = EXAMPLES_DIR.joinpath('classification', 'configs', 'quantization') / name
    config = NNCFConfig.from_json(str(json_path))
    config = register_default_init_args(config, criterion=None, train_loader=create_mock_dataloader(config))
    if hw_config:
        config['hw_config'] = hw_config.value

    model = load_model(config['model'], pretrained=False)
    model.eval()
    _, ctrl = create_compressed_model_and_algo_for_test(model, config)

    # pylint: disable=protected-access
    assert ctrl.get_bit_stats()._rows == expected_rows
def test_distributed_masks_are_equal(quantization):
    """Train briefly under MirroredStrategy and verify that the sparsity
    mask seeds logged to MASKS_SEEDS_PATH are identical across replicas
    and vary over the course of training."""
    # Start from a clean seeds file; it may not exist yet.
    try:
        os.remove(MASKS_SEEDS_PATH)
    except OSError:
        pass

    replicas = 3
    strategy = tf.distribute.MirroredStrategy(
        [f'GPU:{i}' for i in range(replicas)])
    with strategy.scope():
        config = NNCFConfig.from_json(CONF)
        if quantization:
            # Stack quantization on top of the existing compression entry.
            config.update({
                'compression':
                [config['compression'], {
                    'algorithm': 'quantization'
                }]
            })
        model = TEST_MODELS['Conv2D']()
        skip_init_state = {
            BaseCompressionAlgorithmController.BUILDER_STATE: {}
        }
        algo, model = create_compressed_model(model, config, skip_init_state)
        model.add_loss(algo.loss)
        callbacks = create_compression_callbacks(algo, log_tensorboard=False)

        model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),
                      optimizer=tf.keras.optimizers.Adam(5e-4),
                      metrics=["accuracy"])

        per_replica_len = 10
        total_len = per_replica_len * replicas

        inputs = tf.random.normal((total_len, ) + model.input_shape[1:])
        targets = tf.random.normal((total_len, ) + model.output_shape[1:])
        # Disable auto-sharding so the replicas consume the same data layout.
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        dataset = (tf.data.Dataset.from_tensor_slices((inputs, targets))
                   .batch(replicas)
                   .with_options(options))

        model.fit(dataset,
                  epochs=1,
                  validation_split=0,
                  callbacks=[callbacks])

    # Parse "replica_id (a, b)" lines into per-replica seed-pair lists.
    with open(MASKS_SEEDS_PATH, 'r', encoding='utf8') as seeds_file:
        lines = seeds_file.readlines()
    per_replica = defaultdict(list)
    for line in lines:
        replica_id, *pair = line.split()
        # Strip the surrounding parenthesis/comma characters from each token.
        per_replica[replica_id].append(
            (int(pair[0][1:]), int(pair[1][:-1])))

    # Every pair of replicas must have logged the same seed sequence...
    for left, right in combinations(per_replica, 2):
        assert per_replica[left] == per_replica[right]
    # ...and the seeds must actually change during training.
    assert len(set(per_replica['0'])) > 1
    # Clean up the temporary file.
    os.remove(MASKS_SEEDS_PATH)
 def create_nncf_config(self):
     """Load and return the NNCFConfig parsed from this object's config path."""
     path = self._get_config_path()
     return NNCFConfig.from_json(str(path))