def test_generate_image(tmp_path, models, model_name, model_framework, layout,
                        input_shape):
    path_image_data = os.path.join(tmp_path, 'pot_dataset')
    stat_subset_size = 5
    engine_config = Dict({
        'device': 'CPU',
        'type': 'data_free',
        'data_source': path_image_data,
        'subset_size': stat_subset_size,
        'layout': layout,
        'shape': input_shape,
        'generate_data': 'True'  # ask the loader to synthesize images into data_source
    })
    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)
    data_loader = create_data_loader(engine_config, model)

    num_images_from_data_loader = len(list(data_loader))
    num_images_in_dir = len(os.listdir(path_image_data))
    assert num_images_from_data_loader == num_images_in_dir == stat_subset_size

    image = data_loader[0]
    # normalize the expected shape by dropping the batch dimension
    if input_shape is None:
        in_node = get_nodes_by_type(model, ['Parameter'], recursively=False)[0]
        input_shape = tuple(in_node.shape[1:])
    elif len(input_shape) == 4:
        input_shape = input_shape[1:]

    assert image.shape == input_shape
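For reference, the loaders these tests construct conform to the public POT DataLoader interface (__len__ plus __getitem__). Below is a minimal sketch of an equivalent folder-based loader; the class name, OpenCV decoding, and HWC-to-CHW transpose are illustrative assumptions, not taken from the source:

import os

import cv2
import numpy as np
from openvino.tools.pot import DataLoader


class ImageFolderLoader(DataLoader):
    """Hypothetical loader serving images from `data_source` as CHW float32 arrays."""

    def __init__(self, config):
        super().__init__(config)
        self._data_source = config['data_source']
        self._files = sorted(os.listdir(self._data_source))

    def __len__(self):
        return len(self._files)

    def __getitem__(self, index):
        if index >= len(self):
            raise IndexError('index out of range')
        image = cv2.imread(os.path.join(self._data_source, self._files[index]))
        return image.transpose(2, 0, 1).astype(np.float32)  # HWC -> CHW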
Example #2
def optimize(config):
    """Creates pipeline of compression algorithms and optimize its parameters"""

    if logger.progress_bar_disabled:
        print_algo_configs(config.compression.algorithms)

    # load custom model
    model = load_model(config.model,
                       target_device=config.compression.target_device)

    data_loader = None
    # create a custom data loader when the engine is not the accuracy checker
    if config.engine.type != 'accuracy_checker':
        data_loader = create_data_loader(config.engine, model)

    engine = create_engine(config.engine, data_loader=data_loader, metric=None)

    pipeline = create_pipeline(config.compression.algorithms, engine, 'CLI')

    compressed_model = pipeline.run(model)

    if not config.model.keep_uncompressed_weights:
        compress_model_weights(compressed_model)

    save_model(compressed_model,
               os.path.join(config.model.exec_log_dir, 'optimized'),
               model_name=config.model.model_name)

    # evaluate the compressed model if requested
    if config.engine.evaluate:
        return pipeline.evaluate(compressed_model)

    return None
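For orientation, here is a minimal configuration this optimize function could consume, assuming Dict is addict.Dict as in the surrounding examples; all paths, names, and the algorithm choice are illustrative placeholders:

from addict import Dict

config = Dict({
    'model': {
        'model_name': 'sample_model',      # placeholder names and paths
        'model': './sample_model.xml',
        'weights': './sample_model.bin',
        'exec_log_dir': './results',
        'keep_uncompressed_weights': False,
    },
    'engine': {
        'type': 'simplified',
        'device': 'CPU',
        'data_source': './calibration_images',
        'evaluate': False,
    },
    'compression': {
        'target_device': 'CPU',
        'algorithms': [{
            'name': 'DefaultQuantization',
            'params': {'preset': 'performance', 'stat_subset_size': 300},
        }],
    },
})

metrics = optimize(config)  # None here, since engine.evaluate is False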
Example #3
def test_ranger_graph(_params, tmp_path, models):
    model_name, model_framework = _params

    algorithm_config = Dict({
        'algorithms': [{
            'name': 'Ranger',
            'params': {
                'target_device': 'ANY',
                'stat_subset_size': 1
            }
        }]
    })

    model = models.get(model_name, model_framework, tmp_path)

    test_dir = Path(__file__).parent
    path_image_data = os.path.join(test_dir, 'data/image_data')
    engine_config = Dict({'device': 'CPU',
                          'type': 'simplified',
                          'data_source': path_image_data})
    config = merge_configs(model.model_params, engine_config, algorithm_config)

    model = load_model(config.model)
    data_loader = create_data_loader(engine_config, model)
    engine = create_engine(config.engine, data_loader=data_loader, metric=None)
    pipeline = create_pipeline(config.compression.algorithms, engine)

    optimized_model = pipeline.run(model)
    check_model(tmp_path, optimized_model, model_name + '_ranger', model_framework)
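merge_configs is a helper from the same test suite; judging by how its result is consumed here (config.model, config.engine, config.compression), it folds the three partial configs into one Dict. An illustrative stand-in, not the actual implementation:

from addict import Dict

def merge_configs_sketch(model_params, engine_config, algorithm_config):
    # Hypothetical equivalent of the suite's merge_configs helper.
    config = Dict()
    config.model = model_params
    config.engine = engine_config
    config.compression = algorithm_config
    return config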
Example #4
def test_statistics_collector_subsets(tmp_path, models, model_name,
                                      model_framework):
    with open(PATHS2DATASETS_CONFIG.as_posix()) as f:
        data_source = Dict(json.load(f))['ImageNet2012'].pop('source_dir')

    engine_config = Dict({
        'type': 'simplified',
        'data_source': '{}/{}'.format(data_source, 'ILSVRC2012_val*'),
        'device': 'CPU'
    })

    minmax_config = Dict({
        'target_device': 'CPU',
        'preset': 'performance',
        'stat_subset_size': 1,
        'ignored': []
    })
    bias_correction_config = Dict({
        'target_device': 'CPU',
        'preset': 'performance',
        'stat_subset_size': 2
    })

    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)
    data_loader = create_data_loader(engine_config, model)
    engine = create_engine(engine_config, data_loader=data_loader, metric=None)
    collector = StatisticsCollector(engine)
    min_max_algo = MinMaxQuantization(minmax_config, engine)
    min_max_algo.register_statistics(model, collector)
    bias_correction_algo = BiasCorrection(bias_correction_config, engine)
    bias_correction_algo.register_statistics(model, collector)
    collector.compute_statistics(model)

    out = {
        'MinMaxQuantization': collector.get_statistics_for_algorithm('MinMaxQuantization'),
        'BiasCorrection': collector.get_statistics_for_algorithm('BiasCorrection')
    }

    refs_file = Path(__file__).parent / 'data/test_cases_refs/statistics_data.txt'
    with open(refs_file.as_posix()) as file:
        # the reference file stores JSON serialized twice, hence the double decode
        refs = json.loads(json.load(file))

    eps = 1e-3
    for algo_name, algo_val in out.items():
        for node_name, node_val in algo_val.items():
            for stats_name, stats_val in node_val.items():
                if stats_name == 'batch_mean_param_in':
                    continue
                ref_stats_vals = refs[algo_name][node_name][stats_name]
                for ref_vals, vals in zip(ref_stats_vals, stats_val):
                    assert np.max(np.abs(np.array(ref_vals) - vals)) < eps
Example #5
def test_check_layout(tmp_path, models, model_name, model_framework, layout, reference_shape):
    test_dir = Path(__file__).parent
    path_image_data = os.path.join(test_dir, "data/image_data")

    engine_config = Dict({"device": "CPU",
                          "type": "simplified",
                          "layout": layout,
                          "data_source": path_image_data})
    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)

    data_loader = create_data_loader(engine_config, model)
    image = data_loader.item()

    assert image.shape == reference_shape
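The layout string determines whether the loader hands back images channels-first or channels-last; a quick numpy illustration of the shape mapping (independent of POT, sizes are illustrative):

import numpy as np

image_hwc = np.zeros((224, 224, 3), dtype=np.float32)  # as decoded from disk
image_chw = image_hwc.transpose(2, 0, 1)

assert image_hwc.shape == (224, 224, 3)  # what an 'NHWC' layout would yield
assert image_chw.shape == (3, 224, 224)  # what an 'NCHW' layout would yield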
Example #6
def test_check_image(tmp_path, models, model_name, model_framework):
    test_dir = Path(__file__).parent
    path_image_data = os.path.join(test_dir, "data/image_data")

    engine_config = Dict({"device": "CPU",
                          "type": "simplified",
                          "data_source": path_image_data})
    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)

    data_loader = create_data_loader(engine_config, model)

    num_images_from_data_loader = len(list(data_loader))
    num_images_in_dir = len(os.listdir(path_image_data))

    assert num_images_from_data_loader == num_images_in_dir
Example #7
def run_algo(model, model_name, algorithm_config, tmp_path, reference_name):
    engine_config = get_engine_config(model_name)
    config = merge_configs(model.model_params, engine_config, algorithm_config)

    model = load_model(model.model_params)
    data_loader = create_data_loader(engine_config, model)
    engine = create_engine(engine_config, data_loader=data_loader, metric=None)
    pipeline = create_pipeline(algorithm_config.algorithms, engine)

    with torch.backends.mkldnn.flags(enabled=False):
        model = pipeline.run(model)
    paths = save_model(model, tmp_path.as_posix(), reference_name)
    engine.set_model(model)
    metrics = evaluate(config=config, subset=range(1000), paths=paths)
    metrics = OrderedDict([(metric.name, np.mean(metric.evaluated_value))
                           for metric in metrics])

    return metrics, model
Example #8
def create_(tmp_path, models, model_name, model_framework, quantization_mode,
            algo, preset, granularity, type_max, type_min):
    with open(PATHS2DATASETS_CONFIG.as_posix()) as f:
        data_source = Dict(json.load(f))['ImageNet2012'].pop('source_dir')

    engine_config = Dict({'type': 'simplified',
                          'data_source': '{}/{}'.format(data_source, 'ILSVRC2012_val*'),
                          'device': 'CPU'})

    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)
    data_loader = create_data_loader(engine_config, model)
    engine = create_engine(engine_config, data_loader=data_loader, metric=None)
    collector = StatisticsCollector(engine)

    algo_config = get_algo_config(quantization_mode, algo, preset, granularity, type_max,
                                  type_min)
    return model, engine, collector, algo_config
Example #9
def test_statistics_collector_subsets(tmp_path, models, model_name,
                                      model_framework):
    with open(PATHS2DATASETS_CONFIG.as_posix()) as f:
        data_source = Dict(json.load(f))['ImageNet2012'].pop('source_dir')

    engine_config = Dict({
        'type': 'simplified',
        'data_source': '{}/{}'.format(data_source, 'ILSVRC2012_val*'),
        'device': 'CPU'
    })

    minmax_config = Dict({
        'target_device': 'CPU',
        'preset': 'performance',
        'stat_subset_size': 1,
        'ignored': []
    })
    bias_correction_config = Dict({
        'target_device': 'CPU',
        'preset': 'performance',
        'stat_subset_size': 2
    })

    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)
    data_loader = create_data_loader(engine_config, model)
    engine = create_engine(engine_config, data_loader=data_loader, metric=None)
    collector = StatisticsCollector(engine)
    min_max_algo = MinMaxQuantization(minmax_config, engine)
    min_max_algo.register_statistics(model, collector)
    bias_correction_algo = BiasCorrection(bias_correction_config, engine)
    bias_correction_algo.register_statistics(model, collector)
    collector.compute_statistics(model)

    out = {
        'MinMaxQuantization': collector.get_statistics_for_algorithm('MinMaxQuantization'),
        'BiasCorrection': collector.get_statistics_for_algorithm('BiasCorrection')
    }

    refs_file = Path(__file__).parent / 'data/test_cases_refs' / f'{model_name}_statistics_data.json'
    local_path = os.path.join(tmp_path, '{}_{}.json'.format(model_name, 'statistics_data'))

    with open(refs_file.as_posix()) as file:
        refs = json.load(file)

    eps = 1e-3
    # convert statistics (numpy arrays) to plain lists for JSON serialization
    local_out = {}
    for algo_name, algo_val in out.items():
        local_out[algo_name] = {}
        for node_name, node_val in algo_val.items():
            if isinstance(node_name, tuple):
                name = f'{node_name[0]}.{node_name[1]}'
            else:
                name = node_name
            local_out[algo_name][name] = {}
            for stats_name, stats_val in node_val.items():
                local_out[algo_name][name][stats_name] = [
                    np.array(v).tolist() for v in stats_val
                ]
    # persist the collected statistics for offline inspection
    with open(local_path, 'w') as local_file:
        json.dump(local_out, local_file)
    for algo_name, algo_val in out.items():
        for node_name, node_val in algo_val.items():
            for stats_name, stats_val in node_val.items():
                if stats_name in ['batch_mean_param_in', 'shape']:
                    continue
                if isinstance(node_name, tuple):
                    node_name = f'{node_name[0]}.{node_name[1]}'
                ref_stats_vals = refs[algo_name][node_name][stats_name]
                for ref_vals, vals in zip(ref_stats_vals, stats_val):
                    assert np.max(np.abs(np.array(ref_vals) - vals)) < eps