Example #1
File: run.py Project: mikhailk62/openvino
def optimize(config):
    """Creates pipeline of compression algorithms and optimize its parameters"""

    if logger.progress_bar_disabled:
        print_algo_configs(config.compression.algorithms)

    # load custom model
    model = load_model(config.model,
                       target_device=config.compression.target_device)

    data_loader = None
    # create custom data loader in case of custom Engine
    if config.engine.type != 'accuracy_checker':
        data_loader = create_data_loader(config.engine, model)

    engine = create_engine(config.engine, data_loader=data_loader, metric=None)

    pipeline = create_pipeline(config.compression.algorithms, engine, 'CLI')

    compressed_model = pipeline.run(model)

    if not config.model.keep_uncompressed_weights:
        compress_model_weights(compressed_model)

    save_model(compressed_model,
               os.path.join(config.model.exec_log_dir, 'optimized'),
               model_name=config.model.model_name)

    # evaluate the compressed model if needed
    if config.engine.evaluate:
        return pipeline.evaluate(compressed_model)

    return None
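
Example #1 drives the whole flow from a single config object. For orientation, below is a minimal sketch of a config that satisfies the attribute accesses in optimize() above (config.model, config.engine, config.compression); the Dict is assumed to be addict-style as in the other examples, and every concrete value is a placeholder, not part of the original source.

from addict import Dict  # assumption: the Dict used across these examples

# Hypothetical values; keys mirror the attributes optimize() reads above.
config = Dict({
    'model': {
        'model_name': 'sample_model',        # placeholder
        'model': '/path/to/model.xml',       # placeholder IR path
        'weights': '/path/to/model.bin',     # placeholder IR path
        'exec_log_dir': './results',
        'keep_uncompressed_weights': False,  # triggers compress_model_weights()
    },
    'engine': {
        'type': 'accuracy_checker',  # any other type takes the custom data loader branch
        'evaluate': True,            # makes optimize() return pipeline.evaluate(...)
    },
    'compression': {
        'target_device': 'ANY',
        'algorithms': [{
            'name': 'DefaultQuantization',
            'params': {'preset': 'performance', 'stat_subset_size': 300},
        }],
    },
})

metrics = optimize(config)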
Example #2
def test_ranger_graph(_params, tmp_path, models):
    model_name, model_framework = _params

    algorithm_config = Dict({
        'algorithms': [{
            'name': 'Ranger',
            'params': {
                'target_device': 'ANY',
                'stat_subset_size': 100
            }
        }]
    })

    model = models.get(model_name, model_framework, tmp_path)

    engine_config = get_engine_config(model_name)
    config = merge_configs(model.model_params, engine_config, algorithm_config)

    model = load_model(config.model)
    engine = create_engine(config.engine, data_loader=None, metric=None)
    pipeline = create_pipeline(config.compression.algorithms, engine)

    optimized_model = pipeline.run(model)
    check_model(tmp_path, optimized_model, model_name + '_ranger',
                model_framework)
Example #3
def optimize_model(args):
    model_config, engine_config, dataset_config, algorithms = get_configs(args)

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = ImageNetDataLoader(dataset_config)

    # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
    metric = Accuracy(top_k=1)

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = IEEngine(engine_config, data_loader, metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    #                    in order to reduce the size of final .bin file.
    compress_model_weights(compressed_model)

    return compressed_model, pipeline
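
The Accuracy metric created in Step 3 implements POT's Metric interface. As a rough sketch, a minimal top-1 accuracy metric under the older compression.api.Metric contract (value, avg_value, update, reset, get_attributes) could look like the following; the class name and internal fields are illustrative, not from the original sample.

import numpy as np
from compression.api import Metric  # newer releases expose this as openvino.tools.pot.Metric

class TopK(Metric):
    """Hypothetical minimal top-k accuracy metric."""

    def __init__(self, top_k=1):
        super().__init__()
        self._top_k = top_k
        self._name = 'accuracy@top{}'.format(top_k)
        self._matches = []

    @property
    def value(self):
        # Value for the most recently processed batch.
        return {self._name: self._matches[-1]}

    @property
    def avg_value(self):
        # Average over all processed samples.
        flat = [m for batch in self._matches for m in batch]
        return {self._name: sum(flat) / len(flat)}

    def update(self, output, target):
        # output: list of network output arrays for the batch; target: list of labels.
        predictions = np.argsort(output[0], axis=1)[:, -self._top_k:]
        self._matches.append(
            [float(t in p) for t, p in zip(target, predictions)])

    def reset(self):
        self._matches = []

    def get_attributes(self):
        # Tells accuracy-aware algorithms which direction is "better".
        return {self._name: {'direction': 'higher-better', 'type': 'accuracy'}}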
Example #4
def test_ranger_graph(_params, tmp_path, models):
    model_name, model_framework = _params

    algorithm_config = Dict({
        'algorithms': [{
            'name': 'Ranger',
            'params': {
                'target_device': 'ANY',
                'stat_subset_size': 1
            }
        }]
    })

    model = models.get(model_name, model_framework, tmp_path)

    test_dir = Path(__file__).parent
    path_image_data = os.path.join(test_dir, 'data/image_data')
    engine_config = Dict({'device': 'CPU',
                          'type': 'simplified',
                          'data_source': path_image_data})
    config = merge_configs(model.model_params, engine_config, algorithm_config)

    model = load_model(config.model)
    data_loader = create_data_loader(engine_config, model)
    engine = create_engine(config.engine, data_loader=data_loader, metric=None)
    pipeline = create_pipeline(config.compression.algorithms, engine)

    optimized_model = pipeline.run(model)
    check_model(tmp_path, optimized_model, model_name + '_ranger', model_framework)
Example #5
def optimize_model(args):
    model_config, engine_config, dataset_config, algorithms = get_configs(args)
    data_loader = ArkDataLoader(dataset_config)
    engine = ArkEngine(config=engine_config, data_loader=data_loader)
    pipeline = create_pipeline(algorithms, engine)

    model = load_model(model_config, target_device='GNA')
    return pipeline.run(model)
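
ArkDataLoader above is a custom loader for GNA speech data; any custom loader just implements POT's DataLoader interface (__len__ and __getitem__). A minimal, hypothetical sketch under the older compression.api convention, where __getitem__ returns (annotation, data) with annotation = (index, label):

import glob
import os

import numpy as np
from compression.api import DataLoader  # newer releases: openvino.tools.pot.DataLoader

class FolderDataLoader(DataLoader):
    """Hypothetical loader serving pre-processed numpy arrays from a directory."""

    def __init__(self, config):
        super().__init__(config)
        self._files = sorted(
            glob.glob(os.path.join(config['data_source'], '*.npy')))

    def __len__(self):
        return len(self._files)

    def __getitem__(self, index):
        # Older POT convention: ((index, label), data); label may be None
        # when annotations are not needed (e.g. DefaultQuantization statistics).
        data = np.load(self._files[index])
        return (index, None), data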
Example #6
def test_unify_scales(_params, tmp_path, models):
    model_name, model_framework, algorithm, preset = _params

    algorithm_config = Dict({
        'algorithms': [{
            'name': algorithm,
            'params': {
                'target_device': 'VPU',
                'preset': preset,
                'stat_subset_size': 2
            }
        }]
    })

    def _test_unify_scales(model_, to_unify_):
        for _, fqs in to_unify_:
            ranges = []
            for fq in fqs:
                fq = get_node_by_name(model_, fq)
                fq_inputs = nu.get_node_inputs(fq)[1:]
                ranges.append(
                    tuple(
                        fqut.get_node_value(fq_input)
                        for fq_input in fq_inputs))
                assert all([
                    np.array_equal(r, ranges[0][i])
                    for i, r in enumerate(ranges[-1])
                ])

    model = models.get(model_name, model_framework, tmp_path)

    engine_config = get_engine_config(model_name)
    config = merge_configs(model.model_params, engine_config, algorithm_config)

    model = load_model(config.model)
    pipeline = create_pipeline(config.compression.algorithms,
                               ACEngine(config.engine))
    compressed_model = pipeline.run(model)

    to_unify = fqut.find_fqs_to_unify(
        compressed_model, config.compression.algorithms[0]['params'])
    _test_unify_scales(compressed_model, to_unify)

    ref_path = REFERENCES_PATH.joinpath(model_name + '_to_unify.json')
    if ref_path.exists():
        with open(ref_path.as_posix(), 'r') as f:
            to_unify_ref = json.load(f)
            assert to_unify == to_unify_ref
    else:
        with open(ref_path.as_posix(), 'w+') as f:
            json.dump(to_unify, f, indent=4)
Example #7
def run_algo(model, model_name, algorithm_config, tmp_path, reference_name):
    engine_config = get_engine_config(model_name)
    config = merge_configs(model.model_params, engine_config, algorithm_config)

    model = load_model(model.model_params)
    data_loader = create_data_loader(engine_config, model)
    engine = create_engine(engine_config, data_loader=data_loader, metric=None)
    pipeline = create_pipeline(algorithm_config.algorithms, engine)

    with torch.backends.mkldnn.flags(enabled=False):
        model = pipeline.run(model)
    paths = save_model(model, tmp_path.as_posix(), reference_name)
    engine.set_model(model)
    metrics = evaluate(config=config, subset=range(1000), paths=paths)
    metrics = OrderedDict([(metric.name, np.mean(metric.evaluated_value))
                           for metric in metrics])

    return metrics, model
Example #8
def test_load_tool_config(config_name, tmp_path, models):
    tool_config_path = TOOL_CONFIG_PATH.joinpath(config_name).as_posix()
    config = Config.read_config(tool_config_path)
    config.configure_params()

    config.engine.log_dir = tmp_path.as_posix()
    config.engine.evaluate = True

    model_name, model_framework = TEST_MODEL
    model = models.get(model_name, model_framework, tmp_path)
    config.model.model = model.model_params.model
    config.model.weights = model.model_params.weights
    provide_dataset_path(config.engine)
    ConfigReader.convert_paths(config.engine)

    pipeline = create_pipeline(config.compression.algorithms, ACEngine(config.engine))

    model = load_model(config.model)
    assert not isinstance(model, int)
    assert pipeline.run(model)
Example #9
    def compress_model():
        telemetry.value = set()
        tool_config_path = TELEMETRY_CONFIG_PATH.joinpath(
            config_name).as_posix()
        config = Config.read_config(tool_config_path)
        config.configure_params()

        config.engine.log_dir = tmp_path.as_posix()
        config.engine.evaluate = True

        model_name, model_framework = TEST_MODEL
        model = models.get(model_name, model_framework, tmp_path)
        config.model.model = model.model_params.model
        config.model.weights = model.model_params.weights

        provide_dataset_path(config.engine)
        ConfigReader.convert_paths(config.engine)

        pipeline = create_pipeline(config.compression.algorithms,
                                   ACEngine(config.engine), 'CLI')
        model = load_model(config.model)
        pipeline.run(model)

        assert set(telemetry.value) == set(expected[config_name])
Example #10
def main():
    parser = ArgumentParser(description='Post-training Compression Toolkit '
                            'Face Detection Sample')
    parser.add_argument('-pm',
                        '--pnet-model',
                        help='Path to .xml of proposal network',
                        required=True)
    parser.add_argument('-pw',
                        '--pnet-weights',
                        help='Path to .bin of proposal network')
    parser.add_argument('-rm',
                        '--rnet-model',
                        help='Path to .xml of refine network',
                        required=True)
    parser.add_argument('-rw',
                        '--rnet-weights',
                        help='Path to .bin of refine network')
    parser.add_argument('-om',
                        '--onet-model',
                        help='Path to .xml of output network',
                        required=True)
    parser.add_argument('-ow',
                        '--onet-weights',
                        help='Path to .bin of output network')
    parser.add_argument('-d',
                        '--dataset',
                        help='Path to the directory with images',
                        required=True)
    parser.add_argument('-a',
                        '--annotation-file',
                        help='File with WIDER FACE annotations in .txt format',
                        required=True)

    args = parser.parse_args()

    model_config = Dict({
        'model_name': 'mtcnn',
        'cascade': [{
            'name': 'pnet',
            'model': os.path.expanduser(args.pnet_model),
            'weights': os.path.expanduser(
                args.pnet_weights if args.pnet_weights
                else args.pnet_model.replace('.xml', '.bin'))
        }, {
            'name': 'rnet',
            'model': os.path.expanduser(args.rnet_model),
            'weights': os.path.expanduser(
                args.rnet_weights if args.rnet_weights
                else args.rnet_model.replace('.xml', '.bin'))
        }, {
            'name': 'onet',
            'model': os.path.expanduser(args.onet_model),
            'weights': os.path.expanduser(
                args.onet_weights if args.onet_weights
                else args.onet_model.replace('.xml', '.bin'))
        }]
    })

    engine_config = Dict({
        'device': 'CPU',
        'outputs': {
            'probabilities': ['prob1', 'prob1', 'prob1'],
            'regions': ['conv4-2', 'conv5-2', 'conv6-2']
        }
    })

    dataset_config = Dict({
        'data_source': os.path.expanduser(args.dataset),
        'annotation_file': os.path.expanduser(args.annotation_file)
    })

    algorithms = [{
        'name': 'DefaultQuantization',
        'params': {
            'target_device': 'ANY',
            'preset': 'performance',
            'stat_subset_size': 300
        }
    }]

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = WiderFaceLoader(dataset_config)

    # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
    metric = Recall()

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = MTCNNEngine(config=engine_config,
                         data_loader=data_loader,
                         metric=metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    #                    in order to reduce the size of final .bin file.
    compress_model_weights(compressed_model)

    # Step 8: Save the compressed model to the desired path.
    compressed_model.save(os.path.join(os.path.curdir, 'optimized'))

    # Step 9 (Optional): Evaluate the compressed model. Print the results.
    metric_results = pipeline.evaluate(compressed_model)
    if metric_results:
        for name, value in metric_results.items():
            print('{: <27s}: {}'.format(name, value))
Example #11
def main():
    parser = get_common_argparser()
    parser.add_argument('--mask-dir',
                        help='Path to the directory with segmentation masks',
                        required=True)

    args = parser.parse_args()
    if not args.weights:
        args.weights = '{}.bin'.format(os.path.splitext(args.model)[0])

    model_config = Dict({
        'model_name': 'brain-tumor-segmentation-0002',
        'model': os.path.expanduser(args.model),
        'weights': os.path.expanduser(args.weights)
    })

    engine_config = Dict({
        'device': 'CPU',
        'stat_requests_number': 4,
        'eval_requests_number': 4
    })

    dataset_config = Dict({
        'data_source': os.path.expanduser(args.dataset),
        'mask_dir': os.path.expanduser(args.mask_dir),
        'modality_order': [1, 2, 3, 0],
        'size': (128, 128, 128)
    })

    algorithms = [{
        'name': 'DefaultQuantization',
        'params': {
            'target_device': 'ANY',
            'preset': 'performance',
            'stat_subset_size': 200
        }
    }]

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = BRATSDataLoader(dataset_config)

    # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
    metric = DiceIndex(num_classes=4)

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = SegmentationEngine(config=engine_config,
                                data_loader=data_loader,
                                metric=metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    #                     in order to reduce the size of final .bin file.
    compress_model_weights(compressed_model)

    # Step 8: Save the compressed model to the desired path.
    save_model(compressed_model, os.path.join(os.path.curdir, 'optimized'))

    # Step 9 (Optional): Evaluate the compressed model. Print the results.
    metric_results = pipeline.evaluate(compressed_model)
    if metric_results:
        for name, value in metric_results.items():
            print('{: <27s}: {}'.format(name, value))
Example #12
def main():
    parser = get_common_argparser()
    parser.add_argument('--annotation-path',
                        help='Path to the directory with annotation file',
                        required=True)
    args = parser.parse_args()
    if not args.weights:
        args.weights = '{}.bin'.format(os.path.splitext(args.model)[0])

    model_config = Dict({
        'model_name': 'ssd_mobilenet_v1_fpn',
        'model': os.path.expanduser(args.model),
        'weights': os.path.expanduser(args.weights)
    })

    engine_config = Dict({'device': 'CPU'})

    dataset_config = Dict({
        'images_path': os.path.expanduser(args.dataset),
        'annotation_path': os.path.expanduser(args.annotation_path),
    })
    algorithms = [{
        'name': 'AccuracyAwareQuantization',
        'params': {
            'target_device': 'ANY',
            'preset': 'mixed',
            'stat_subset_size': 300,
            'maximal_drop': 0.004
        }
    }]

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = COCOLoader(dataset_config)
    # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
    metric = MAP(91, data_loader.labels)

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = IEEngine(config=engine_config,
                      data_loader=data_loader,
                      metric=metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    #                    in order to reduce the size of final .bin file.
    compress_model_weights(compressed_model)

    # Step 8: Save the compressed model to the desired path.
    save_model(compressed_model, os.path.join(os.path.curdir, 'optimized'))

    # Step 9 (Optional): Evaluate the compressed model. Print the results.
    metric_results = pipeline.evaluate(compressed_model)
    if metric_results:
        for name, value in metric_results.items():
            print('{: <27s}: {}'.format(name, value))