Example #1
def optimize_model(args):
    model_config, engine_config, dataset_config, algorithms = get_configs(args)

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = ImageNetDataLoader(dataset_config)

    # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
    metric = Accuracy(top_k=1)

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = IEEngine(engine_config, data_loader, metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    #                    in order to reduce the size of final .bin file.
    compress_model_weights(compressed_model)

    return compressed_model, pipeline
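A note on imports: the snippet above assumes the OpenVINO Post-training Optimization Tool (POT) API. In OpenVINO 2022.x the public entry points live under openvino.tools.pot (earlier releases shipped them in a standalone compression package), while get_configs, ImageNetDataLoader and Accuracy are helpers local to the sample:

# Assumed imports for the sample above (OpenVINO 2022.x module layout;
# get_configs, ImageNetDataLoader and Accuracy come from the sample itself).
from openvino.tools.pot import IEEngine, load_model, compress_model_weights, create_pipeline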
Example #2
def optimize_model(args):
    model_config, engine_config, dataset_config, algorithms = get_configs(args)
    data_loader = ArkDataLoader(dataset_config)
    engine = ArkEngine(config=engine_config, data_loader=data_loader)
    pipeline = create_pipeline(algorithms, engine)

    model = load_model(model_config, target_device='GNA')
    return pipeline.run(model)
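For context, a hypothetical driver for this GNA sample; `args` would come from the sample's own argument parser, and save_model from the POT API:

# Hypothetical usage of optimize_model() above.
compressed_model = optimize_model(args)
save_model(compressed_model, os.path.join(os.path.curdir, 'optimized'))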
Example #3
def main():
    parser = get_common_argparser()
    parser.add_argument('--mask-dir',
                        help='Path to the directory with segmentation masks',
                        required=True)
    parser.add_argument('--imageset-file',
                        help='Path to the ImageSet file',
                        required=True)

    args = parser.parse_args()
    if not args.weights:
        args.weights = '{}.bin'.format(os.path.splitext(args.model)[0])

    model_config = {
        'model_name': 'deeplabv3',
        'model': os.path.expanduser(args.model),
        'weights': os.path.expanduser(args.weights)
    }

    engine_config = {
        'device': 'CPU',
        'stat_requests_number': 4,
        'eval_requests_number': 4
    }

    dataset_config = {
        'data_source': os.path.expanduser(args.dataset),
        'mask_dir': os.path.expanduser(args.mask_dir),
        'imageset_file': os.path.expanduser(args.imageset_file),
        'image_size': 513
    }

    algorithms = [{
        'name': 'DefaultQuantization',
        'params': {
            'target_device': 'ANY',
            'preset': 'performance',
            'stat_subset_size': 300
        }
    }]

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = VOCSegmentationLoader(dataset_config)

    # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
    metric = MeanIOU(num_classes=21)

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = IEEngine(config=engine_config,
                      data_loader=data_loader,
                      metric=metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    #                    in order to reduce the size of final .bin file.
    compress_model_weights(compressed_model)

    # Step 8: Save the compressed model to the desired path.
    save_model(compressed_model, os.path.join(os.path.curdir, 'optimized'))

    # Step 9 (Optional): Evaluate the compressed model. Print the results.
    metric_results = pipeline.evaluate(compressed_model)
    if metric_results:
        for name, value in metric_results.items():
            print('{: <27s}: {}'.format(name, value))
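This sample and the last one below rely on a shared get_common_argparser helper. A plausible sketch, with flag names inferred from how args is used (args.model, args.weights, args.dataset); the real helper may differ:

from argparse import ArgumentParser

def get_common_argparser():
    # Flags inferred from usage in the samples above; illustrative only.
    parser = ArgumentParser(description='Post-training Compression Toolkit sample')
    parser.add_argument('-m', '--model', help='Path to the .xml file of the model', required=True)
    parser.add_argument('-w', '--weights', help='Path to the .bin file with the model weights')
    parser.add_argument('-d', '--dataset', help='Path to the directory with the dataset', required=True)
    return parser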
Example #4
def main():
    parser = ArgumentParser(description='Post-training Compression Toolkit '
                            'Face Detection Sample')
    parser.add_argument('-pm',
                        '--pnet-model',
                        help='Path to .xml of proposal network',
                        required=True)
    parser.add_argument('-pw',
                        '--pnet-weights',
                        help='Path to .bin of proposal network')
    parser.add_argument('-rm',
                        '--rnet-model',
                        help='Path to .xml of refine network',
                        required=True)
    parser.add_argument('-rw',
                        '--rnet-weights',
                        help='Path to .bin of refine network')
    parser.add_argument('-om',
                        '--onet-model',
                        help='Path to .xml of output network',
                        required=True)
    parser.add_argument('-ow',
                        '--onet-weights',
                        help='Path to .bin of output network')
    parser.add_argument('-d',
                        '--dataset',
                        help='Path to the directory with images',
                        required=True)
    parser.add_argument('-a',
                        '--annotation-file',
                        help='File with WIDER FACE annotations in .txt format',
                        required=True)

    args = parser.parse_args()

    model_config = Dict({
        'model_name': 'mtcnn',
        'cascade': [{
            'name': 'pnet',
            'model': os.path.expanduser(args.pnet_model),
            'weights': os.path.expanduser(args.pnet_weights if args.pnet_weights
                                          else args.pnet_model.replace('.xml', '.bin'))
        }, {
            'name': 'rnet',
            'model': os.path.expanduser(args.rnet_model),
            'weights': os.path.expanduser(args.rnet_weights if args.rnet_weights
                                          else args.rnet_model.replace('.xml', '.bin'))
        }, {
            'name': 'onet',
            'model': os.path.expanduser(args.onet_model),
            'weights': os.path.expanduser(args.onet_weights if args.onet_weights
                                          else args.onet_model.replace('.xml', '.bin'))
        }]
    })

    engine_config = Dict({
        'device': 'CPU',
        'outputs': {
            'probabilities': ['prob1', 'prob1', 'prob1'],
            'regions': ['conv4-2', 'conv5-2', 'conv6-2']
        }
    })

    dataset_config = Dict({
        'data_source': os.path.expanduser(args.dataset),
        'annotation_file': os.path.expanduser(args.annotation_file)
    })

    algorithms = [{
        'name': 'DefaultQuantization',
        'params': {
            'target_device': 'ANY',
            'preset': 'performance',
            'stat_subset_size': 300
        }
    }]

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = WiderFaceLoader(dataset_config)

    # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
    metric = Recall()

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = MTCNNEngine(config=engine_config,
                         data_loader=data_loader,
                         metric=metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    #                    in order to reduce the size of final .bin file.
    compress_model_weights(compressed_model)

    # Step 8: Save the compressed model to the desired path.
    compressed_model.save(os.path.join(os.path.curdir, 'optimized'))

    # Step 9 (Optional): Evaluate the compressed model. Print the results.
    metric_results = pipeline.evaluate(compressed_model)
    if metric_results:
        for name, value in metric_results.items():
            print('{: <27s}: {}'.format(name, value))
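The Dict wrapper used for the configs in this sample is assumed to be addict's Dict, which the POT samples use so nested config keys can be read as attributes; a minimal illustration:

from addict import Dict

# addict's Dict allows attribute-style access to nested keys.
config = Dict({'device': 'CPU', 'outputs': {'regions': ['conv4-2']}})
assert config.device == 'CPU'
assert config.outputs.regions[0] == 'conv4-2'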
Example #5
def main():
    parser = get_common_argparser()
    parser.add_argument('--annotation-path',
                        help='Path to the directory with annotation file',
                        required=True)
    args = parser.parse_args()
    if not args.weights:
        args.weights = '{}.bin'.format(os.path.splitext(args.model)[0])

    model_config = Dict({
        'model_name': 'ssd_mobilenet_v1_fpn',
        'model': os.path.expanduser(args.model),
        'weights': os.path.expanduser(args.weights)
    })

    engine_config = Dict({'device': 'CPU'})

    dataset_config = Dict({
        'images_path': os.path.expanduser(args.dataset),
        'annotation_path': os.path.expanduser(args.annotation_path),
    })
    algorithms = [{
        'name': 'AccuracyAwareQuantization',
        'params': {
            'target_device': 'ANY',
            'preset': 'mixed',
            'stat_subset_size': 300,
            'maximal_drop': 0.004
        }
    }]

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = COCOLoader(dataset_config)

    # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
    metric = MAP(91, data_loader.labels)

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = IEEngine(config=engine_config,
                      data_loader=data_loader,
                      metric=metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    #                    in order to reduce the size of final .bin file.
    compress_model_weights(compressed_model)

    # Step 8: Save the compressed model to the desired path.
    save_model(compressed_model, os.path.join(os.path.curdir, 'optimized'))

    # Step 9 (Optional): Evaluate the compressed model. Print the results.
    metric_results = pipeline.evaluate(compressed_model)
    if metric_results:
        for name, value in metric_results.items():
            print('{: <27s}: {}'.format(name, value))
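Unlike DefaultQuantization in the earlier samples, AccuracyAwareQuantization actually consumes the Step 3 metric: it keeps the measured accuracy drop within maximal_drop (an absolute 0.004 here). A minimal sketch of the POT Metric interface that the sample metrics (Accuracy, MeanIOU, Recall, MAP) are assumed to implement; SketchAccuracy is hypothetical and assumes batch size 1:

from openvino.tools.pot import Metric

class SketchAccuracy(Metric):
    # Hypothetical top-1 accuracy metric; `output` is assumed to be a list
    # with one logits array per model output, `target` a list of labels.
    def __init__(self):
        super().__init__()
        self._matches = []

    @property
    def value(self):
        # Metric value for the most recent batch.
        return {'accuracy': [self._matches[-1]]}

    @property
    def avg_value(self):
        # Metric value averaged over the whole dataset.
        return {'accuracy': sum(self._matches) / len(self._matches)}

    def update(self, output, target):
        # Compare the top prediction with the reference label.
        prediction = output[0].flatten().argmax()
        self._matches.append(float(prediction == target[0]))

    def reset(self):
        self._matches = []

    def get_attributes(self):
        return {'accuracy': {'direction': 'higher-better', 'type': 'accuracy'}}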