Example #1
import os

# parse_args, init_logger, optimize_model and save_model are sample helpers
# defined elsewhere; only main() is shown here.
def main():
    args = parse_args()
    out_dir = os.path.expanduser(args.output)
    os.makedirs(out_dir, exist_ok=True)
    init_logger(level=args.log_level,
                file_name=os.path.join(out_dir, 'log.txt'))
    compressed_model = optimize_model(args)
    save_model(compressed_model, out_dir)
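
The sample's init_logger helper is not shown in this snippet. A minimal sketch of what such a helper might look like, built on Python's standard logging module (the signature and format string are assumptions, not the sample's actual implementation):

import logging
import sys

def init_logger(level, file_name=None):
    # Hypothetical reconstruction: send log records to stderr and, when a
    # file name is given, also to a log file next to the optimized model.
    handlers = [logging.StreamHandler(sys.stderr)]
    if file_name:
        handlers.append(logging.FileHandler(file_name))
    logging.basicConfig(level=level, handlers=handlers,
                        format='%(asctime)s %(levelname)s %(message)s')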
Example #2
import os

# get_common_argparser, optimize_model and save_model are sample helpers
# defined elsewhere; optimize_model wraps the whole optimization pipeline
# (steps 1-7).
def main():
    argparser = get_common_argparser()
    argparser.add_argument(
        '-a',
        '--annotation-file',
        help='File with ImageNet annotations in .txt format',
        required=True)

    # Steps 1-7: Model optimization
    args = argparser.parse_args()
    compressed_model, pipeline = optimize_model(args)

    # Step 8: Save the compressed model to the desired path.
    save_model(compressed_model, os.path.join(os.path.curdir, 'optimized'))

    # Step 9 (Optional): Evaluate the compressed model. Print the results.
    metric_results = pipeline.evaluate(compressed_model)
    if metric_results:
        for name, value in metric_results.items():
            print('{: <27s}: {}'.format(name, value))
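
Examples #2-#4 build on get_common_argparser(), a shared helper defined elsewhere in the sample package. Judging from the attributes the examples read (args.model, args.weights, args.dataset), a minimal stand-in could look like this; the flags and help strings are assumptions, not the real helper:

import argparse

def get_common_argparser():
    # Hypothetical reconstruction of the shared argument parser.
    parser = argparse.ArgumentParser(description='Post-training optimization sample')
    parser.add_argument('-m', '--model', required=True,
                        help='Path to the model file (.xml)')
    parser.add_argument('-w', '--weights',
                        help='Path to the weights file (.bin); derived from the model path if omitted')
    parser.add_argument('-d', '--dataset', required=True,
                        help='Path to the validation data')
    return parser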
Example #3
import os

from openvino.tools.pot import (IEEngine, load_model, save_model,
                                compress_model_weights, create_pipeline)

# Note: in older POT releases these names lived under the compression.*
# packages. VOCSegmentationLoader, MeanIOU and get_common_argparser are
# sample helpers defined elsewhere.
def main():
    parser = get_common_argparser()
    parser.add_argument('--mask-dir',
                        help='Path to the directory with segmentation masks',
                        required=True)
    parser.add_argument('--imageset-file',
                        help='Path to the ImageSet file',
                        required=True)

    args = parser.parse_args()
    if not args.weights:
        args.weights = '{}.bin'.format(os.path.splitext(args.model)[0])

    model_config = {
        'model_name': 'deeplabv3',
        'model': os.path.expanduser(args.model),
        'weights': os.path.expanduser(args.weights)
    }

    engine_config = {
        'device': 'CPU',
        'stat_requests_number': 4,
        'eval_requests_number': 4
    }

    dataset_config = {
        'data_source': os.path.expanduser(args.dataset),
        'mask_dir': os.path.expanduser(args.mask_dir),
        'imageset_file': os.path.expanduser(args.imageset_file),
        'image_size': 513
    }

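    # DefaultQuantization applies uniform 8-bit quantization without an
    # accuracy check. The 'performance' preset selects symmetric quantization
    # for both weights and activations, and 'stat_subset_size' caps how many
    # dataset samples are used to collect activation statistics.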
    algorithms = [{
        'name': 'DefaultQuantization',
        'params': {
            'target_device': 'ANY',
            'preset': 'performance',
            'stat_subset_size': 300
        }
    }]

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = VOCSegmentationLoader(dataset_config)

    # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
    metric = MeanIOU(num_classes=21)  # 20 Pascal VOC classes + background

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = IEEngine(config=engine_config,
                      data_loader=data_loader,
                      metric=metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    #                    in order to reduce the size of final .bin file.
    compress_model_weights(compressed_model)

    # Step 8: Save the compressed model to the desired path.
    save_model(compressed_model, os.path.join(os.path.curdir, 'optimized'))

    # Step 9 (Optional): Evaluate the compressed model. Print the results.
    metric_results = pipeline.evaluate(compressed_model)
    if metric_results:
        for name, value in metric_results.items():
            print('{: <27s}: {}'.format(name, value))
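
VOCSegmentationLoader is defined elsewhere in the sample. For orientation, a minimal sketch of what a POT DataLoader subclass for this dataset_config might look like; the class name, the cv2 dependency, and the preprocessing are assumptions, and the __getitem__ return convention has changed between POT releases, so check the docs for your version:

import os

import cv2  # the real sample may use a different image backend
from openvino.tools.pot import DataLoader  # compression.api in older releases


class SegmentationLoader(DataLoader):
    """Hypothetical stand-in for the sample's VOCSegmentationLoader."""

    def __init__(self, config):
        super().__init__(config)
        self._size = config['image_size']
        self._data_source = config['data_source']
        self._mask_dir = config['mask_dir']
        with open(config['imageset_file']) as f:
            self._ids = [line.strip() for line in f if line.strip()]

    def __len__(self):
        return len(self._ids)

    def __getitem__(self, index):
        # Assumed convention: return (annotation, image) with annotation as
        # (index, ground_truth); newer POT releases expect (data, annotation).
        if index >= len(self):
            raise IndexError
        image = cv2.imread(os.path.join(self._data_source, self._ids[index] + '.jpg'))
        image = cv2.resize(image, (self._size, self._size))
        mask = cv2.imread(os.path.join(self._mask_dir, self._ids[index] + '.png'),
                          cv2.IMREAD_GRAYSCALE)
        return (index, mask), image

An instance built from dataset_config would then be handed to IEEngine in Step 4 exactly as the sample's loader is.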
Example #4
import os

from addict import Dict
from openvino.tools.pot import (IEEngine, load_model, save_model,
                                compress_model_weights, create_pipeline)

# Note: the POT samples take Dict from the addict package; COCOLoader, MAP
# and get_common_argparser are sample helpers defined elsewhere.
def main():
    parser = get_common_argparser()
    parser.add_argument('--annotation-path',
                        help='Path to the directory with annotation file',
                        required=True)
    args = parser.parse_args()
    if not args.weights:
        args.weights = '{}.bin'.format(os.path.splitext(args.model)[0])

    model_config = Dict({
        'model_name': 'ssd_mobilenet_v1_fpn',
        'model': os.path.expanduser(args.model),
        'weights': os.path.expanduser(args.weights)
    })

    engine_config = Dict({'device': 'CPU'})

    dataset_config = Dict({
        'images_path': os.path.expanduser(args.dataset),
        'annotation_path': os.path.expanduser(args.annotation_path),
    })

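    # AccuracyAwareQuantization first quantizes the whole model, then reverts
    # the most accuracy-sensitive layers to the original precision until the
    # metric drop stays within 'maximal_drop' (an absolute 0.004 here). The
    # 'mixed' preset uses symmetric weights with asymmetric activations.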
    algorithms = [{
        'name': 'AccuracyAwareQuantization',
        'params': {
            'target_device': 'ANY',
            'preset': 'mixed',
            'stat_subset_size': 300,
            'maximal_drop': 0.004
        }
    }]

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = COCOLoader(dataset_config)

    # Step 3: Initialize the metric (required for AccuracyAwareQuantization).
    metric = MAP(91, data_loader.labels)  # 91 = number of COCO class ids

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = IEEngine(config=engine_config,
                      data_loader=data_loader,
                      metric=metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    #                    in order to reduce the size of final .bin file.
    compress_model_weights(compressed_model)

    # Step 8: Save the compressed model to the desired path.
    save_model(compressed_model, os.path.join(os.path.curdir, 'optimized'))

    # Step 9 (Optional): Evaluate the compressed model. Print the results.
    metric_results = pipeline.evaluate(compressed_model)
    if metric_results:
        for name, value in metric_results.items():
            print('{: <27s}: {}'.format(name, value))
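
MAP and MeanIOU are metric classes provided with the sample; the pipeline calls them through the engine so AccuracyAwareQuantization can keep degradation within maximal_drop. As a sketch of the POT Metric interface (property and method names as documented for openvino.tools.pot; the toy top-1 accuracy logic is an assumption for illustration, not the sample's MAP):

import numpy as np
from openvino.tools.pot import Metric  # compression.api in older releases


class TopOneAccuracy(Metric):
    """Illustrative top-1 accuracy metric; not the sample's MAP or MeanIOU."""

    def __init__(self):
        super().__init__()
        self._name = 'accuracy@top1'
        self._matches = []

    @property
    def value(self):
        # Value computed on the most recent batch.
        return {self._name: self._matches[-1]}

    @property
    def avg_value(self):
        # Average over all batches seen since the last reset().
        return {self._name: float(np.mean(self._matches))}

    def update(self, output, target):
        # output: list of model output arrays; target: batch of labels.
        predicted = np.argmax(output[0], axis=1)
        self._matches.append(float(np.mean(predicted == np.asarray(target))))

    def reset(self):
        self._matches = []

    def get_attributes(self):
        # Tells the pipeline which direction is better and the metric type.
        return {self._name: {'direction': 'higher-better', 'type': 'accuracy'}}

A metric like this would be passed to IEEngine in Step 4 exactly as MAP is above.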