Example #1
0
def generic_main(args):
    """Shared benchmark entry point: seed RNGs, configure globals, build the
    model, optionally warm up, then run the measured training pass.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI options; this function reads ``workers``, ``seed``,
        ``gpu``, ``no_bench_mode``, ``half``, ``model``, ``batch_size``
        and ``warmup``.

    Side effects: redirects stderr onto stdout, mutates global torch and
    ``utils`` state, and terminates the process with ``sys.exit(0)``.
    """
    import sys
    from MixedPrecision.tools.utils import summary
    import MixedPrecision.tools.loaders as loaders

    # Merge error output into stdout so log lines interleave in order.
    sys.stderr = sys.stdout

    torch.set_num_threads(args.workers)
    # Seed both CPU and every CUDA device for reproducible runs.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    utils.set_use_gpu(args.gpu, not args.no_bench_mode)
    utils.set_use_half(args.half)
    utils.setup(args)
    utils.show_args(args)

    # Look up the model constructor by name in the `models` registry.
    model = models.__dict__[args.model]()

    # NOTE(review): assumes 3x224x224 (ImageNet-sized) inputs — confirm
    # this holds for every model/dataset this entry point is used with.
    summary(model, input_size=(3, 224, 224), batch_size=args.batch_size)

    data = loaders.load_dataset(args, train=True)

    # Optional warmup pass before the measured training run.
    if args.warmup:
        train(args, model, data, args.model, is_warmup=True)

    train(args, model, data, args.model, is_warmup=False)

    sys.exit(0)
Example #2
0
def main():
    """CLI entry point for the ImageNet HDF5 preprocessor.

    Optionally converts a directory of images into an HDF5 dataset
    (``--input`` -> ``--output``), then optionally runs a throughput test
    over the freshly created dataset (``--speed-test``).

    Side effects: reads CLI arguments, writes the HDF5 file, and prints
    timing statistics to stdout.
    """
    from MixedPrecision.tools.loaders import hdf5_loader
    from MixedPrecision.tools.utils import show_args
    import argparse

    parser = argparse.ArgumentParser('Image Net Preprocessor')
    parser.add_argument('--input', type=str, help='Input directory')
    parser.add_argument('--output', type=str, help='Output directory')
    parser.add_argument('--test-only', action='store_true', default=False,
                        help='Do not run the preprocessor')
    parser.add_argument('--speed-test', action='store_true', default=False,
                        help='Run the speed test on the created dataset')
    parser.add_argument('--batch-size', type=int, default=128,
                        help='Batch size to use for the speed test')
    # Fixed typo in the help text: "trest" -> "test".
    parser.add_argument('--workers', type=int, default=4,
                        help='Number of worker to use for the speed test')

    # Resize-only pipeline applied while preprocessing into HDF5.
    t = transforms.Compose([
        transforms.Resize((256, 256))
    ])

    args = parser.parse_args()
    show_args(args)

    if not args.test_only:
        s = time.time()
        preprocess_to_hdf5(t, args.input, args.output)
        e = time.time()
        print('Preprocessed Dataset in {:.4f} min'.format((e - s) / 60))

    if args.speed_test:
        print('Speed test')

        # Create a new args that is usable by our data loader
        args = argparse.Namespace(
            data=args.output,
            workers=args.workers,
            batch_size=args.batch_size
        )
        loader = hdf5_loader(args)
        print(' - {} images available'.format(len(loader.dataset)))

        # Rolling statistics over per-batch load times (first 20 samples
        # are treated as warmup by StatStream).
        load = StatStream(20)
        start = time.time()

        for index, (x, y) in enumerate(loader):
            end = time.time()
            ignore(x, y)
            load += end - start

            # Sample a bounded number of batches so the test stays short.
            if index > 100:
                break

            start = time.time()

        # img/sec: fastest batch (min time) gives max throughput and
        # vice versa, hence min/max are swapped between the two lines.
        print(' - {:.4f} img/sec (min={:.4f}, max={:.4f})'.format(
            args.batch_size / load.avg, args.batch_size / load.max, args.batch_size / load.min))

        print(' - {:.4f} sec (min={:.4f}, max={:.4f}, sd={:.4f})'.format(
            load.avg, load.min, load.max, load.sd))
Example #3
0
    import MixedPrecision.tools.utils as utils

    from apex.fp16_utils import network_to_half

    sys.stderr = sys.stdout

    parser = get_parser()
    args = parser.parse_args()

    torch.set_num_threads(args.workers)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    utils.set_use_gpu(args.gpu, not args.no_bench_mode)
    utils.set_use_half(args.half)
    utils.show_args(args)

    data_loader = load_dataset(args, train=True)

    model = utils.enable_cuda(HybridClassifier())

    if args.half:
        model = network_to_half(model)

    criterion = utils.enable_cuda(HybridLoss())

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay
Example #4
0
def main():
    """Parse CLI options and run the data-loader benchmark.

    Forces GPU and half-precision modes on via ``utils``, echoes the parsed
    options, then delegates the actual work to ``benchmark_loader``.
    """
    # NOTE: the torch.multiprocessing 'spawn' start-method setup recommended
    # by the documentation was tried here and did not work, so it is omitted.

    import MixedPrecision.tools.utils as utils
    import argparse

    arg_parser = argparse.ArgumentParser(description='Data loader Benchmark')

    arg_parser.add_argument('--data', type=str, metavar='DIR',
                            help='path to the dataset location')
    arg_parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
                            help='number of data loading workers (default: 4)')
    arg_parser.add_argument('--epochs', type=int, default=10, metavar='N',
                            help='number of total epochs to run')
    arg_parser.add_argument('-b', '--batch-size', type=int, default=256, metavar='N',
                            help='mini-batch size (default: 256)')
    arg_parser.add_argument('--prof', dest='prof', type=int, default=10,
                            help='Only run N iterations for profiling.')
    arg_parser.add_argument('--loader', type=str, default='pytorch',
                            help='The kind of loader to use (torch, prefetch, benzina, dali, zip)')
    # 'async' is a Python keyword, so this flag is only reachable via
    # getattr(args, 'async') downstream; dest left unchanged for callers.
    arg_parser.add_argument('--async', action='store_true', default=False,
                            help='Use AsyncPrefetcher')

    parsed = arg_parser.parse_args()

    # Benchmark always runs on GPU with half precision enabled.
    utils.set_use_gpu(True, True)
    utils.set_use_half(True)

    utils.show_args(parsed)

    benchmark_loader(parsed)