Code example #1
def main():
    if args.distributed:
        init_distributed()

    model = se_resnet50(num_classes=1000)

    optimizer = optim.SGD(lr=0.6 / 1024 * args.batch_size,
                          momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([50, 70])
    train_loader, test_loader = imagenet_loaders(args.root, args.batch_size, distributed=args.distributed,
                                                 num_train_samples=args.batch_size * 10 if args.debug else None,
                                                 num_test_samples=args.batch_size * 10 if args.debug else None)

    c = [callbacks.AccuracyCallback(), callbacks.AccuracyCallback(k=5),
         callbacks.LossCallback(),
         callbacks.WeightSave('.'),
         reporters.TensorboardReporter('.'),
         reporters.TQDMReporter(range(args.epochs))]

    with SupervisedTrainer(model, optimizer, F.cross_entropy,
                           callbacks=c,
                           scheduler=scheduler,
                           ) as trainer:
        for _ in c[-1]:  # c[-1] is the TQDMReporter; iterating it drives the epoch loop
            trainer.train(train_loader)
            trainer.test(test_loader)
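These homura-based snippets are shown without their imports. A plausible header for example #1 is sketched below; the module paths are a best guess for the homura version these scripts target (treat them as assumptions, not the original file's imports), and args is an argparse namespace parsed at module level in the original script.

# Assumed imports for example #1 -- exact module paths vary across homura versions.
import torch
from torch.nn import functional as F

from homura import optim, lr_scheduler, callbacks, reporters
from homura.trainers import SupervisedTrainer
from senet.se_resnet import se_resnet50  # from the senet.pytorch project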
Code example #2
def main(cfg: Config):
    if cfg.enable_accimage:
        enable_accimage()

    model = resnet50()
    optimizer = optim.SGD(lr=1e-1 * cfg.batch_size * get_num_nodes() / 256, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([30, 60, 80])
    train_loader, test_loader = DATASET_REGISTRY("fast_imagenet" if cfg.use_fast_collate else "imagenet")(
        cfg.batch_size,
        train_size=cfg.batch_size * 50 if cfg.debug else None,
        test_size=cfg.batch_size * 50 if cfg.debug else None,
        num_workers=cfg.num_workers)

    use_multi_gpus = not is_distributed() and torch.cuda.device_count() > 1
    with SupervisedTrainer(model,
                           optimizer,
                           F.cross_entropy,
                           reporters=[reporters.TensorboardReporter(".")],
                           scheduler=scheduler,
                           data_parallel=use_multi_gpus,
                           use_amp=cfg.use_amp,
                           use_cuda_nonblocking=True,
                           use_sync_bn=cfg.use_sync_bn,
                           report_accuracy_topk=5) as trainer:

        for epoch in trainer.epoch_range(cfg.epochs):
            trainer.train(train_loader)
            trainer.test(test_loader)

        print(f"Max Test Accuracy={max(trainer.reporter.history('accuracy/test')):.3f}")
Code example #3
def main():
    model = se_resnet50(num_classes=1000)

    optimizer = optim.SGD(lr=0.6 / 1024 * args.batch_size, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([50, 70])

    c = [callbacks.AccuracyCallback(), callbacks.LossCallback()]
    r = reporters.TQDMReporter(range(args.epochs), callbacks=c)
    tb = reporters.TensorboardReporter(c)
    rep = callbacks.CallbackList(r, tb, callbacks.WeightSave("checkpoints"))

    if args.distributed:
        # DistributedSupervisedTrainer sets up torch.distributed
        if args.local_rank == 0:
            print("\nuse DistributedDataParallel")
        trainer = DistributedSupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=rep, scheduler=scheduler,
                                               init_method=args.init_method, backend=args.backend)
    else:
        multi_gpus = torch.cuda.device_count() > 1
        if multi_gpus:
            print("\nuse DataParallel")
        trainer = SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=rep,
                                    scheduler=scheduler, data_parallel=multi_gpus)
    # if distributed, need to setup loaders after DistributedSupervisedTrainer
    train_loader, test_loader = imagenet_loaders(args.root, args.batch_size, distributed=args.distributed,
                                                 num_train_samples=args.batch_size * 10 if args.debug else None,
                                                 num_test_samples=args.batch_size * 10 if args.debug else None)
    for _ in r:
        trainer.train(train_loader)
        trainer.test(test_loader)
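The ordering comment in this example is load-bearing: with distributed=True the ImageNet loaders build a torch.utils.data.distributed.DistributedSampler, which defaults its rank and world size to queries against the default process group. A minimal illustration of that dependency (train_dataset is a placeholder):

import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler

# DistributedSampler falls back to dist.get_world_size() / dist.get_rank(),
# which raise unless the default process group is initialized -- here it is
# set up by DistributedSupervisedTrainer, so loaders must be built after it.
assert dist.is_initialized()
sampler = DistributedSampler(train_dataset)  # train_dataset: placeholder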
Code example #4
File: imagenet.py  Project: zlwdghh/senet.pytorch
def main():
    if is_distributed():
        init_distributed()

    model = se_resnet50(num_classes=1000)

    optimizer = optim.SGD(lr=0.6 / 1024 * args.batch_size,
                          momentum=0.9,
                          weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([50, 70])
    train_loader, test_loader = DATASET_REGISTRY("imagenet")(args.batch_size)

    c = [
        callbacks.AccuracyCallback(),
        callbacks.AccuracyCallback(k=5),
        callbacks.LossCallback(),
        callbacks.WeightSave("."),
        reporters.TensorboardReporter("."),
        reporters.TQDMReporter(range(args.epochs)),
    ]

    with SupervisedTrainer(
            model,
            optimizer,
            F.cross_entropy,
            callbacks=c,
            scheduler=scheduler,
    ) as trainer:
        for _ in c[-1]:  # c[-1] is the TQDMReporter; iterating it drives the epoch loop
            trainer.train(train_loader)
            trainer.test(test_loader)
Code example #5
def test_tb_reporters(c):
    with SupervisedTrainer(model,
                           SGD(lr=0.1),
                           nn.CrossEntropyLoss(),
                           callbacks=[AccuracyCallback(), c]) as t:
        for _ in range(10):
            t.train(loader)
            t.test(loader)
Code example #6
def test_tqdm_reporters():
    c = TQDMReporter(range(4))
    with SupervisedTrainer(model,
                           SGD(lr=0.1),
                           nn.CrossEntropyLoss(),
                           callbacks=[AccuracyCallback(), c]) as t:
        for _ in c:
            t.train(loader)
            t.test(loader)
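Note the contrast with example #5: there a plain range(10) drives the epoch loop, while here the TQDMReporter is both registered as a callback (so metrics reach it) and iterated directly (so the progress bar advances once per epoch).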
Code example #7
File: cifar.py  Project: ddghost/neural_network_model
def main():
    train_loader, test_loader = cifar10_loaders(args.batch_size)

    if args.baseline:
        model = resnet20()
    else:
        model = se_resnet20(num_classes=10, reduction=args.reduction)
    optimizer = optim.SGD(lr=1e-1, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.StepLR(80, 0.1)
    tqdm_rep = reporters.TQDMReporter(range(args.epochs),
                                      callbacks=[callbacks.AccuracyCallback()])
    # Trainer is presumably homura's SupervisedTrainer under an alias in
    # the original file's imports.
    trainer = Trainer(model,
                      optimizer,
                      F.cross_entropy,
                      scheduler=scheduler,
                      callbacks=[tqdm_rep])
    for _ in tqdm_rep:
        trainer.train(train_loader)
        trainer.test(test_loader)
Code example #8
def main():
    if args.distributed:
        init_distributed()
    if args.enable_accimage:
        enable_accimage()

    model = resnet50()
    optimizer = optim.SGD(lr=1e-1 * args.batch_size * get_num_nodes() / 256,
                          momentum=0.9,
                          weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([30, 60, 80])
    c = [callbacks.AccuracyCallback(), callbacks.LossCallback()]
    r = reporters.TQDMReporter(range(args.epochs), callbacks=c)
    tb = reporters.TensorboardReporter(c)
    rep = callbacks.CallbackList(r, tb, callbacks.WeightSave("checkpoints"))
    _train_loader, _test_loader = imagenet_loaders(
        args.root,
        args.batch_size,
        distributed=args.distributed,
        num_train_samples=args.batch_size * 10 if args.debug else None,
        num_test_samples=args.batch_size * 10 if args.debug else None)

    if args.distributed:
        # DistributedSupervisedTrainer sets up torch.distributed
        if args.local_rank == 0:
            print("\nuse DistributedDataParallel\n")
        trainer = DistributedSupervisedTrainer(model,
                                               optimizer,
                                               F.cross_entropy,
                                               callbacks=rep,
                                               scheduler=scheduler,
                                               init_method=args.init_method,
                                               backend=args.backend,
                                               enable_amp=args.enable_amp)
    else:
        use_multi_gpus = torch.cuda.device_count() > 1
        if use_multi_gpus:
            print("\nuse DataParallel\n")
        trainer = SupervisedTrainer(model,
                                    optimizer,
                                    F.cross_entropy,
                                    callbacks=rep,
                                    scheduler=scheduler,  # was missing; scheduler was otherwise unused in this branch
                                    data_parallel=use_multi_gpus)

    for epoch in r:
        if args.use_prefetcher:
            train_loader = prefetcher.DataPrefetcher(_train_loader)
            test_loader = prefetcher.DataPrefetcher(_test_loader)
        else:
            train_loader, test_loader = _train_loader, _test_loader
        # following apex's training scheme
        trainer.train(train_loader)
        trainer.test(test_loader)

    rep.close()
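DataPrefetcher follows NVIDIA Apex's ImageNet example: it wraps a loader and copies the next batch to the GPU on a side CUDA stream while the current batch is being processed; rebuilding it each epoch restarts the underlying iterator. A conceptual sketch of the technique (not homura's actual implementation) follows.

import torch

class Prefetcher:
    """Overlap host-to-device copies with compute via a side CUDA stream."""

    def __init__(self, loader):
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        self.next_batch = None
        self._preload()

    def _preload(self):
        try:
            data, target = next(self.loader)
        except StopIteration:
            self.next_batch = None
            return
        with torch.cuda.stream(self.stream):
            # async copies issued on the side stream overlap with compute
            self.next_batch = (data.cuda(non_blocking=True),
                               target.cuda(non_blocking=True))

    def __iter__(self):
        while self.next_batch is not None:
            # make the compute stream wait for the copies issued above
            torch.cuda.current_stream().wait_stream(self.stream)
            batch = self.next_batch
            self._preload()
            yield batch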
Code example #9
def main(args):
    import sys
    sys.path.append('/home/kp600168/semi')
    from allinone import SEMI_DATASET_REGISTRY, SEMI_TRAINER_REGISTRY
    print(args)
    mnist = SEMI_DATASET_REGISTRY('split')(
        MNIST, args.dataset, 10,
        [tf.Resize((32, 32)), tf.ToTensor(), tf.Normalize((0.1307,), (0.3081,))],
        semi_size=args.semi_size)
    train_loader, test_loader, _, num_classes = mnist(args.batch_size,
                                                      num_workers=0,
                                                      return_num_classes=True)
    lenet = MODEL_REGISTRY(args.model)(num_classes=num_classes)
    kwargs = {
        'bn_list': [lenet.bn1, lenet.bn2, lenet.bn3],
        'sigma_list': [0.3, 0.3, 0.3],
        'v_list': [ConvTranspose2d(16, 6, 10, 2),
                   ConvTranspose2d(120, 16, 10)],
        'lam_list': [0.1, 0.01, 0.01],
    }
    trainer = SEMI_TRAINER_REGISTRY('Ladder')(
        lenet,
        SGD(lr=args.lr_256 * args.batch_size / 256, momentum=0.9),
        F.cross_entropy,
        **kwargs,
        reporters=[TQDMReporter()])

    # for _ in trainer.epoch_range(args.epochs):
    #     trainer.train(train_loader)
    #     trainer.test(test_loader)

    # NOTE: the Ladder trainer above is built but its loop is left commented
    # out; the script instead trains a plain supervised baseline.
    trainer = SupervisedTrainer(lenet,
                                SGD(lr=args.lr_256 * args.batch_size / 256,
                                    momentum=0.9),
                                F.cross_entropy,
                                reporters=[TQDMReporter()])
    for _ in trainer.epoch_range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)

    print(f"Max Accuracy={max(trainer.history['accuracy/test'])}")
Code example #10
def main(cfg):
    if cfg.distributed.enable:
        init_distributed(use_horovod=cfg.distributed.use_horovod,
                         backend=cfg.distributed.backend,
                         init_method=cfg.distributed.init_method)
    if cfg.enable_accimage:
        enable_accimage()

    model = resnet50()
    optimizer = optim.SGD(lr=1e-1 * cfg.batch_size * get_num_nodes() / 256,
                          momentum=0.9,
                          weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([30, 60, 80])
    tq = reporters.TQDMReporter(range(cfg.epochs))
    c = [
        callbacks.AccuracyCallback(),
        callbacks.AccuracyCallback(k=5),
        callbacks.LossCallback(),
        tq,
        reporters.TensorboardReporter("."),
        reporters.IOReporter(".")
    ]
    _train_loader, _test_loader = imagenet_loaders(
        cfg.root,
        cfg.batch_size,
        distributed=cfg.distributed.enable,
        num_train_samples=cfg.batch_size * 10 if cfg.debug else None,
        num_test_samples=cfg.batch_size * 10 if cfg.debug else None)

    use_multi_gpus = not cfg.distributed.enable and torch.cuda.device_count() > 1
    with SupervisedTrainer(model,
                           optimizer,
                           F.cross_entropy,
                           callbacks=c,
                           scheduler=scheduler,
                           data_parallel=use_multi_gpus,
                           use_horovod=cfg.distributed.use_horovod) as trainer:

        for epoch in tq:
            if cfg.use_prefetcher:
                train_loader = prefetcher.DataPrefetcher(_train_loader)
                test_loader = prefetcher.DataPrefetcher(_test_loader)
            else:
                train_loader, test_loader = _train_loader, _test_loader
            # following apex's training scheme
            trainer.train(train_loader)
            trainer.test(test_loader)