Example #1
def main():
    if args.distributed:
        init_distributed()

    model = se_resnet50(num_classes=1000)

    optimizer = optim.SGD(lr=0.6 / 1024 * args.batch_size,
                          momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([50, 70])
    train_loader, test_loader = imagenet_loaders(args.root, args.batch_size, distributed=args.distributed,
                                                 num_train_samples=args.batch_size * 10 if args.debug else None,
                                                 num_test_samples=args.batch_size * 10 if args.debug else None)

    c = [callbacks.AccuracyCallback(), callbacks.AccuracyCallback(k=5),
         callbacks.LossCallback(),
         callbacks.WeightSave('.'),
         reporters.TensorboardReporter('.'),
         reporters.TQDMReporter(range(args.epochs))]

    with SupervisedTrainer(model, optimizer, F.cross_entropy,
                           callbacks=c,
                           scheduler=scheduler,
                           ) as trainer:
        for _ in c[-1]:  # the TQDMReporter (last callback) drives the epoch loop
            trainer.train(train_loader)
            trainer.test(test_loader)
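All of these snippets read a module-level `args` and appear to be excerpts from homura-based training scripts (e.g., senet.pytorch), where `optim.SGD(...)` and `lr_scheduler.MultiStepLR(...)` seem to return partially applied factories that the trainer later binds to the model's parameters. A minimal sketch of the preamble Example #1 assumes; import paths and defaults are assumptions and may differ across homura versions:

import argparse

import torch.nn.functional as F
from homura import callbacks, init_distributed, lr_scheduler, optim, reporters  # module layout is an assumption
from homura.trainers import SupervisedTrainer
from homura.vision.data import imagenet_loaders  # path is an assumption

from senet.se_resnet import se_resnet50  # hypothetical project-local import

p = argparse.ArgumentParser()
p.add_argument("--root", help="ImageNet root directory")
p.add_argument("--batch_size", type=int, default=64)
p.add_argument("--epochs", type=int, default=90)
p.add_argument("--distributed", action="store_true")
p.add_argument("--debug", action="store_true",
               help="use only batch_size * 10 samples for a quick run")
args = p.parse_args()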
Example #2
def main():
    model = {
        "resnet20": resnet20,
        "wrn28_10": wrn28_10
    }[args.model](num_classes=10)
    weight_decay = {"resnet20": 1e-4, "wrn28_10": 5e-4}[args.model]
    lr_decay = {"resnet20": 0.1, "wrn28_10": 0.2}[args.model]
    train_loader, test_loader = cifar10_loaders(args.batch_size)
    optimizer = optim.SGD(lr=1e-1, momentum=0.9, weight_decay=weight_decay)
    scheduler = lr_scheduler.MultiStepLR([100, 150], gamma=lr_decay)
    tq = reporters.TQDMReporter(range(args.epochs), verb=True)
    c = [
        callbacks.AccuracyCallback(),
        callbacks.LossCallback(),
        reporters.IOReporter("."),
        reporters.TensorboardReporter("."),
        callbacks.WeightSave("."), tq
    ]

    with trainers.SupervisedTrainer(model,
                                    optimizer,
                                    F.cross_entropy,
                                    callbacks=c,
                                    scheduler=scheduler) as trainer:
        for _ in tq:
            trainer.train(train_loader)
            trainer.test(test_loader)
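The three dicts keyed by args.model must stay in sync by hand; bundling them into one registry removes that failure mode. A sketch using my own ModelSpec helper, assuming the same resnet20/wrn28_10 factories are importable:

from dataclasses import dataclass
from typing import Callable

@dataclass
class ModelSpec:
    factory: Callable       # builds the model
    weight_decay: float     # SGD weight decay
    lr_decay: float         # MultiStepLR gamma

# one entry per supported model keeps the hyperparameters together
MODEL_SPECS = {
    "resnet20": ModelSpec(resnet20, weight_decay=1e-4, lr_decay=0.1),
    "wrn28_10": ModelSpec(wrn28_10, weight_decay=5e-4, lr_decay=0.2),
}

spec = MODEL_SPECS[args.model]
model = spec.factory(num_classes=10)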
Example #3
def main():
    model = se_resnet50(num_classes=1000)

    optimizer = optim.SGD(lr=0.6 / 1024 * args.batch_size, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([50, 70])

    c = [callbacks.AccuracyCallback(), callbacks.LossCallback()]
    r = reporters.TQDMReporter(range(args.epochs), callbacks=c)
    tb = reporters.TensorboardReporter(c)
    rep = callbacks.CallbackList(r, tb, callbacks.WeightSave("checkpoints"))

    if args.distributed:
        # DistributedSupervisedTrainer sets up torch.distributed
        if args.local_rank == 0:
            print("\nuse DistributedDataParallel")
        trainer = DistributedSupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=rep, scheduler=scheduler,
                                               init_method=args.init_method, backend=args.backend)
    else:
        multi_gpus = torch.cuda.device_count() > 1
        if multi_gpus:
            print("\nuse DataParallel")
        trainer = SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=rep,
                                    scheduler=scheduler, data_parallel=multi_gpus)
    # if distributed, need to setup loaders after DistributedSupervisedTrainer
    train_loader, test_loader = imagenet_loaders(args.root, args.batch_size, distributed=args.distributed,
                                                 num_train_samples=args.batch_size * 10 if args.debug else None,
                                                 num_test_samples=args.batch_size * 10 if args.debug else None)
    for _ in r:
        trainer.train(train_loader)
        trainer.test(test_loader)
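The ordering constraint in the comment above comes from torch.distributed: a DistributedSampler asks the default process group for its rank and world size, so the group must exist before the loaders are built. In plain PyTorch terms (a sketch of the mechanism, not homura's internals):

import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

# 1. the trainer initializes the process group first
#    (init_method/backend correspond to args.init_method/args.backend above)
dist.init_process_group(backend="nccl", init_method="env://")

# 2. only then can DistributedSampler query rank and world size,
#    which is why the loaders are created after the trainer
def make_loader(dataset, batch_size):
    sampler = DistributedSampler(dataset)  # needs an initialized group
    return DataLoader(dataset, batch_size=batch_size, sampler=sampler)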
Example #4
def main():
    if is_distributed():
        init_distributed()

    model = se_resnet50(num_classes=1000)

    optimizer = optim.SGD(lr=0.6 / 1024 * args.batch_size,
                          momentum=0.9,
                          weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([50, 70])
    train_loader, test_loader = DATASET_REGISTRY("imagenet")(args.batch_size)

    c = [
        callbacks.AccuracyCallback(),
        callbacks.AccuracyCallback(k=5),
        callbacks.LossCallback(),
        callbacks.WeightSave("."),
        reporters.TensorboardReporter("."),
        reporters.TQDMReporter(range(args.epochs)),
    ]

    with SupervisedTrainer(
            model,
            optimizer,
            F.cross_entropy,
            callbacks=c,
            scheduler=scheduler,
    ) as trainer:
        for _ in c[-1]:
            trainer.train(train_loader)
            trainer.test(test_loader)
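Unlike Examples #1 and #3, this version resolves the dataset through DATASET_REGISTRY("imagenet"), a name-to-factory lookup. A from-scratch sketch of such a registry pattern (not homura's actual implementation):

from typing import Callable, Dict

_REGISTRY: Dict[str, Callable] = {}

def register(name: str):
    # decorator that stores a loader factory under `name`
    def deco(fn: Callable) -> Callable:
        _REGISTRY[name] = fn
        return fn
    return deco

def DATASET_REGISTRY(name: str) -> Callable:
    return _REGISTRY[name]

@register("imagenet")
def _imagenet_loaders(batch_size: int):
    ...  # would return (train_loader, test_loader)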
Example #5
def main():
    if args.distributed:
        init_distributed()
    if args.enable_accimage:
        enable_accimage()

    model = resnet50()
    optimizer = optim.SGD(lr=1e-1 * args.batch_size * get_num_nodes() / 256,
                          momentum=0.9,
                          weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([30, 60, 80])
    c = [callbacks.AccuracyCallback(), callbacks.LossCallback()]
    r = reporters.TQDMReporter(range(args.epochs), callbacks=c)
    tb = reporters.TensorboardReporter(c)
    rep = callbacks.CallbackList(r, tb, callbacks.WeightSave("checkpoints"))
    _train_loader, _test_loader = imagenet_loaders(
        args.root,
        args.batch_size,
        distributed=args.distributed,
        num_train_samples=args.batch_size * 10 if args.debug else None,
        num_test_samples=args.batch_size * 10 if args.debug else None)

    if args.distributed:
        # DistributedSupervisedTrainer sets up torch.distributed
        if args.local_rank == 0:
            print("\nuse DistributedDataParallel\n")
        trainer = DistributedSupervisedTrainer(model,
                                               optimizer,
                                               F.cross_entropy,
                                               callbacks=rep,
                                               scheduler=scheduler,
                                               init_method=args.init_method,
                                               backend=args.backend,
                                               enable_amp=args.enable_amp)
    else:
        use_multi_gpus = torch.cuda.device_count() > 1
        if use_multi_gpus:
            print("\nuse DataParallel\n")
        trainer = SupervisedTrainer(model,
                                    optimizer,
                                    F.cross_entropy,
                                    callbacks=rep,
                                    scheduler=scheduler,
                                    data_parallel=use_multi_gpus)

    for epoch in r:
        if args.use_prefetcher:
            train_loader = prefetcher.DataPrefetcher(_train_loader)
            test_loader = prefetcher.DataPrefetcher(_test_loader)
        else:
            train_loader, test_loader = _train_loader, _test_loader
        # following apex's training scheme
        trainer.train(train_loader)
        trainer.test(test_loader)

    rep.close()
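prefetcher.DataPrefetcher wraps a loader so that the next batch's host-to-device copy overlaps with the current batch's compute, in the style of NVIDIA apex. A minimal sketch of that pattern, assuming homura's class behaves similarly:

import torch

class DataPrefetcher:
    # stage the next batch's copy on a side CUDA stream so it overlaps
    # with the current batch's compute (apex-style; an assumption about
    # how homura's prefetcher works)
    def __init__(self, loader):
        self.loader = loader
        self.stream = torch.cuda.Stream()

    def __iter__(self):
        it = iter(self.loader)
        nxt = None

        def preload():
            nonlocal nxt
            try:
                x, y = next(it)
            except StopIteration:
                nxt = None
                return
            with torch.cuda.stream(self.stream):  # async copy on side stream
                nxt = (x.cuda(non_blocking=True), y.cuda(non_blocking=True))

        preload()
        while nxt is not None:
            # make the compute stream wait for the staged copy ...
            torch.cuda.current_stream().wait_stream(self.stream)
            x, y = nxt
            # ... and tell the allocator these tensors are now used here
            x.record_stream(torch.cuda.current_stream())
            y.record_stream(torch.cuda.current_stream())
            preload()  # start copying the following batch immediately
            yield x, y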
Example #6
def main(cfg):
    model = {
        "resnet20": resnet20,
        "wrn28_10": wrn28_10
    }[cfg.model](num_classes=10)
    weight_decay = {"resnet20": 1e-4, "wrn28_10": 5e-4}[cfg.model]
    lr_decay = {"resnet20": 0.1, "wrn28_10": 0.2}[cfg.model]
    train_loader, test_loader = vision_loaders("cifar10", cfg.batch_size)
    # when cfg.bn_no_wd is set, the optimizer is built later in set_optimizer below
    optimizer = None if cfg.bn_no_wd else optim.SGD(
        lr=1e-1, momentum=0.9, weight_decay=weight_decay)
    scheduler = lr_scheduler.MultiStepLR([100, 150], gamma=lr_decay)
    tq = reporters.TQDMReporter(range(cfg.epochs), verb=True)
    c = [
        callbacks.AccuracyCallback(),
        callbacks.LossCallback(),
        reporters.IOReporter("."),
        reporters.TensorboardReporter("."),
        callbacks.WeightSave("."), tq
    ]

    if cfg.bn_no_wd:

        def set_optimizer(trainer):
            bn_params = []
            non_bn_parameters = []
            for name, p in trainer.model.named_parameters():
                if "bn" in name:
                    bn_params.append(p)
                else:
                    non_bn_parameters.append(p)
            optim_params = [
                {
                    "params": bn_params,
                    "weight_decay": 0
                },
                {
                    "params": non_bn_parameters,
                    "weight_decay": weight_decay
                },
            ]
            trainer.optimizer = torch.optim.SGD(optim_params,
                                                lr=1e-1,
                                                momentum=0.9)

        # monkey-patch SupervisedTrainer so it builds its own optimizer,
        # giving BatchNorm parameters zero weight decay
        trainers.SupervisedTrainer.set_optimizer = set_optimizer

    with trainers.SupervisedTrainer(model,
                                    optimizer,
                                    F.cross_entropy,
                                    callbacks=c,
                                    scheduler=scheduler) as trainer:

        for _ in tq:
            trainer.train(train_loader)
            trainer.test(test_loader)
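Filtering on "bn" in name only excludes BatchNorm parameters when the layers happen to be named that way; keying on module type is more robust. A sketch with my own split_bn_params helper:

import torch
from torch import nn

def split_bn_params(model: nn.Module):
    # partition parameters by module type rather than by parameter name
    norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)
    bn_params, other_params = [], []
    for module in model.modules():
        params = list(module.parameters(recurse=False))
        (bn_params if isinstance(module, norm_types) else other_params).extend(params)
    return bn_params, other_params

# usage: zero weight decay on normalization layers only
bn_params, other_params = split_bn_params(model)
optimizer = torch.optim.SGD(
    [{"params": bn_params, "weight_decay": 0},
     {"params": other_params, "weight_decay": weight_decay}],
    lr=1e-1, momentum=0.9)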
Example #7
def test(tmp_path, rep, save_freq):
    temp_dir = tmp_path / "test"

    @callbacks.metric_callback_decorator
    def ca(data):
        output, target = data["output"], data["data"][1]
        return {
            i: v
            for i, v in enumerate(metrics.classwise_accuracy(output, target))
        }

    model = nn.Linear(10, 10)
    optimizer = optim.SGD(lr=0.1)

    c = callbacks.CallbackList(
        callbacks.AccuracyCallback(), ca,
        callbacks.WeightSave(save_path=temp_dir, save_freq=save_freq))
    epoch = range(1)
    loader = [(torch.randn(2, 10), torch.zeros(2, dtype=torch.long))
              for _ in range(10)]
    with {
            "tqdm": lambda: reporters.TQDMReporter(epoch, c, temp_dir),
            "logger": lambda: reporters.LoggerReporter(c, temp_dir),
            "tensorboard": lambda: reporters.TensorboardReporter(c, temp_dir)
    }[rep]() as _rep:
        tr = trainers.SupervisedTrainer(model,
                                        optimizer,
                                        F.cross_entropy,
                                        callbacks=_rep,
                                        verb=False)
        if rep == "tqdm":
            epoch = _rep
        for _ in epoch:
            tr.train(loader)
            tr.test(loader)
        tr.exit()

    try:
        # .../test/**/0.pkl
        save_file = list(Path(temp_dir).glob("*/*.pkl"))[0]
    except IndexError as e:
        print(list(Path(temp_dir).glob("*/*")))
        raise e
    tr.resume(save_file)

    c = callbacks.AccuracyCallback()
    with {
            "tqdm": lambda: reporters.TQDMReporter(epoch, c, temp_dir),
            "logger": lambda: reporters.LoggerReporter(c, temp_dir),
            "tensorboard": lambda: reporters.TensorboardReporter(c, temp_dir)
    }[rep]() as _rep:
        inferencer = Inferencer(model, _rep)
        inferencer.load(save_file)
        inferencer.run(loader)
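The ca callback turns metrics.classwise_accuracy into a dict of per-class scores. In plain torch terms, per-class accuracy is roughly (a sketch, not homura's implementation):

import torch

def classwise_accuracy(output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # for each class c: fraction of samples with target c predicted as c
    pred = output.argmax(dim=1)
    num_classes = output.size(1)
    accs = torch.full((num_classes,), float("nan"))
    for c in range(num_classes):
        mask = target == c
        if mask.any():
            accs[c] = (pred[mask] == c).float().mean()
    return accs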
Example #8
def test(rep):
    tmpdir = str(gettempdir())
    if rep == "tensorboard" and not is_tensorboardX_available:
        pytest.skip("tensorboardX is not available")

    @callbacks.metric_callback_decorator
    def ca(data):
        output, target = data["output"], data["data"][1]
        return {
            i: v
            for i, v in enumerate(metrics.classwise_accuracy(output, target))
        }

    model = nn.Linear(10, 10)
    optimizer = optim.SGD(lr=0.1)

    c = callbacks.CallbackList(callbacks.AccuracyCallback(), ca,
                               callbacks.WeightSave(tmpdir))
    epoch = range(1)
    loader = [(torch.randn(2, 10), torch.zeros(2, dtype=torch.long))
              for _ in range(10)]
    with {
            "tqdm": lambda: reporters.TQDMReporter(epoch, c, tmpdir),
            "logger": lambda: reporters.LoggerReporter(c, tmpdir),
            "tensorboard": lambda: reporters.TensorboardReporter(c, tmpdir)
    }[rep]() as _rep:
        tr = trainers.SupervisedTrainer(model,
                                        optimizer,
                                        F.cross_entropy,
                                        callbacks=_rep,
                                        verb=False)
        if rep == "tqdm":
            epoch = _rep
        for _ in epoch:
            tr.train(loader)
            tr.test(loader)

    save_file = list(Path(tmpdir).glob("*/*.pkl"))[0]
    tr.resume(save_file)

    c = callbacks.AccuracyCallback()
    with {
            "tqdm": lambda: reporters.TQDMReporter(epoch, c, tmpdir),
            "logger": lambda: reporters.LoggerReporter(c, tmpdir),
            "tensorboard": lambda: reporters.TensorboardReporter(c, tmpdir)
    }[rep]() as _rep:
        inferencer = Inferencer(model, _rep)
        inferencer.load(save_file)
        inferencer.run(loader)
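Both test functions take rep (plus tmp_path and save_freq in the first), which suggests pytest parametrization along these lines; the decorators are not shown in the excerpts and the save_freq values are purely hypothetical:

import pytest

@pytest.mark.parametrize("rep", ["tqdm", "logger", "tensorboard"])
@pytest.mark.parametrize("save_freq", [1, -1])  # hypothetical values
def test(tmp_path, rep, save_freq):
    ...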
Example #9
def main():
    train_loader, test_loader = get_dataloader(args.batch_size, args.root)
    gpus = list(range(torch.cuda.device_count()))
    se_resnet = nn.DataParallel(se_resnet50(num_classes=1000),
                                device_ids=gpus)
    optimizer = optim.SGD(lr=0.6 / 1024 * args.batch_size, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.StepLR(30, gamma=0.1)
    weight_saver = callbacks.WeightSave("checkpoints")
    tqdm_rep = reporter.TQDMReporter(range(args.epochs), callbacks=[callbacks.AccuracyCallback()])

    trainer = Trainer(se_resnet, optimizer, F.cross_entropy, scheduler=scheduler,
                      callbacks=callbacks.CallbackList(weight_saver, tqdm_rep))
    for _ in tqdm_rep:
        trainer.train(train_loader)
        trainer.test(test_loader)
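The expression 0.6 / 1024 * args.batch_size used here and in Examples #1, #3, and #4 is the linear learning-rate scaling rule: the rate per sample stays fixed while the batch size varies. A quick check of the values it produces:

base_lr_per_sample = 0.6 / 1024
for bs in (256, 512, 1024):
    print(bs, base_lr_per_sample * bs)  # 256 -> 0.15, 512 -> 0.3, 1024 -> 0.6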
Example #10
def main(batch_size):
    layers = ["layer1.0.conv1", "layer2.0.conv1", "layer3.0.conv1", "fc"]
    train_loader, test_loader = cifar10_loaders(128)
    weight_save = callbacks.WeightSave("checkpoints")
    model = resnet20(num_classes=10)
    model2 = deepcopy(model)
    optimizer = torch.optim.SGD(params=model.parameters(),
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=1e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 50)
    trainer = trainers.SupervisedTrainer(model,
                                         optimizer,
                                         F.cross_entropy,
                                         scheduler=scheduler,
                                         callbacks=weight_save,
                                         verb=False)
    for ep in trange(100, ncols=80):
        trainer.train(train_loader)

    hooks1 = [CCAHook(model, name, svd_device=args.device) for name in layers]
    hooks2 = [CCAHook(model2, name, svd_device=args.device) for name in layers]
    device = next(model.parameters()).device
    model2.to(device)
    input = hooks1[0].data(train_loader.dataset,
                           batch_size=batch_size).to(device)
    history = []

    def distance():
        model.eval()
        model2.eval()
        with torch.no_grad():
            model(input)
            model2(input)
        return [h1.distance(h2) for h1, h2 in zip(hooks1, hooks2)]

    # distance between the untrained copy (epoch 0) and the trained model (epoch 99)
    history.append(distance())

    # then load checkpoints from epochs 29, 49, and 99 into model2
    # and compare each against the trained model
    for ep in (29, 49, 99):
        saved = torch.load(weight_save.save_path / f"{ep}.pkl")
        model2.load_state_dict(saved["model"])
        history.append(distance())
    plot(history, layers)
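CCAHook records a named layer's activations on each forward pass, and distance() compares the two networks layer by layer via a CCA-style similarity. The capture half can be sketched with a plain forward hook (my own minimal version, not the CCAHook API):

import torch
from torch import nn

class ActivationHook:
    # record the output of one named submodule on every forward pass
    def __init__(self, model: nn.Module, name: str):
        module = dict(model.named_modules())[name]
        self.activation = None
        self._handle = module.register_forward_hook(self._hook)

    def _hook(self, module, inputs, output):
        self.activation = output.detach()

    def remove(self):
        self._handle.remove()

# after model(input), hook.activation holds e.g. layer1.0.conv1's output,
# ready to be flattened and fed into an SVCCA/PWCCA-style comparison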