def main():
    if args.distributed:
        init_distributed()

    model = se_resnet50(num_classes=1000)
    optimizer = optim.SGD(lr=0.6 / 1024 * args.batch_size, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([50, 70])
    train_loader, test_loader = imagenet_loaders(args.root, args.batch_size, distributed=args.distributed,
                                                 num_train_samples=args.batch_size * 10 if args.debug else None,
                                                 num_test_samples=args.batch_size * 10 if args.debug else None)

    c = [callbacks.AccuracyCallback(),
         callbacks.AccuracyCallback(k=5),
         callbacks.LossCallback(),
         callbacks.WeightSave('.'),
         reporters.TensorboardReporter('.'),
         reporters.TQDMReporter(range(args.epochs))]

    with SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=c, scheduler=scheduler) as trainer:
        for _ in c[-1]:
            trainer.train(train_loader)
            trainer.test(test_loader)

def main():
    model = {"resnet20": resnet20, "wrn28_10": wrn28_10}[args.model](num_classes=10)
    weight_decay = {"resnet20": 1e-4, "wrn28_10": 5e-4}[args.model]
    lr_decay = {"resnet20": 0.1, "wrn28_10": 0.2}[args.model]
    train_loader, test_loader = cifar10_loaders(args.batch_size)
    optimizer = optim.SGD(lr=1e-1, momentum=0.9, weight_decay=weight_decay)
    scheduler = lr_scheduler.MultiStepLR([100, 150], gamma=lr_decay)
    tq = reporters.TQDMReporter(range(args.epochs), verb=True)
    c = [callbacks.AccuracyCallback(),
         callbacks.LossCallback(),
         reporters.IOReporter("."),
         reporters.TensorboardReporter("."),
         callbacks.WeightSave("."),
         tq]

    with trainers.SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=c, scheduler=scheduler) as trainer:
        for _ in tq:
            trainer.train(train_loader)
            trainer.test(test_loader)

def get_components(cfg):
    labeled_loader, unlabeled_loader, val_loader, test_loader = get_dataloader(
        cfg.data.name, cfg.data.labeled_size, cfg.data.unlabeled_size, cfg.data.val_size,
        cfg.data.batch_size, cfg.data.random_state,
        download=cfg.data.download, pilaugment=cfg.data.get('pilaugment', False))

    model = wrn28_2(num_classes=6 if cfg.data.name == "animal" else 10)
    optimizer = {'adam': optim.Adam(lr=cfg.optim.lr),
                 'sgd': optim.SGD(lr=cfg.optim.lr, momentum=0.9)}[cfg.optim.name]
    scheduler = {'adam': None,
                 'sgd': lr_scheduler.CosineAnnealingWithWarmup(cfg.optim.epochs, 4,
                                                               cfg.optim.epochs // 100)}[cfg.optim.name]
    ema_model = partial(EMAModel, ema_rate=cfg.model.ema_rate, weight_decay=cfg.optim.wd * cfg.optim.lr)
    num_classes = {"animal": 6, "cifar100": 100, "tinyimagenet": 200}.get(cfg.data.name, 10)
    tq = reporters.TQDMReporter(range(cfg.optim.epochs))
    _callbacks = [callbacks.AccuracyCallback(),
                  callbacks.LossCallback(),
                  reporters.IOReporter("."),
                  reporters.TensorboardReporter("."),
                  tq]

    return PackedLoader(labeled_loader, unlabeled_loader), val_loader, test_loader, model, optimizer, \
        scheduler, ema_model, num_classes, tq, _callbacks

def train_and_eval(cfg):
    train_loader, val_loader, test_loader, num_classes = get_dataloader(
        cfg.data.name, cfg.data.val_size, cfg.data.batch_size, cfg.data.download, cfg.augment, False)
    model = get_model(cfg.model.name, num_classes)
    optimizer = optim.SGD(cfg.optim.model.lr, momentum=0.9, weight_decay=cfg.optim.model.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(cfg.optim.model.steps)
    tq = reporters.TQDMReporter(range(cfg.optim.epochs), verb=cfg.verb)
    callback = [callbacks.AccuracyCallback(),
                callbacks.LossCallback(),
                reporters.TensorboardReporter("."),
                reporters.IOReporter("."),
                tq]

    with trainers.SupervisedTrainer(model, optimizer, F.cross_entropy,
                                    callbacks=callback, scheduler=scheduler) as trainer:
        for ep in tq:
            trainer.train(train_loader)
            trainer.test(val_loader, 'val')
        trainer.test(test_loader)

def main():
    train_loader, test_loader = imagenet_loaders(args.root, args.batch_size,
                                                 num_train_samples=args.batch_size * args.train_max_iter,
                                                 num_test_samples=args.batch_size * args.test_max_iter)

    pretrained_model = resnet50(pretrained=True)
    for p in pretrained_model.parameters():
        p.requires_grad = False
    pretrained_model.eval()

    generator = ResNetGenerator(3, 3, args.num_filters)
    generator.cuda()
    optimizer = optim.Adam(lr=args.lr, betas=(args.beta1, 0.999))
    trainer = Trainer({"generator": generator, "classifier": pretrained_model},
                      optimizer,
                      reporter.TensorboardReporter([adv_accuracy, fooling_rate,
                                                    callbacks.AccuracyCallback(),
                                                    callbacks.LossCallback()],
                                                   save_dir="results"),
                      noise=torch.randn(3, 224, 224).expand(args.batch_size, -1, -1, -1))

    for ep in range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)

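# The `fooling_rate` metric used above is commonly defined as the fraction of samples
# whose predicted label changes once the perturbation is applied (Moosavi-Dezfooli et al.).
# The sketch below is only an illustration of that definition, not the callback actually
# wired into the Trainer; the function name and signature are hypothetical.
import torch


@torch.no_grad()
def fooling_rate_sketch(classifier, clean: torch.Tensor, perturbed: torch.Tensor) -> torch.Tensor:
    # fraction of inputs whose argmax prediction flips under the perturbation
    clean_pred = classifier(clean).argmax(dim=1)
    adv_pred = classifier(perturbed).argmax(dim=1)
    return (clean_pred != adv_pred).float().mean()
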
def main():
    model = se_resnet50(num_classes=1000)
    optimizer = optim.SGD(lr=0.6 / 1024 * args.batch_size, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([50, 70])
    c = [callbacks.AccuracyCallback(), callbacks.LossCallback()]
    r = reporters.TQDMReporter(range(args.epochs), callbacks=c)
    tb = reporters.TensorboardReporter(c)
    rep = callbacks.CallbackList(r, tb, callbacks.WeightSave("checkpoints"))

    if args.distributed:
        # DistributedSupervisedTrainer sets up torch.distributed
        if args.local_rank == 0:
            print("\nuse DistributedDataParallel")
        trainer = DistributedSupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=rep,
                                               scheduler=scheduler, init_method=args.init_method,
                                               backend=args.backend)
    else:
        multi_gpus = torch.cuda.device_count() > 1
        if multi_gpus:
            print("\nuse DataParallel")
        trainer = SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=rep,
                                    scheduler=scheduler, data_parallel=multi_gpus)

    # if distributed, need to setup loaders after DistributedSupervisedTrainer
    train_loader, test_loader = imagenet_loaders(args.root, args.batch_size, distributed=args.distributed,
                                                 num_train_samples=args.batch_size * 10 if args.debug else None,
                                                 num_test_samples=args.batch_size * 10 if args.debug else None)

    for _ in r:
        trainer.train(train_loader)
        trainer.test(test_loader)

def main():
    if is_distributed():
        init_distributed()

    model = se_resnet50(num_classes=1000)
    optimizer = optim.SGD(lr=0.6 / 1024 * args.batch_size, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([50, 70])
    train_loader, test_loader = DATASET_REGISTRY("imagenet")(args.batch_size)

    c = [callbacks.AccuracyCallback(),
         callbacks.AccuracyCallback(k=5),
         callbacks.LossCallback(),
         callbacks.WeightSave("."),
         reporters.TensorboardReporter("."),
         reporters.TQDMReporter(range(args.epochs))]

    with SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=c, scheduler=scheduler) as trainer:
        for _ in c[-1]:
            trainer.train(train_loader)
            trainer.test(test_loader)

def main(cfg):
    model = wrn28_2(num_classes=10)
    train_loader, test_loader = get_dataloaders(cfg.data.name, cfg.data.batch_size,
                                                cfg.data.train_size, cfg.data.random_state)
    optimizer = optim.Adam(lr=cfg.optim.lr)
    tq = reporters.TQDMReporter(range(cfg.optim.epochs))
    c = [callbacks.AccuracyCallback(),
         callbacks.LossCallback(),
         reporters.IOReporter("."),
         tq]

    with SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=c,
                           ema_model=partial(EMAModel, ema_rate=cfg.model.ema_rate,
                                             weight_decay=cfg.optim.wd * cfg.optim.lr)) as trainer:
        for _ in tq:
            trainer.train(train_loader)
            trainer.test(test_loader)

        trainer.logger.info(f"test accuracy: {median(c[0].history['test'][-20:])}")

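# `EMAModel` above keeps an exponential moving average of the trained weights and is used
# for evaluation. As a rough, self-contained illustration of the idea (not homura's actual
# implementation; the class name SimpleEMA and its methods are hypothetical), an EMA update
# with rate `ema_rate` can be written in plain PyTorch as follows.
import copy
import torch


class SimpleEMA:
    def __init__(self, model: torch.nn.Module, ema_rate: float = 0.999):
        self.ema_rate = ema_rate
        self.shadow = copy.deepcopy(model).eval()  # averaged copy used for evaluation
        for p in self.shadow.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model: torch.nn.Module) -> None:
        # shadow <- rate * shadow + (1 - rate) * current weights, after each optimizer step
        for ema_p, p in zip(self.shadow.parameters(), model.parameters()):
            ema_p.mul_(self.ema_rate).add_(p, alpha=1 - self.ema_rate)
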
def main():
    if args.distributed:
        init_distributed()
    if args.enable_accimage:
        enable_accimage()

    model = resnet50()
    optimizer = optim.SGD(lr=1e-1 * args.batch_size * get_num_nodes() / 256, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([30, 60, 80])
    c = [callbacks.AccuracyCallback(), callbacks.LossCallback()]
    r = reporters.TQDMReporter(range(args.epochs), callbacks=c)
    tb = reporters.TensorboardReporter(c)
    rep = callbacks.CallbackList(r, tb, callbacks.WeightSave("checkpoints"))
    _train_loader, _test_loader = imagenet_loaders(args.root, args.batch_size, distributed=args.distributed,
                                                   num_train_samples=args.batch_size * 10 if args.debug else None,
                                                   num_test_samples=args.batch_size * 10 if args.debug else None)

    if args.distributed:
        # DistributedSupervisedTrainer sets up torch.distributed
        if args.local_rank == 0:
            print("\nuse DistributedDataParallel\n")
        trainer = DistributedSupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=rep,
                                               scheduler=scheduler, init_method=args.init_method,
                                               backend=args.backend, enable_amp=args.enable_amp)
    else:
        use_multi_gpus = torch.cuda.device_count() > 1
        if use_multi_gpus:
            print("\nuse DataParallel\n")
        trainer = SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=rep,
                                    data_parallel=use_multi_gpus)

    for epoch in r:
        if args.use_prefetcher:
            train_loader = prefetcher.DataPrefetcher(_train_loader)
            test_loader = prefetcher.DataPrefetcher(_test_loader)
        else:
            train_loader, test_loader = _train_loader, _test_loader
        # following apex's training scheme
        trainer.train(train_loader)
        trainer.test(test_loader)

    rep.close()

def main(cfg):
    model = {"resnet20": resnet20, "wrn28_10": wrn28_10}[cfg.model](num_classes=10)
    weight_decay = {"resnet20": 1e-4, "wrn28_10": 5e-4}[cfg.model]
    lr_decay = {"resnet20": 0.1, "wrn28_10": 0.2}[cfg.model]
    train_loader, test_loader = vision_loaders("cifar10", cfg.batch_size)
    optimizer = None if cfg.bn_no_wd else optim.SGD(lr=1e-1, momentum=0.9, weight_decay=weight_decay)
    scheduler = lr_scheduler.MultiStepLR([100, 150], gamma=lr_decay)
    tq = reporters.TQDMReporter(range(cfg.epochs), verb=True)
    c = [callbacks.AccuracyCallback(),
         callbacks.LossCallback(),
         reporters.IOReporter("."),
         reporters.TensorboardReporter("."),
         callbacks.WeightSave("."),
         tq]

    if cfg.bn_no_wd:
        # exclude BatchNorm parameters from weight decay by overriding the optimizer setup
        def set_optimizer(trainer):
            bn_params = []
            non_bn_parameters = []
            for name, p in trainer.model.named_parameters():
                if "bn" in name:
                    bn_params.append(p)
                else:
                    non_bn_parameters.append(p)
            optim_params = [{"params": bn_params, "weight_decay": 0},
                            {"params": non_bn_parameters, "weight_decay": weight_decay}]
            trainer.optimizer = torch.optim.SGD(optim_params, lr=1e-1, momentum=0.9)

        trainers.SupervisedTrainer.set_optimizer = set_optimizer

    with trainers.SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=c, scheduler=scheduler) as trainer:
        for _ in tq:
            trainer.train(train_loader)
            trainer.test(test_loader)

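# Side note on the bn_no_wd branch above: filtering by "bn" in the parameter name only
# catches modules named that way and misses biases and other norm layers. A common,
# shape-based variant is sketched below as an assumption-labeled alternative (the helper
# name make_param_groups is hypothetical and not part of the example above): every 1-D
# parameter (norm scales and biases) is excluded from weight decay.
import torch


def make_param_groups(model: torch.nn.Module, weight_decay: float):
    decay, no_decay = [], []
    for p in model.parameters():
        if not p.requires_grad:
            continue
        # 1-D parameters are norm weights/biases and bias vectors
        (no_decay if p.ndim <= 1 else decay).append(p)
    return [{"params": decay, "weight_decay": weight_decay},
            {"params": no_decay, "weight_decay": 0.0}]

# usage sketch: torch.optim.SGD(make_param_groups(model, 5e-4), lr=1e-1, momentum=0.9)
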
def main(cfg):
    if cfg.distributed.enable:
        init_distributed(use_horovod=cfg.distributed.use_horovod,
                         backend=cfg.distributed.backend,
                         init_method=cfg.distributed.init_method)
    if cfg.enable_accimage:
        enable_accimage()

    model = resnet50()
    optimizer = optim.SGD(lr=1e-1 * cfg.batch_size * get_num_nodes() / 256, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR([30, 60, 80])
    tq = reporters.TQDMReporter(range(cfg.epochs))
    c = [callbacks.AccuracyCallback(),
         callbacks.AccuracyCallback(k=5),
         callbacks.LossCallback(),
         tq,
         reporters.TensorboardReporter("."),
         reporters.IOReporter(".")]
    _train_loader, _test_loader = imagenet_loaders(cfg.root, cfg.batch_size, distributed=cfg.distributed.enable,
                                                   num_train_samples=cfg.batch_size * 10 if cfg.debug else None,
                                                   num_test_samples=cfg.batch_size * 10 if cfg.debug else None)

    use_multi_gpus = not cfg.distributed.enable and torch.cuda.device_count() > 1
    with SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=c, scheduler=scheduler,
                           data_parallel=use_multi_gpus,
                           use_horovod=cfg.distributed.use_horovod) as trainer:
        for epoch in tq:
            if cfg.use_prefetcher:
                train_loader = prefetcher.DataPrefetcher(_train_loader)
                test_loader = prefetcher.DataPrefetcher(_test_loader)
            else:
                train_loader, test_loader = _train_loader, _test_loader
            # following apex's training scheme
            trainer.train(train_loader)
            trainer.test(test_loader)

def train_and_eval(cfg: BaseConfig):
    if cfg.path is None:
        print('cfg.path is None, so FasterAutoAugment is not used')
        policy = None
    else:
        path = Path(hydra.utils.get_original_cwd()) / cfg.path
        assert path.exists()
        policy_weight = torch.load(path, map_location='cpu')
        policy = Policy.faster_auto_augment_policy(num_chunks=cfg.model.num_chunks,
                                                   **policy_weight['policy_kwargs'])
        policy.load_state_dict(policy_weight['policy'])

    train_loader, test_loader, num_classes = DATASET_REGISTRY(cfg.data.name)(
        batch_size=cfg.data.batch_size,
        drop_last=True,
        download=cfg.data.download,
        return_num_classes=True,
        norm=[transforms.ToTensor(),
              transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))],
        num_workers=4)
    model = MODEL_REGISTRY(cfg.model.name)(num_classes)
    optimizer = optim.SGD(cfg.optim.lr, momentum=cfg.optim.momentum,
                          weight_decay=cfg.optim.weight_decay, nesterov=cfg.optim.nesterov)
    scheduler = lr_scheduler.CosineAnnealingWithWarmup(cfg.optim.epochs, cfg.optim.scheduler.mul,
                                                       cfg.optim.scheduler.warmup)
    tqdm = callbacks.TQDMReporter(range(cfg.optim.epochs))
    c = [callbacks.LossCallback(), callbacks.AccuracyCallback(), tqdm]

    with EvalTrainer(model, optimizer, F.cross_entropy, callbacks=c, scheduler=scheduler,
                     policy=policy, cfg=cfg.model, use_cuda_nonblocking=True) as trainer:
        for _ in tqdm:
            trainer.train(train_loader)
            trainer.test(test_loader)

    print(f"Min. Error Rate: {1 - max(c[1].history['test']):.3f}")

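# CosineAnnealingWithWarmup, used above and in get_components earlier, combines a linear
# warmup with cosine decay of the learning rate. A rough stand-in using
# torch.optim.lr_scheduler.LambdaLR is sketched below (epoch-based; the multiplier
# argument `mul` of the original scheduler is omitted here, so this is an approximation,
# not homura's implementation).
import math
import torch


def warmup_cosine_lambda(total_epochs: int, warmup_epochs: int):
    def fn(epoch: int) -> float:
        if epoch < warmup_epochs:
            return (epoch + 1) / warmup_epochs  # linear warmup from ~0 to 1
        progress = (epoch - warmup_epochs) / max(1, total_epochs - warmup_epochs)
        return 0.5 * (1 + math.cos(math.pi * progress))  # cosine decay toward 0
    return fn

# usage sketch:
# scheduler = torch.optim.lr_scheduler.LambdaLR(sgd, warmup_cosine_lambda(200, 5))
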
def main():
    Trainer = trainers.SupervisedTrainer if args.baseline else MixupTrainer
    model = MODELS[args.model](num_classes=NUMCLASSES[args.dataset])
    train_loader, test_loader = DATASETS[args.dataset](args.batch_size)
    optimizer = optim.SGD(lr=1e-1, momentum=0.9, weight_decay=1e-4)
    scheduler = lr_scheduler.MultiStepLR(args.steps, gamma=0.1)
    c = [callbacks.AccuracyCallback(), callbacks.LossCallback()]

    with reporters.TQDMReporter(range(args.epochs), callbacks=c) as tq, \
            reporters.TensorboardReporter(c) as tb:
        trainer = Trainer(model, optimizer, naive_cross_entropy_loss, callbacks=[tq, tb],
                          scheduler=scheduler, alpha=args.alpha,
                          num_classes=NUMCLASSES[args.dataset])
        for _ in tq:
            trainer.train(train_loader)
            trainer.test(test_loader)

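# MixupTrainer above applies mixup (Zhang et al., 2018) inside its iteration, which is why
# the example passes a soft-target loss and an `alpha` parameter. The sketch below only
# illustrates the core idea in plain PyTorch; the helper names mixup_batch and
# soft_target_cross_entropy are hypothetical and are not the repo's own implementation of
# naive_cross_entropy_loss.
import torch
import torch.nn.functional as F


def mixup_batch(inputs: torch.Tensor, targets: torch.Tensor, alpha: float, num_classes: int):
    # class indices -> one-hot so the mixed targets remain valid distributions
    targets = F.one_hot(targets, num_classes).float()
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    perm = torch.randperm(inputs.size(0), device=inputs.device)
    mixed_inputs = lam * inputs + (1 - lam) * inputs[perm]
    mixed_targets = lam * targets + (1 - lam) * targets[perm]
    return mixed_inputs, mixed_targets


def soft_target_cross_entropy(logits: torch.Tensor, soft_targets: torch.Tensor) -> torch.Tensor:
    # cross entropy against soft (mixed) targets
    return -(soft_targets * F.log_softmax(logits, dim=1)).sum(dim=1).mean()
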
def main():
    model = MODELS[args.teacher_model](num_classes=10)
    train_loader, test_loader = cifar10_loaders(args.batch_size)
    weight_decay = 1e-4 if "resnet" in args.teacher_model else 5e-4
    lr_decay = 0.1 if "resnet" in args.teacher_model else 0.2
    optimizer = optim.SGD(lr=1e-1, momentum=0.9, weight_decay=weight_decay)
    scheduler = lr_scheduler.MultiStepLR([50, 80], gamma=lr_decay)
    trainer = trainers.SupervisedTrainer(model, optimizer, F.cross_entropy, scheduler=scheduler)

    trainer.logger.info("Train the teacher model!")
    for _ in trange(args.teacher_epochs, ncols=80):
        trainer.train(train_loader)
        trainer.test(test_loader)

    teacher_model = model.eval()

    weight_decay = 1e-4 if "resnet" in args.student_model else 5e-4
    lr_decay = 0.1 if "resnet" in args.student_model else 0.2
    optimizer = optim.SGD(lr=1e-1, momentum=0.9, weight_decay=weight_decay)
    scheduler = lr_scheduler.MultiStepLR([50, 80], gamma=lr_decay)
    model = MODELS[args.student_model](num_classes=10)
    c = [callbacks.AccuracyCallback(), callbacks.LossCallback(), kl_loss]

    with reporters.TQDMReporter(range(args.student_epochs), callbacks=c) as tq, \
            reporters.TensorboardReporter(c) as tb:
        trainer = DistillationTrainer(model, optimizer, F.cross_entropy, callbacks=[tq, tb],
                                      scheduler=scheduler, teacher_model=teacher_model,
                                      temperature=args.temperature)
        trainer.logger.info("Train the student model!")
        for _ in tq:
            trainer.train(train_loader)
            trainer.test(test_loader)

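# DistillationTrainer above trains the student against the frozen teacher with a
# temperature-scaled KL term (Hinton et al., 2015) on top of the hard-label cross entropy.
# The sketch below only illustrates that KL term; the function name distillation_kl is
# hypothetical, and the `kl_loss` callback in the example may track a differently scaled value.
import torch
import torch.nn.functional as F


def distillation_kl(student_logits: torch.Tensor,
                    teacher_logits: torch.Tensor,
                    temperature: float) -> torch.Tensor:
    # KL(teacher || student) on softened distributions; the T^2 factor keeps the gradient
    # magnitude comparable to the hard-label cross entropy.
    t = temperature
    log_p_student = F.log_softmax(student_logits / t, dim=1)
    p_teacher = F.softmax(teacher_logits / t, dim=1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * (t * t)
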
def search(cfg: BaseConfig):
    train_loader, _, num_classes = DATASET_REGISTRY(cfg.data.name)(batch_size=cfg.data.batch_size,
                                                                   train_size=cfg.data.train_size,
                                                                   drop_last=True,
                                                                   download=cfg.data.download,
                                                                   return_num_classes=True,
                                                                   num_workers=4)
    model = {'main': Discriminator(MODEL_REGISTRY('wrn40_2')(num_classes)),
             'policy': Policy.faster_auto_augment_policy(cfg.model.num_sub_policies,
                                                         cfg.model.temperature,
                                                         cfg.model.operation_count,
                                                         cfg.model.num_chunks)}
    optimizer = {'main': optim.Adam(lr=cfg.optim.main_lr, betas=(0, 0.999)),
                 'policy': optim.Adam(lr=cfg.optim.policy_lr, betas=(0, 0.999))}
    tqdm = callbacks.TQDMReporter(range(cfg.optim.epochs))
    c = [callbacks.LossCallback(),                      # classification loss
         callbacks.metric_callback_by_name('d_loss'),   # discriminator loss
         callbacks.metric_callback_by_name('a_loss'),   # augmentation loss
         tqdm]

    with AdvTrainer(model, optimizer, F.cross_entropy, callbacks=c, cfg=cfg.model,
                    use_cuda_nonblocking=True) as trainer:
        for _ in tqdm:
            trainer.train(train_loader)
        trainer.save(pathlib.Path(hydra.utils.get_original_cwd()) / 'policy_weights' / cfg.data.name)