Code Example #1
def get_components(cfg):
    labeled_loader, unlabeled_loader, val_loader, test_loader = get_dataloader(cfg.data.name,
                                                                               cfg.data.labeled_size,
                                                                               cfg.data.unlabeled_size,
                                                                               cfg.data.val_size,
                                                                               cfg.data.batch_size,
                                                                               cfg.data.random_state,
                                                                               download=cfg.data.download,
                                                                               pilaugment=cfg.data.get('pilaugment',
                                                                                                       False)
                                                                               )

    model = wrn28_2(num_classes=6 if cfg.data.name == "animal" else 10)
    # homura-style factories: only hyperparameters are given here; the trainer
    # binds the optimizer/scheduler to the model's parameters later
    optimizer = {'adam': optim.Adam(lr=cfg.optim.lr),
                 'sgd': optim.SGD(lr=cfg.optim.lr, momentum=0.9)}[cfg.optim.name]
    scheduler = {'adam': None,
                 'sgd': lr_scheduler.CosineAnnealingWithWarmup(cfg.optim.epochs,
                                                               4, cfg.optim.epochs // 100)}[cfg.optim.name]
    # exponential moving average of the weights, instantiated by the trainer via this partial
    ema_model = partial(EMAModel, ema_rate=cfg.model.ema_rate, weight_decay=cfg.optim.wd * cfg.optim.lr)
    num_classes = {"animal": 6, "cifar100": 100, "tinyimagenet": 200}.get(cfg.data.name, 10)
    tq = reporters.TQDMReporter(range(cfg.optim.epochs))
    _callbacks = [callbacks.AccuracyCallback(),
                  callbacks.LossCallback(),
                  reporters.IOReporter("."),
                  reporters.TensorboardReporter("."), tq]
    return PackedLoader(labeled_loader, unlabeled_loader), val_loader, test_loader, model, optimizer, \
           scheduler, ema_model, num_classes, tq, _callbacks
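Note: the examples on this page construct optimizers as optim.Adam(lr=...) with no model parameters; the trainer attaches them to the model afterwards. The snippet below is a minimal plain-PyTorch sketch of that deferred-construction pattern (build_optimizer is an illustrative helper, not part of homura):

from functools import partial

import torch
from torch import nn, optim


def build_optimizer(name: str, lr: float):
    # Return a factory that still needs model parameters, mirroring the
    # {'adam': ..., 'sgd': ...}[cfg.optim.name] lookup in the example above.
    factories = {"adam": partial(optim.Adam, lr=lr),
                 "sgd": partial(optim.SGD, lr=lr, momentum=0.9)}
    return factories[name]


model = nn.Linear(10, 2)
optimizer = build_optimizer("adam", lr=1e-3)(model.parameters())
optimizer.zero_grad()
model(torch.randn(4, 10)).sum().backward()
optimizer.step()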
Code Example #2
File: gap.py  Project: ddghost/neural_network_model
def main():
    train_loader, test_loader = imagenet_loaders(
        args.root,
        args.batch_size,
        num_train_samples=args.batch_size * args.train_max_iter,
        num_test_samples=args.batch_size * args.test_max_iter)
    pretrained_model = resnet50(pretrained=True)
    # freeze the pretrained classifier and keep it in eval mode; only the generator is trained
    for p in pretrained_model.parameters():
        p.requires_grad = False
    pretrained_model.eval()

    generator = ResNetGenerator(3, 3, args.num_filters)
    generator.cuda()
    optimizer = optim.Adam(lr=args.lr, betas=(args.beta1, 0.999))
    trainer = Trainer({
        "generator": generator,
        "classifier": pretrained_model
    },
                      optimizer,
                      reporter.TensorboardReporter([
                          adv_accuracy, fooling_rate,
                          callbacks.AccuracyCallback(),
                          callbacks.LossCallback()
                      ],
                                                   save_dir="results"),
                      noise=torch.randn(3, 224,
                                        224).expand(args.batch_size, -1, -1,
                                                    -1))
    for ep in range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)
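gap.py above registers adv_accuracy and fooling_rate as metric callbacks. As a rough, self-contained illustration only (assuming the usual definition, not the project's actual callback), the fooling rate is the fraction of inputs whose predicted class flips once the generated perturbation is added:

import torch
from torch import nn


@torch.no_grad()
def fooling_rate(classifier: nn.Module,
                 images: torch.Tensor,
                 perturbation: torch.Tensor) -> float:
    # fraction of inputs whose predicted class changes after the perturbation
    clean_pred = classifier(images).argmax(dim=1)
    adv_pred = classifier(images + perturbation).argmax(dim=1)
    return (clean_pred != adv_pred).float().mean().item()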
Code Example #3
File: supervised.py  Project: Jawae/ssl-suite
def main(cfg):
    model = wrn28_2(num_classes=10)
    train_loader, test_loader = get_dataloaders(cfg.data.name,
                                                cfg.data.batch_size,
                                                cfg.data.train_size,
                                                cfg.data.random_state)
    optimizer = optim.Adam(lr=cfg.optim.lr)
    tq = reporters.TQDMReporter(range(cfg.optim.epochs))
    c = [
        callbacks.AccuracyCallback(),
        callbacks.LossCallback(),
        reporters.IOReporter("."), tq
    ]

    with SupervisedTrainer(
            model,
            optimizer,
            F.cross_entropy,
            callbacks=c,
            ema_model=partial(EMAModel,
                              ema_rate=cfg.model.ema_rate,
                              weight_decay=cfg.optim.wd * cfg.optim.lr),
    ) as trainer:
        for _ in tq:
            trainer.train(train_loader)
            trainer.test(test_loader)
        # report the median test accuracy over the last 20 epochs
        trainer.logger.info(
            f"test accuracy: {median(c[0].history['test'][-20:])}")
Code Example #4
File: search.py  Project: taikiinoue45/dda
def search(cfg: BaseConfig):
    train_loader, _, num_classes = DATASET_REGISTRY(cfg.data.name)(
        batch_size=cfg.data.batch_size,
        train_size=cfg.data.train_size,
        drop_last=True,
        download=cfg.data.download,
        return_num_classes=True,
        num_workers=4)
    model = {
        'main':
        Discriminator(MODEL_REGISTRY('wrn40_2')(num_classes)),
        'policy':
        Policy.faster_auto_augment_policy(cfg.model.num_sub_policies,
                                          cfg.model.temperature,
                                          cfg.model.operation_count,
                                          cfg.model.num_chunks)
    }
    optimizer = {
        'main': optim.Adam(lr=cfg.optim.main_lr, betas=(0, 0.999)),
        'policy': optim.Adam(lr=cfg.optim.policy_lr, betas=(0, 0.999))
    }
    tqdm = callbacks.TQDMReporter(range(cfg.optim.epochs))
    c = [
        callbacks.LossCallback(),  # classification loss
        callbacks.metric_callback_by_name('d_loss'),  # discriminator loss
        callbacks.metric_callback_by_name('a_loss'),  # augmentation loss
        tqdm
    ]
    with AdvTrainer(model,
                    optimizer,
                    F.cross_entropy,
                    callbacks=c,
                    cfg=cfg.model,
                    use_cuda_nonblocking=True) as trainer:
        for _ in tqdm:
            trainer.train(train_loader)
        trainer.save(
            pathlib.Path(hydra.utils.get_original_cwd()) / 'policy_weights' /
            cfg.data.name)
Code Example #5
if __name__ == '__main__':
    import miniargs
    from torch.nn import functional as F

    p = miniargs.ArgumentParser()
    p.add_int("--batch_size", default=128)
    p.add_int("--epochs", default=300)
    p.add_str("--optimizer", choices=["sgd", "adam"])
    p.add_float("--lr", default=1e-2)
    p.add_multi_str("--group", default=["conv1", "layer1", "layer2", "layer3"])
    p.add_int("--step", default=50)
    p.add_int("--num_convs", default=3)
    p.add_int("--num_fcs", default=3)
    args = p.parse()

    optimizer = {"adam": optim.Adam(lr=3e-4, weight_decay=1e-4),
                 "sgd": optim.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-4)}[args.optimizer]

    train_loader, test_loader = cifar10_loaders(args.batch_size)
    # expose the ResNet-56 stages by name so auxiliary heads can be attached to the chosen groups
    resnet = module_converter(resnet56(num_classes=10), keys=["conv1", "bn1", "relu", "layer1", "layer2", "layer3"])
    aux = nn.ModuleDict(OrderedDict({k: v for k, v in {
        # 32x32
        "conv1": generate_aux(32, 16, 10, args.num_convs, args.num_fcs),
        # 32x32
        "layer1": generate_aux(32, 16, 10, args.num_convs, args.num_fcs),
        # 16x16
        "layer2": generate_aux(16, 32, 10, args.num_convs, args.num_fcs),
        # 8x8
        "layer3": generate_aux(8, 64, 10, args.num_convs, args.num_fcs),
    }.items() if k in args.group}))
    model = NaiveGreedyModule(resnet, aux=aux,