Example #1
def main(cfg: omegaconf.DictConfig) -> None:
    # setup device
    device = "cuda" if cfg.gpus > 0 else "cpu"

    # setup dataset
    transform = shared.get_transform(cfg.dataset.input_size,
                                     cfg.dataset.mean,
                                     cfg.dataset.std,
                                     train=False)
    dataset_root = os.path.join(
        hydra.utils.get_original_cwd(), "data"
    )  # needed because Hydra automatically changes the working directory.

    df = eval_cifar(
        root=dataset_root,
        transform=transform,
        batch_size=cfg.batch_size,
        norm=cfg.norm,
        savedir=cfg.savedir,
        device=device,
        corruptions=cfg.dataset.corruptions,
    )

    # save to csv
    df.to_csv("corruption_norm_{name}_{norm}.csv".format(name=cfg.dataset.name,
                                                         norm=cfg.norm))

    # save as plot
    result_dict = dict(zip(df["corruption"], df["norm"]))
    create_barplot(result_dict, "mean corruption norm", "corruption_norm.png")
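These main functions take an omegaconf.DictConfig, which indicates they are wrapped with Hydra's entry-point decorator in the full source. A minimal sketch of how such an entry point is typically declared and launched; the config path and name here are assumptions, not taken from this repository:

import hydra
import omegaconf


@hydra.main(config_path="conf", config_name="config")  # hypothetical config location
def main(cfg: omegaconf.DictConfig) -> None:
    print(omegaconf.OmegaConf.to_yaml(cfg))  # cfg is assembled by Hydra from YAML files


if __name__ == "__main__":
    main()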
Example #2
def main(cfg: omegaconf.DictConfig) -> None:
    """
    Main entry point function to evaluate corruption robustness.
    """
    # setup device
    device = "cuda" if cfg.gpus > 0 else "cpu"

    # setup model
    model = shared.get_model(name=cfg.arch,
                             num_classes=cfg.dataset.num_classes)
    shared.load_model(model, cfg.weight)
    model = model.to(device)
    model.eval()

    # setup dataset
    transform = shared.get_transform(cfg.dataset.input_size,
                                     cfg.dataset.mean,
                                     cfg.dataset.std,
                                     train=False)
    dataset_root = os.path.join(
        hydra.utils.get_original_cwd(), "data"
    )  # needed because Hydra automatically changes the working directory.

    if cfg.dataset.name in {"cifar10-c", "cifar100-c"}:
        df = eval_cifar(
            name=cfg.dataset.name,
            model=model,
            root=dataset_root,
            transform=transform,
            batch_size=cfg.batch_size,
            savedir=cfg.savedir,
            device=device,
            corruptions=cfg.dataset.corruptions,
        )
    elif cfg.dataset.name in {"imagenet-c", "imagenet100-c"}:
        df = eval_imagenet(
            name=cfg.dataset.name,
            model=model,
            root=dataset_root,
            transform=transform,
            batch_size=cfg.batch_size,
            savedir=cfg.savedir,
            device=device,
            corruptions=cfg.dataset.corruptions,
        )
    else:
        raise NotImplementedError

    # save to csv
    df.to_csv("corruption_error_{name}.csv".format(name=cfg.dataset.name))

    # save as plot
    result_dict = dict(zip(df["corruption"], df["err1"]))
    create_barplot(result_dict, "Corruption error", "corruption_error.png")
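eval_cifar and eval_imagenet are repository helpers whose bodies are not shown here. For illustration only, a per-corruption top-1 error evaluation typically has the shape below; the helper name, the make_loader callable, and the DataFrame columns are assumptions chosen to match how df is consumed above:

import pandas as pd
import torch


def eval_corruptions_sketch(model, make_loader, corruptions, device):
    # make_loader(corruption) is a hypothetical callable returning a DataLoader
    # over that corruption's images; returns a DataFrame with "corruption"/"err1".
    rows = []
    for corruption in corruptions:
        correct, total = 0, 0
        with torch.no_grad():
            for x, t in make_loader(corruption):
                x, t = x.to(device), t.to(device)
                correct += (model(x).argmax(dim=1) == t).sum().item()
                total += t.size(0)
        rows.append({"corruption": corruption, "err1": 100.0 * (1.0 - correct / total)})
    return pd.DataFrame(rows)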
Example #3
    mean = [0.49139968, 0.48215841, 0.44653091]
    std = [0.24703223, 0.24348513, 0.26158784]
    num_iteration = 7
    eps_max = 8.0
    step_size = eps_max / math.sqrt(num_iteration)
    norm = "linf"
    rand_init = True
    scale_each = True
    scale_eps = False
    avoid_target = True
    criterion = torch.nn.CrossEntropyLoss()

    transform = shared.get_transform(
        input_size,
        mean=mean,
        std=std,
        train=False,
        normalize=False,
    )
    dataset = torchvision.datasets.CIFAR10(root="../../data/cifar10",
                                           train=False,
                                           download=False,
                                           transform=transform)
    loader = torch.utils.data.DataLoader(dataset,
                                         32,
                                         shuffle=False,
                                         num_workers=8)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    weightpath = "../testdata/weight_cifar10_wideresnet40_100ep.pth"
    model = shared.get_model(name="wideresnet40", num_classes=10)
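The parameters above (num_iteration, eps_max, step_size, norm = "linf", rand_init) are the usual knobs of a PGD-style attack. A generic linf PGD loop using them might look as follows; this is a sketch, not the repository's attack class, and it assumes images in [0, 1] with eps already scaled to that range (e.g. 8 / 255):

import torch


def pgd_linf_sketch(model, x, t, criterion, eps, step_size, num_iteration, rand_init=True):
    delta = torch.empty_like(x).uniform_(-eps, eps) if rand_init else torch.zeros_like(x)
    for _ in range(num_iteration):
        delta.requires_grad_(True)
        loss = criterion(model(x + delta), t)
        grad = torch.autograd.grad(loss, delta)[0]
        # ascend the loss on the true label (the avoid_target=True case above)
        delta = (delta + step_size * grad.sign()).clamp(-eps, eps).detach()
    return (x + delta).clamp(0.0, 1.0)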
Example #4
def main(cfg: omegaconf.DictConfig) -> None:
    """
    Entry point function for training models.
    """
    # show config
    logging.info(cfg.pretty())

    # setup loggers
    api_key = os.environ.get("ONLINE_LOGGER_API_KEY")
    loggers = pytorch_fourier_analysis.lit.get_loggers(cfg, api_key)
    for logger in loggers:
        logger.log_hyperparams(omegaconf.OmegaConf.to_container(cfg))

    # setup checkpoint callback and trainer
    checkpoint_callback = pytorch_fourier_analysis.lit.get_checkpoint_callback(
        cfg.savedir, monitor=cfg.checkpoint_monitor, mode=cfg.checkpoint_mode)

    trainer = pl.Trainer(
        deterministic=False,
        benchmark=True,
        fast_dev_run=False,
        gpus=cfg.gpus,
        num_nodes=cfg.num_nodes,
        distributed_backend=cfg.distributed_backend,
        # check https://pytorch-lightning.readthedocs.io/en/stable/trainer.html#distributed-backend
        max_epochs=cfg.epochs,
        min_epochs=cfg.epochs,
        logger=loggers,
        callbacks=[LitTrainerCallback()],
        checkpoint_callback=checkpoint_callback,
        default_root_dir=cfg.savedir,
        weights_save_path=cfg.savedir,
        resume_from_checkpoint=cfg.resume_ckpt_path
        if "resume_ckpt_path" in cfg.keys() else None,  # if not None, resume from this checkpoint
    )

    # setup model
    model = shared.get_model(name=cfg.arch,
                             num_classes=cfg.dataset.num_classes)

    # setup noise augmentation
    noiseaugment = shared.get_noiseaugment(cfg.noiseaugment)
    optional_transform = [noiseaugment] if noiseaugment else []

    # setup dataset
    train_transform = shared.get_transform(
        cfg.dataset.input_size,
        cfg.dataset.mean,
        cfg.dataset.std,
        train=True,
        optional_transform=optional_transform,
    )
    val_transform = shared.get_transform(cfg.dataset.input_size,
                                         cfg.dataset.mean,
                                         cfg.dataset.std,
                                         train=False)

    dataset_root = os.path.join(
        hydra.utils.get_original_cwd(), "data"
    )  # needed because Hydra automatically changes the working directory.
    train_dataset_class = shared.get_dataset_class(cfg.dataset.name,
                                                   root=dataset_root,
                                                   train=True)
    val_dataset_class = shared.get_dataset_class(cfg.dataset.name,
                                                 root=dataset_root,
                                                 train=False)

    train_dataset = train_dataset_class(transform=train_transform)
    val_dataset = val_dataset_class(transform=val_transform)
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   cfg.batch_size,
                                                   shuffle=True,
                                                   num_workers=cfg.num_workers)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 cfg.batch_size,
                                                 shuffle=False,
                                                 num_workers=cfg.num_workers)

    # Make the cosine annealing lambda function; needed for manual cosine annealing.
    cosin_annealing_func = functools.partial(
        shared.cosin_annealing,
        total_steps=cfg.epochs * len(train_dataloader),
        lr_max=1.0,
        lr_min=1e-6 / cfg.optimizer.lr,
    )

    # setup optimizer
    optimizer_class = shared.get_optimizer_class(cfg.optimizer)
    scheduler_class = shared.get_scheduler_class(cfg.scheduler,
                                                 cosin_annealing_func)

    # setup mix augmentation
    mixaugment = shared.get_mixaugment(cfg.mixaugment)

    # setup adversarial attack
    criterion = torch.nn.CrossEntropyLoss()
    attack_class = shared.get_attack_class(cfg.attack, cfg.dataset.input_size,
                                           cfg.dataset.mean, cfg.dataset.std,
                                           criterion)

    # train
    litmodel = ClassificationModel(model, criterion, mixaugment, attack_class,
                                   optimizer_class, scheduler_class)
    trainer.fit(litmodel, train_dataloader, val_dataloader)
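shared.cosin_annealing itself is not shown. Given the arguments above (total_steps, lr_max=1.0, lr_min expressed relative to the base learning rate), it is presumably the standard cosine multiplier consumed by torch.optim.lr_scheduler.LambdaLR; a sketch under that assumption:

import math


def cosin_annealing(step: int, total_steps: int, lr_max: float, lr_min: float) -> float:
    # Multiplicative LR factor for LambdaLR: starts at lr_max (1.0) and
    # decays to lr_min (here 1e-6 / base LR) over total_steps.
    return lr_min + (lr_max - lr_min) * 0.5 * (1.0 + math.cos(step / total_steps * math.pi))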
Example #5
def main(cfg: omegaconf.DictConfig) -> None:
    """
    Main entry point function to evaluate corruption robustness.
    """
    # setup device
    device = "cuda" if cfg.gpus > 0 else "cpu"

    # setup model
    model = shared.get_model(name=cfg.arch,
                             num_classes=cfg.dataset.num_classes)
    shared.load_model(model, cfg.weight)
    model = model.to(device)
    model.eval()

    # setup dataset
    transform = shared.get_transform(cfg.dataset.input_size,
                                     cfg.dataset.mean,
                                     cfg.dataset.std,
                                     train=False)
    dataset_root = os.path.join(
        hydra.utils.get_original_cwd(), "data"
    )  # needed because Hydra automatically changes the working directory.

    dataset_class = shared.get_dataset_class(name=cfg.dataset.name,
                                             root=dataset_root,
                                             train=False)
    dataset = dataset_class(transform=transform)

    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=cfg.batch_size,
        shuffle=False,
        num_workers=8,
        pin_memory=True,
    )

    normalizer = attacks.Normalizer(input_size=cfg.dataset.input_size,
                                    mean=cfg.dataset.mean,
                                    std=cfg.dataset.std,
                                    device=device,
                                    from_pixel_space=False)
    denormalizer = attacks.Denormalizer(input_size=cfg.dataset.input_size,
                                        mean=cfg.dataset.mean,
                                        std=cfg.dataset.std,
                                        device=device,
                                        to_pixel_space=False)

    df = eval_gaussian_noise_error(
        model,
        loader,
        normalizer,
        denormalizer,
        cfg.savedir,
        device,
        cfg.low_bandwidths,
        cfg.high_bandwidths,
        cfg.eps,
    )

    # save to csv
    df.to_csv("bandpass_error_{name}.csv".format(name=cfg.dataset.name))

    df_highpass = df[df["filter_mode"] == "high_pass"]
    df_lowpass = df[df["filter_mode"] == "low_pass"]

    # save as plot
    result_dict_highpass = dict(
        zip(df_highpass["bandwidth"], df_highpass["err1"]))
    result_dict_lowpass = dict(zip(df_lowpass["bandwidth"],
                                   df_lowpass["err1"]))

    create_barplot(result_dict_highpass, "", "error_highpass.png")
    create_barplot(result_dict_lowpass, "", "error_lowpass.png")
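The low/high bandwidths passed above control Fourier-domain filtering of the perturbed images. A generic sketch of a centered square low-/high-pass filter with torch.fft, for illustration only (the repository's filter implementation is not shown here):

import torch


def bandpass_sketch(x: torch.Tensor, bandwidth: int, filter_mode: str = "low_pass") -> torch.Tensor:
    # x: (B, C, H, W). Keeps (low_pass) or removes (high_pass) a centered
    # bandwidth x bandwidth square of frequencies.
    f = torch.fft.fftshift(torch.fft.fft2(x), dim=(-2, -1))
    h, w = x.shape[-2:]
    mask = torch.zeros(h, w, dtype=torch.bool, device=x.device)
    cy, cx, half = h // 2, w // 2, bandwidth // 2
    mask[cy - half:cy + half + 1, cx - half:cx + half + 1] = True
    if filter_mode == "high_pass":
        mask = ~mask
    f = f * mask
    return torch.fft.ifft2(torch.fft.ifftshift(f, dim=(-2, -1))).real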
Example #6
                    adjust_eps=True,
                )
            )
            torchvision.utils.save_image(high_passed, "logs/highpass_w_adjust.png")


if __name__ == "__main__":
    max_scale = 1.0
    max_bandwidth = None

    # test Gaussian
    transform = shared.get_transform(
        32,
        mean=[0.49139968, 0.48215841, 0.44653091],
        std=[0.24703223, 0.24348513, 0.26158784],
        train=False,
        normalize=False,
        optional_transform=[
            noiseaugments.Gaussian(prob=1.0, max_scale=max_scale, randomize_scale=True)
        ],
    )
    dataset = torchvision.datasets.CIFAR10(
        root="data/cifar10", train=False, download=False, transform=transform
    )
    loader = torch.utils.data.DataLoader(dataset, 32, shuffle=False, num_workers=8)

    for x, _ in loader:
        torchvision.utils.save_image(x, "logs/gaussian.png")
        break
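    # noiseaugments.Gaussian(prob, max_scale, randomize_scale) above presumably
    # adds clipped Gaussian pixel noise with a (possibly randomized) scale; a
    # hypothetical equivalent, for illustration only:
    #
    #   def gaussian_noise(x, max_scale, randomize_scale=True):
    #       scale = max_scale * random.random() if randomize_scale else max_scale
    #       return (x + torch.randn_like(x) * scale).clamp(0.0, 1.0)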

    # test PatchGaussian
    transform = shared.get_transform(