Example No. 1
def get_architecture(arch: str, dataset: str) -> torch.nn.Module:
    """ Return a neural network (with random weights)

    :param arch: the architecture - should be in the ARCHITECTURES list above
    :param dataset: the dataset - should be in the datasets.DATASETS list
    :return: a PyTorch module
    """
    if arch == "resnet50" and dataset in ["imagenet", "restricted_imagenet"]:
        model = resnet50(pretrained=False)
        if dataset == "restricted_imagenet":
            model.fc = torch.nn.Linear(in_features=2048,
                                       out_features=10,
                                       bias=True)
        model = torch.nn.DataParallel(model).cuda()
        cudnn.benchmark = True
    elif arch == "cifar_resnet20":
        model = resnet_cifar(depth=20, num_classes=10).cuda()
    elif arch == "cifar_resnet110":
        model = resnet_cifar(depth=110, num_classes=10).cuda()
    elif arch == "imagenet32_resnet110":
        model = resnet_cifar(depth=110, num_classes=1000).cuda()

    # Both layers work fine; we tried both, and they give
    # very similar results.
    # IF YOU USE ONE OF THESE FOR TRAINING, MAKE SURE
    # TO USE THE SAME WHEN CERTIFYING.
    normalize_layer = get_normalize_layer(dataset)
    # normalize_layer = get_input_center_layer(dataset)
    if dataset == 'cifar10':
        V = VingetteModule((3, 32, 32), 'circ', 2).to('cuda')
    else:
        V = VingetteModule((3, 224, 224), 'circ', 2).to('cuda')
    return torch.nn.Sequential(V, normalize_layer, model)
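A minimal usage sketch for this variant, assuming the snippet's module exposes get_architecture; the import path and the dummy CIFAR-10 batch below are illustrative, and a CUDA device is required because every sub-module is moved to the GPU:

import torch

from architectures import get_architecture  # hypothetical import path

model = get_architecture("cifar_resnet20", "cifar10")
model.eval()

# Dummy CIFAR-10-shaped batch on the GPU.
x = torch.rand(8, 3, 32, 32, device="cuda")
with torch.no_grad():
    logits = model(x)  # (8, 10) class scores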
Example No. 2
def get_architecture(arch: str, dataset: str, device) -> torch.nn.Module:
    """ Return a neural network (with random weights)

    :param arch: the architecture - should be in the ARCHITECTURES list above
    :param dataset: the dataset - should be in the datasets.DATASETS list
    :param device: the torch.device to place the model on
    :return: a PyTorch module
    """
    if arch == "resnet50" and dataset == "imagenet":
        model = torch.nn.DataParallel(resnet50(pretrained=False)).to(device)
        cudnn.benchmark = True
    elif arch == "cifar_resnet20":
        model = resnet_cifar(depth=20, num_classes=10).to(device)
    elif arch == "cifar_resnet32":
        model = resnet_cifar(depth=32, num_classes=10).to(device)
    elif arch == "cifar_resnet110":
        model = resnet_cifar(depth=110, num_classes=10).to(device)
    elif arch == "lenet300":
        model = lenet300(num_classes=10).to(device)
    elif arch == "lenet5":
        model = lenet5(num_classes=10).to(device)
    elif arch == "fcn":
        model = fcn(num_classes=10).to(device)
    elif arch == "vgg19":
        model = vgg().to(device)
    elif arch == "vgg16":
        model = vgg_16().to(device)
    elif arch == "wide_resnet":
        model = wide_resnet().to(device)
    elif arch == "resnet20":
        model = resnet20().to(device)
    elif arch == "resnet32":
        model = resnet32().to(device)

    return model
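This variant takes an explicit device and returns the bare model without a normalization wrapper. A short usage sketch, with the import path again hypothetical; note that none of these variants has a fallback branch, so an unrecognized arch leaves model undefined:

import torch

from architectures import get_architecture  # hypothetical import path

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = get_architecture("cifar_resnet110", "cifar10", device)
print(sum(p.numel() for p in model.parameters()), "parameters")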
Example No. 3
def get_architecture(arch: str) -> torch.nn.Module:
    """ Return a neural network (with random weights)

    :param arch: the architecture - should be in the ARCHITECTURES list above
    :return: a PyTorch module
    """
    if arch == "cifar_resnet20":
        model = resnet_cifar(depth=20, num_classes=10).cuda()
    elif arch == "cifar_resnet110":
        model = resnet_cifar(depth=110, num_classes=10).cuda()
    normalize_layer = NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV)
    return torch.nn.Sequential(normalize_layer, model)
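NormalizeLayer itself is not shown in this example. A minimal sketch of what such a per-channel normalization module typically looks like; the constants below are the commonly used CIFAR-10 statistics and may differ from the ones the original code defines:

import torch

_CIFAR10_MEAN = [0.4914, 0.4822, 0.4465]
_CIFAR10_STDDEV = [0.2470, 0.2435, 0.2616]

class NormalizeLayer(torch.nn.Module):
    """Standardize each channel in-model, so inputs can stay in [0, 1]."""

    def __init__(self, means, sds):
        super().__init__()
        self.register_buffer("means", torch.tensor(means).view(1, -1, 1, 1))
        self.register_buffer("sds", torch.tensor(sds).view(1, -1, 1, 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return (x - self.means) / self.sds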
Example No. 4
def get_architecture(arch: str, dataset: str) -> torch.nn.Module:
    """ Return a neural network (with random weights)

    :param arch: the architecture - should be in the ARCHITECTURES list above
    :param dataset: the dataset - should be in the datasets.DATASETS list
    :return: a PyTorch module
    """

    ORDERED_CLASS_LABELS = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
    if arch == "resnet50" and dataset == "imagenet":
        model = torch.nn.DataParallel(resnet50(pretrained=False)).cuda()
        cudnn.benchmark = True
    elif arch == "cifar_resnet20":
        model = DeepCNN(len(ORDERED_CLASS_LABELS)) 
        # model = resnet_cifar(depth=20, num_classes=10).cuda()
    elif arch == "cifar_resnet110":
        model = DeepCNN(len(ORDERED_CLASS_LABELS)) 
        # model = resnet_cifar(depth=110, num_classes=10).cuda()
    elif arch == "imagenet32_resnet110":
        model = resnet_cifar(depth=110, num_classes=1000).cuda()

    # Both layers work fine; we tried both, and they give
    # very similar results.
    # IF YOU USE ONE OF THESE FOR TRAINING, MAKE SURE
    # TO USE THE SAME WHEN CERTIFYING.
    normalize_layer = get_normalize_layer(dataset)
    # normalize_layer = get_input_center_layer(dataset)
    return torch.nn.Sequential(normalize_layer, model)
Example No. 5
def get_architecture(arch: str, dataset: str) -> torch.nn.Module:
    """ Return a neural network (with random weights)

    :param arch: the architecture - should be in the ARCHITECTURES list above
    :param dataset: the dataset - should be in the datasets.DATASETS list
    :return: a PyTorch module
    """
    if arch == "resnet50" and dataset == "imagenet":
        # model = resnet50(pretrained=False)
        model = torch.nn.DataParallel(resnet50(pretrained=False)).cuda()
        cudnn.benchmark = True
    elif arch == "cifar_resnet20":
        model = resnet_cifar(depth=20, num_classes=10).cuda()
    elif arch == "cifar_resnet110":
        model = resnet_cifar(depth=110, num_classes=10).cuda()
    normalize_layer = get_normalize_layer(dataset)
    # return torch.nn.DataParallel(torch.nn.Sequential(normalize_layer, model))
    return torch.nn.Sequential(normalize_layer, model)
Example No. 6
def main():
    # init model, other architectures can be also used here for training
    model = resnet_cifar(depth=110, num_classes=10).to(device)
    model = nn.DataParallel(model)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    for epoch in range(0, args.epochs + 1):
        # adjust learning rate for SGD
        adjust_learning_rate(optimizer, epoch)

        # adversarial training
        train(args, model, device, train_loader, optimizer, epoch)

        # evaluation on natural examples
        print(
            '================================================================')
        eval_train(model, device, train_loader)
        eval_test(model, device, test_loader)

        # save checkpoint
        torch.save(model.state_dict(),
                   os.path.join(model_dir, 'model-res110-epoch.pt'))
        torch.save(optimizer.state_dict(),
                   os.path.join(model_dir, 'opt-res110-checkpoint-epoch.tar'))
        print(
            '================================================================')
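adjust_learning_rate is called in the loop above but not defined in the example. A sketch of the step-decay schedule commonly paired with this kind of adversarial-training script; the milestones and decay factors below are assumptions, not taken from the original code:

def adjust_learning_rate(optimizer, epoch):
    # Step decay: cut the learning rate at fixed epochs (illustrative milestones).
    lr = args.lr
    if epoch >= 75:
        lr = args.lr * 0.1
    if epoch >= 90:
        lr = args.lr * 0.01
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr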
Example No. 7
def get_architecture(arch: str,
                     dataset: str,
                     pytorch_pretrained: bool = False) -> torch.nn.Module:
    """ Return a neural network (with random weights)

    :param arch: the architecture - should be in the ARCHITECTURES list above
    :param dataset: the dataset - should be in the datasets.DATASETS list
    :param pytorch_pretrained: if True, load torchvision's pretrained ImageNet weights
    :return: a PyTorch module
    """
    ## ImageNet classifiers
    if arch == "resnet18" and dataset == "imagenet":
        model = torch.nn.DataParallel(
            resnet18(pretrained=pytorch_pretrained)).cuda()
        cudnn.benchmark = True
    elif arch == "resnet34" and dataset == "imagenet":
        model = torch.nn.DataParallel(
            resnet34(pretrained=pytorch_pretrained)).cuda()
        cudnn.benchmark = True
    elif arch == "resnet50" and dataset == "imagenet":
        model = torch.nn.DataParallel(
            resnet50(pretrained=pytorch_pretrained)).cuda()
        cudnn.benchmark = True

    ## Cifar classifiers
    elif arch == "cifar_resnet20":
        model = resnet_cifar(depth=20, num_classes=10).cuda()
    elif arch == "cifar_resnet110":
        model = resnet_cifar(depth=110, num_classes=10).cuda()
    elif arch == "imagenet32_resnet110":
        model = resnet_cifar(depth=110, num_classes=1000).cuda()
    elif arch == "imagenet32_wrn":
        model = WideResNet(depth=28, num_classes=1000, widen_factor=10).cuda()

    # Cifar10 Models from https://github.com/kuangliu/pytorch-cifar
    # The 14 models we use in the paper as surrogate models
    elif arch == "cifar_wrn":
        model = WideResNet(depth=28, num_classes=10, widen_factor=10).cuda()
    elif arch == "cifar_wrn40":
        model = WideResNet(depth=40, num_classes=10, widen_factor=10).cuda()
    elif arch == "VGG16":
        model = VGG('VGG16').cuda()
    elif arch == "VGG19":
        model = VGG('VGG19').cuda()
    elif arch == "ResNet18":
        model = ResNet18().cuda()
    elif arch == "PreActResNet18":
        model = PreActResNet18().cuda()
    elif arch == "GoogLeNet":
        model = GoogLeNet().cuda()
    elif arch == "DenseNet121":
        model = DenseNet121().cuda()
    elif arch == "ResNeXt29_2x64d":
        model = ResNeXt29_2x64d().cuda()
    elif arch == "MobileNet":
        model = MobileNet().cuda()
    elif arch == "MobileNetV2":
        model = MobileNetV2().cuda()
    elif arch == "SENet18":
        model = SENet18().cuda()
    elif arch == "ShuffleNetV2":
        model = ShuffleNetV2(1).cuda()
    elif arch == "EfficientNetB0":
        model = EfficientNetB0().cuda()

    ## Image Denoising Architectures
    elif arch == "cifar_dncnn":
        model = DnCNN(image_channels=3, depth=17, n_channels=64).cuda()
        return model
    elif arch == "cifar_dncnn_wide":
        model = DnCNN(image_channels=3, depth=17, n_channels=128).cuda()
        return model
    elif arch == 'memnet':
        model = MemNet(in_channels=3,
                       channels=64,
                       num_memblock=3,
                       num_resblock=6).cuda()
        return model
    elif arch == "imagenet_dncnn":
        model = torch.nn.DataParallel(
            DnCNN(image_channels=3, depth=17, n_channels=64)).cuda()
        cudnn.benchmark = True
        return model
    elif arch == 'imagenet_memnet':
        model = torch.nn.DataParallel(
            MemNet(in_channels=3, channels=64, num_memblock=3,
                   num_resblock=6)).cuda()
        cudnn.benchmark = True
        return model
    else:
        raise Exception('Unknown architecture.')

    normalize_layer = get_normalize_layer(dataset)
    return torch.nn.Sequential(normalize_layer, model)
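The denoising branches above return the raw model, while the classifier branches come back wrapped with the normalization layer. A hedged sketch of one way to compose the two into a denoise-then-classify pipeline (whether the original project combines them exactly like this is an assumption):

denoiser = get_architecture("cifar_dncnn", "cifar10")
classifier = get_architecture("cifar_resnet110", "cifar10")

# Denoise first, then classify; both sub-modules already live on the GPU,
# so inputs must be CUDA tensors in [0, 1].
pipeline = torch.nn.Sequential(denoiser, classifier).eval()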
Example No. 8
    """
    h = img.size(1)
    w = img.size(2)

    mask = np.ones((h, w), np.float32)

    mask = torch.from_numpy(mask)
    mask = mask.repeat(3, 1, 1)

    return mask


if __name__ == "__main__":
    # load the base classifier
    # base_classifier = WideResNet().cuda()
    base_classifier = resnet_cifar(depth=110, num_classes=10).to(device)
    base_classifier = nn.DataParallel(base_classifier)
    base_classifier.load_state_dict(torch.load(args.load_dir))

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier,
                                 get_num_classes(args.dataset), args.sigma)

    # prepare output file
    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tradius\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset
    dataset = get_dataset(args.dataset, args.split, args.imagesize)
    for i in tqdm(range(len(dataset))):
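        # The example is cut off here. The rest of this loop is only a sketch
        # of how such a certification loop typically continues; the sample
        # sizes, confidence level, and batch size (args.N0, args.N,
        # args.alpha, args.batch) are assumed names, and `import time` is
        # assumed at the top of the file.
        (x, label) = dataset[i]
        x = x.to(device)

        before_time = time.time()
        prediction, radius = smoothed_classifier.certify(
            x, args.N0, args.N, args.alpha, args.batch)
        after_time = time.time()

        correct = int(prediction == label)
        print("{}\t{}\t{}\t{:.3f}\t{}\t{:.2f}".format(
            i, label, prediction, radius, correct, after_time - before_time),
            file=f, flush=True)

    f.close()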