Example 1
    def __init__(self, args):

        self.args = args

        transformer = T.Compose([T.ToTensor()])
        kwargs = {'num_workers': 4, 'pin_memory': True}

        self.val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(args.data_root,
                             train=False,
                             transform=transformer),
            batch_size=args.batch_size,
            shuffle=True,
            **kwargs)

        # Create model, optimizer and scheduler
        self.model = models.WRN(depth=32, width=10, num_classes=10)
        self.model = torch.nn.DataParallel(self.model).cuda()

        # Loading model
        assert self.args.restore is not None

        model_data = torch.load(self.args.restore)
        self.model.load_state_dict(model_data['model'])
        self.model.eval()

        cudnn.benchmark = True
        self.save_path = self.args.save_path
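
Example 1 only builds the evaluation side: a test loader and a restored WRN in eval mode. A minimal clean-accuracy pass over self.val_loader might look like the sketch below; the eval method name and loop structure are assumptions, not part of the original snippet.

    def eval(self):
        correct, total = 0, 0
        with torch.no_grad():
            for images, labels in self.val_loader:
                images, labels = images.cuda(), labels.cuda()
                # argmax over the class logits gives the predicted label
                preds = self.model(images).argmax(dim=1)
                correct += (preds == labels).sum().item()
                total += labels.size(0)
        print('Clean accuracy: {:.2f}%'.format(100.0 * correct / total))
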
Example 2
    def __init__(self, args):

        self.args = args

        transformer = T.Compose([T.ToTensor()])
        kwargs = {'num_workers': 4, 'pin_memory': True}

        train_set = datasets.CIFAR10(args.data_root,
                                     train=True,
                                     transform=transformer,
                                     download=True)
        self.train_loader = torch.utils.data.DataLoader(
            train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
        # Cache the raw training images as a flat float array in [0, 1]:
        # (N, 32, 32, 3) uint8 -> (N, 3, 32, 32) -> (N, 3072)
        self.train_samples_np = train_set.data.astype(np.float32)
        self.train_samples_np = self.train_samples_np.transpose(0, 3, 1, 2)
        self.train_samples_np = np.reshape(
            self.train_samples_np, (self.train_samples_np.shape[0], -1))
        self.train_samples_np = self.train_samples_np / 255.0
        self.labels_np = np.array(train_set.targets)

        # Create model, optimizer and scheduler
        self.model = models.WRN(depth=32, width=10, num_classes=10)
        self.model = torch.nn.DataParallel(self.model).cuda()

        # Loading model
        assert self.args.restore is not None

        model_data = torch.load(self.args.restore)
        self.model.load_state_dict(model_data['model'])
        self.model.eval()

        cudnn.benchmark = True
        self.attacker = PGDAttacker(args.attack_eps)
        self.save_path = self.args.save_path
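
Example 2 additionally keeps the whole training set as a flat (N, 3072) float array in [0, 1]. One plausible use for such a cache is pixel-space nearest-neighbor lookup; the helper below is a hypothetical illustration (nearest_train_indices is not in the original code).

    def nearest_train_indices(self, batch_np, k=1):
        # batch_np: (B, 3072) float32 in [0, 1], same layout as
        # self.train_samples_np. Expand ||a - b||^2 as
        # ||a||^2 - 2 a.b + ||b||^2 to avoid a (B, N, 3072) intermediate.
        train = self.train_samples_np
        d2 = ((batch_np ** 2).sum(1)[:, None]
              - 2.0 * batch_np @ train.T
              + (train ** 2).sum(1)[None, :])
        # indices of the k closest training images per query row
        return np.argsort(d2, axis=1)[:, :k]
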
Example 3
    def __init__(self, args):

        self.args = args

        # Creating data loaders
        transform_train = T.Compose([
            T.Pad(4, padding_mode='reflect'),
            T.RandomCrop(32),
            T.RandomHorizontalFlip(),
            T.ToTensor()
        ])

        transform_test = T.Compose([T.ToTensor()])

        kwargs = {'num_workers': 4, 'pin_memory': True}

        train_dataset = datasets.CIFAR10(args.data_root,
                                         train=True,
                                         download=True,
                                         transform=transform_train)
        self.train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
        self.val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(args.data_root,
                             train=False,
                             transform=transform_test),
            batch_size=args.batch_size,
            shuffle=True,
            **kwargs)

        # Create model, optimizer and scheduler
        self.model = models.WRN(depth=32, width=10, num_classes=10)
        self.model = torch.nn.DataParallel(self.model).cuda()
        self.optimizer = optim.SGD(self.model.parameters(),
                                   args.lr,
                                   momentum=0.9,
                                   weight_decay=args.weight_decay)
        self.lr_scheduler = optim.lr_scheduler.MultiStepLR(
            self.optimizer, milestones=[70, 90, 100], gamma=0.2)

        print('Number of model parameters: {}'.format(
            sum([p.data.nelement() for p in self.model.parameters()])))

        self.save_path = args.save_path
        self.epoch = 0

        num_samples = len(train_dataset)
        # per-sample epsilon state consumed by the adaptive PGD attacker
        self.epsilon_memory = torch.FloatTensor(num_samples).zero_().cuda()

        # resume from checkpoint
        ckpt_path = osp.join(args.save_path, 'checkpoint.pth')
        if osp.exists(ckpt_path):
            self._load_from_checkpoint(ckpt_path)
        elif args.restore:
            self._load_from_checkpoint(args.restore)

        cudnn.benchmark = True
        self.attacker = PGDAttackerAdaptive()
        self.attacker_test = PGDAttacker(args.attack_eps)
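
Both this trainer and Example 5 call a _load_from_checkpoint helper that the snippets do not show. A sketch consistent with what __init__ sets up follows; every key except 'model' (which the restore path in Examples 1, 2, and 4 confirms) is an assumption.

    def _load_from_checkpoint(self, ckpt_path):
        ckpt = torch.load(ckpt_path)
        self.model.load_state_dict(ckpt['model'])
        # the remaining fields are assumed checkpoint keys, not confirmed
        # by the original snippets
        if 'optimizer' in ckpt:
            self.optimizer.load_state_dict(ckpt['optimizer'])
        if 'scheduler' in ckpt:
            self.lr_scheduler.load_state_dict(ckpt['scheduler'])
        self.epoch = ckpt.get('epoch', 0)
        if 'epsilon_memory' in ckpt:
            self.epsilon_memory.copy_(ckpt['epsilon_memory'])
        print('Resumed from {} (epoch {})'.format(ckpt_path, self.epoch))
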
Example 4
    def __init__(self, args):

        self.args = args

        transformer = T.Compose([T.ToTensor()])
        kwargs = {'num_workers': 4, 'pin_memory': True}

        self.val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(args.data_root,
                             train=False,
                             transform=transformer),
            batch_size=args.batch_size,
            shuffle=True,
            **kwargs)

        # Create model, optimizer and scheduler
        self.model = models.WRN(depth=34, width=1, num_classes=10)
        if args.spbn:
            print("SPBN training!")
            self.model = models.convert_splitbn_model(self.model).cuda()
        else:
            self.model.cuda()

        # Loading model
        assert self.args.restore is not None

        model_data = torch.load(self.args.restore)
        self.model.load_state_dict(model_data['model'])
        self.model.eval()

        cudnn.benchmark = True
        self.save_path = self.args.save_path
        self.epsilons = args.epsilon / 255.0

        # Foolbox Attack #
        self.model = foolbox.PyTorchModel(self.model, bounds=(0, 1))
        if args.attack == 'FGSM':
            self.attack = foolbox.attacks.LinfFastGradientAttack(
                random_start=True)
        elif args.attack == 'PGD':
            self.attack = foolbox.attacks.LinfPGD(steps=args.attack_steps,
                                                  abs_stepsize=2.0 / 255.0,
                                                  random_start=True)
        elif args.attack == 'BA':
            self.attack = foolbox.attacks.BoundaryAttack()
        elif args.attack == 'CW':
            self.attack = foolbox.attacks.L2CarliniWagnerAttack(
                steps=1000, confidence=20)  # confidence == kappa
        else:
            # fail fast instead of leaving self.attack undefined
            raise ValueError('Unknown attack: {}'.format(args.attack))
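
Example 4 wraps the restored network in a foolbox.PyTorchModel and picks an attack by name. Running it over the validation set could look like the sketch below; eval_attack is a hypothetical method name, while the call signature follows the Foolbox 3.x convention of returning raw adversarials, epsilon-clipped adversarials, and a per-sample success mask.

    def eval_attack(self):
        n_robust, n_total = 0, 0
        for images, labels in self.val_loader:
            images, labels = images.cuda(), labels.cuda()
            _, _, success = self.attack(self.model, images, labels,
                                        epsilons=self.epsilons)
            # a sample counts as robust if the attack failed on it
            n_robust += (~success).sum().item()
            n_total += labels.size(0)
        print('Robust accuracy: {:.2f}%'.format(100.0 * n_robust / n_total))
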
Example 5
    def __init__(self, args):

        self.args = args

        # Creating data loaders
        transform_train = T.Compose([
            T.Pad(4, padding_mode='reflect'),
            T.RandomCrop(32),
            T.RandomHorizontalFlip(),
            T.ToTensor()
        ])

        transform_test = T.Compose([T.ToTensor()])

        kwargs = {'num_workers': 8, 'pin_memory': True}

        self.train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(args.data_root,
                             train=True,
                             download=True,
                             transform=transform_train),
            batch_size=args.batch_size,
            shuffle=True,
            **kwargs)
        self.val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(args.data_root,
                             train=False,
                             transform=transform_test),
            batch_size=args.batch_size,
            shuffle=True,
            **kwargs)

        # Create model, optimizer and scheduler
        self.model = models.WRN(depth=34, width=1, num_classes=10)

        self.spbn_flag = args.spbn
        if self.spbn_flag:
            print("SPBN training!")
            self.model = models.convert_splitbn_model(self.model,
                                                      momentum=0.5).cuda()
        else:
            self.model.cuda()

        self.lambda_ = 0.9

        # spbn_1 = 0.7, adv_momentum = 0.1
        # spbn_2 = 0.7, adv_momentum = 0.01
        # spbn_3 = 0.9, adv_momentum = 0.01
        # spbn_4 = 0.9, adv_momentum = 0.5

        self.optimizer = optim.SGD(self.model.parameters(),
                                   args.lr,
                                   momentum=0.9,
                                   weight_decay=args.weight_decay)
        self.lr_scheduler = optim.lr_scheduler.MultiStepLR(
            self.optimizer, milestones=[60, 120, 160], gamma=0.1)

        print('Number of model parameters: {}'.format(
            sum([p.data.nelement() for p in self.model.parameters()])))

        self.save_path = args.save_path
        self.epoch = 0
        cudnn.benchmark = True
        self.attacker = PGDAttacker(args.attack_eps)

        # resume from checkpoint
        if args.resume:
            ckpt_path = os.path.join(args.save_path, 'checkpoint.pth')
            if os.path.exists(ckpt_path):
                self._load_from_checkpoint(ckpt_path)
            elif args.restore:
                self._load_from_checkpoint(args.restore)
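
The lambda_ = 0.9 coefficient above suggests a convex mix of adversarial and clean losses. A minimal training step under that assumption is sketched below; train_step, the attacker's perturb method, and the use of torch.nn.functional as F are all assumptions, not code from the original.

    def train_step(self, images, labels):
        # F is torch.nn.functional (assumed imported elsewhere in the project)
        self.model.train()
        adv_images = self.attacker.perturb(self.model, images, labels)
        adv_loss = F.cross_entropy(self.model(adv_images), labels)
        clean_loss = F.cross_entropy(self.model(images), labels)
        # weight the two objectives with the mixing coefficient from __init__
        loss = self.lambda_ * adv_loss + (1.0 - self.lambda_) * clean_loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()
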