Code example #1
File: IGR_Test.py Project: zjz5250/DEEPSEC
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    batch_size = 256  # following the settings of the original paper
    # Get training parameters, set up model frameworks and then get the train_loader and test_loader
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    if dataset == 'MNIST':
        training_parameters = MNIST_Training_Parameters
        model_framework = MNISTConvNet().to(device)
        train_loader, valid_loader = get_mnist_train_validate_loader(dir_name='../RawModels/MNIST/', batch_size=batch_size, valid_size=0.1,
                                                                     shuffle=True)
    else:
        training_parameters = CIFAR10_Training_Parameters
        model_framework = resnet20_cifar().to(device)
        train_loader, valid_loader = get_cifar10_train_validate_loader(dir_name='../RawModels/CIFAR10/', batch_size=batch_size, valid_size=0.1,
                                                                       shuffle=True)

    defense_name = 'IGR'
    igr = IGRDefense(model=model_framework, defense_name=defense_name, dataset=dataset, training_parameters=training_parameters,
                     lambda_r=args.lambda_r, device=device)
    igr.defense(train_loader=train_loader, validation_loader=valid_loader)
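
Each of these test scripts is driven by an argparse namespace; main only reads attributes such as gpu_index, seed, dataset and lambda_r. A minimal driver sketch for IGR_Test.py is shown below; the argument names are taken from main above, while the defaults and help strings are purely illustrative and may differ from the repository's actual parser.

if __name__ == '__main__':
    import argparse
    # Illustrative entry point; defaults are assumptions, not the repository's values.
    parser = argparse.ArgumentParser(description='IGR defense training (illustrative driver)')
    parser.add_argument('--dataset', type=str, default='MNIST', help="'MNIST' or 'CIFAR10'")
    parser.add_argument('--gpu_index', type=str, default='0', help='value for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--seed', type=int, default=100, help='random seed for reproducibility')
    parser.add_argument('--lambda_r', type=float, default=10.0, help='weight of the input-gradient regularization term (illustrative default)')
    main(parser.parse_args())
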
Code example #2
File: RT_Test.py Project: zjz5250/DEEPSEC
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    batch_size = 200
    # Get training parameters, set up model frameworks and then get the train_loader and test_loader
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    # build the raw model location from the normalized (upper-case) dataset name
    model_location = '{}/{}/model/{}_raw.pt'.format('../RawModels', dataset, dataset)
    if dataset == 'MNIST':
        raw_model = MNISTConvNet().to(device)
        test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/',
                                            batch_size=batch_size)
    else:
        raw_model = resnet20_cifar().to(device)
        test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/',
                                              batch_size=batch_size)
    raw_model.load(path=model_location, device=device)

    defense_name = 'RT'
    rt = RTDefense(model=raw_model,
                   defense_name=defense_name,
                   dataset=dataset,
                   device=device)

    # predicting the testing dataset using the randomization transformation defense
    raw_model.eval()
    total = 0.0
    correct = 0.0
    with torch.no_grad():
        for index, (images, labels) in enumerate(test_loader):
            # input images first go through the randomization transformation layer and then the resulting images are fed into the original model
            transformed_images = rt.randomization_transformation(
                samples=images,
                original_size=images.shape[-1],
                final_size=args.resize)
            outputs = raw_model(transformed_images)

            labels = labels.to(device)
            _, predicted = torch.max(outputs.data, 1)
            total = total + labels.size(0)
            correct = correct + (predicted == labels).sum().item()
        ratio = correct / total
        print(
            '\nTest accuracy of the {} model on the testing dataset: {:.1f}/{:.1f} = {:.2f}%\n'
            .format(raw_model.model_name, correct, total, ratio * 100))
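
The randomization transformation used above (random resizing followed by random zero-padding, as in Xie et al.'s RT defense) can be illustrated with a small standalone sketch. This is not the repository's RTDefense.randomization_transformation, only an approximation of the technique it implements:

import random
import torch.nn.functional as F

def random_resize_and_pad(samples, original_size, final_size):
    # Illustrative sketch: pick a random intermediate size in [original_size, final_size)
    rnd_size = random.randint(original_size, final_size - 1)
    resized = F.interpolate(samples, size=(rnd_size, rnd_size), mode='nearest')
    # randomly split the remaining pixels between left/right and top/bottom zero-padding
    pad_total = final_size - rnd_size
    pad_left = random.randint(0, pad_total)
    pad_top = random.randint(0, pad_total)
    return F.pad(resized, (pad_left, pad_total - pad_left, pad_top, pad_total - pad_top), value=0)
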
Code example #3
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # Get training parameters, set up model frameworks and then get the train_loader and test_loader
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    if dataset == 'MNIST':
        training_parameters = MNIST_Training_Parameters
        model_framework = MNISTConvNet().to(device)
        batch_size = training_parameters['batch_size']
        train_loader, valid_loader = get_mnist_train_validate_loader(
            dir_name='../RawModels/MNIST/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=True)
    else:
        training_parameters = CIFAR10_Training_Parameters
        model_framework = resnet20_cifar().to(device)
        batch_size = training_parameters['batch_size']
        train_loader, valid_loader = get_cifar10_train_validate_loader(
            dir_name='../RawModels/CIFAR10/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=True)

    defense_name = 'NAT'
    nat_params = {
        'adv_ratio': args.adv_ratio,
        'eps_min': args.clip_min,
        'eps_max': args.clip_max,
        'mu': args.eps_mu,
        'sigma': args.eps_sigma
    }

    nat = NATDefense(model=model_framework,
                     defense_name=defense_name,
                     dataset=dataset,
                     training_parameters=training_parameters,
                     device=device,
                     **nat_params)
    nat.defense(train_loader=train_loader, validation_loader=valid_loader)
Code example #4
    def __init__(self, dataset='MNIST', attack_name='FGSM', targeted=False, raw_model_location='../RawModels/',
                 clean_data_location='../CleanDatasets/', adv_examples_dir='../AdversarialExampleDatasets/',
                 device=torch.device('cpu')):
        # check and set the support data set
        self.dataset = dataset.upper()
        if self.dataset not in {'MNIST', 'CIFAR10'}:
            raise ValueError("The data set must be MNIST or CIFAR10")

        # check and set the supported attack
        self.attack_name = attack_name.upper()
        supported = {'FGSM', 'RFGSM', 'BIM', 'PGD', 'UMIFGSM', 'UAP', 'DEEPFOOL', 'OM', 'LLC', "RLLC", 'ILLC', 'TMIFGSM', 'JSMA', 'BLB', 'CW2',
                     'EAD'}
        if self.attack_name not in supported:
            raise ValueError(self.attack_name + ' is unknown!\nCurrently, our implementation supports the attacks: ' + ', '.join(supported))

        # load the raw model
        raw_model_location = '{}{}/model/{}_raw.pt'.format(raw_model_location, self.dataset, self.dataset)
        if self.dataset == 'MNIST':
            self.raw_model = MNISTConvNet().to(device)
            self.raw_model.load(path=raw_model_location, device=device)
        else:
            self.raw_model = resnet20_cifar().to(device)
            self.raw_model.load(path=raw_model_location, device=device)

        # get the clean data sets / true_labels / targets (if the attack is one of the targeted attacks)
        print('Loading the prepared clean samples (natural inputs and corresponding labels) that will be attacked ...... ')
        self.nature_samples = np.load('{}{}/{}_inputs.npy'.format(clean_data_location, self.dataset, self.dataset))
        self.labels_samples = np.load('{}{}/{}_labels.npy'.format(clean_data_location, self.dataset, self.dataset))

        if targeted:
            print('For Targeted Attacks, loading the randomly selected targeted labels that will be attacked ......')
            if self.attack_name.upper() in ['LLC', 'RLLC', 'ILLC']:
                print('#### Especially, for LLC, RLLC, ILLC, loading the least likely class that will be attacked')
                self.targets_samples = np.load('{}{}/{}_llc.npy'.format(clean_data_location, self.dataset, self.dataset))
            else:
                self.targets_samples = np.load('{}{}/{}_targets.npy'.format(clean_data_location, self.dataset, self.dataset))

        # prepare the directory for the attacker to save their generated adversarial examples
        self.adv_examples_dir = adv_examples_dir + self.attack_name + '/' + self.dataset + '/'
        if self.attack_name not in os.listdir(adv_examples_dir):
            os.mkdir(adv_examples_dir + self.attack_name + '/')

        if self.dataset not in os.listdir(adv_examples_dir + self.attack_name + '/'):
            os.mkdir(self.adv_examples_dir)
        else:
            shutil.rmtree('{}'.format(self.adv_examples_dir))
            os.mkdir(self.adv_examples_dir)

        # set up device
        self.device = device
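
The directory-preparation block above (listdir checks followed by mkdir/rmtree) can be expressed more compactly; a hedged equivalent sketch with the same "wipe and recreate" behaviour is given below. The helper name prepare_adv_dir is hypothetical, introduced only for illustration.

import os
import shutil

def prepare_adv_dir(adv_examples_dir, attack_name, dataset):
    # recreate <adv_examples_dir>/<attack_name>/<dataset>/ from scratch,
    # mirroring the mkdir/rmtree logic in the __init__ above
    target_dir = os.path.join(adv_examples_dir, attack_name, dataset)
    if os.path.isdir(target_dir):
        shutil.rmtree(target_dir)
    os.makedirs(target_dir)
    return target_dir + '/'
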
Code example #5
File: EIT_Test.py Project: zjz5250/DEEPSEC
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    if dataset == 'MNIST':
        training_parameters = MNIST_Training_Parameters
        model_framework = MNISTConvNet().to(device)
        batch_size = training_parameters['batch_size']
        raw_train_loader, raw_valid_loader = get_mnist_train_validate_loader(dir_name='../RawModels/MNIST/', batch_size=batch_size,
                                                                             valid_size=0.1, shuffle=False)
    else:
        training_parameters = CIFAR10_Training_Parameters
        model_framework = resnet20_cifar().to(device)
        batch_size = training_parameters['batch_size']
        raw_train_loader, raw_valid_loader = get_cifar10_train_validate_loader(dir_name='../RawModels/CIFAR10/', augment=False,
                                                                               batch_size=batch_size, valid_size=0.1, shuffle=False)
        print('cifar 10', len(raw_train_loader.dataset))

    defense_name = 'EIT'
    eit_params = {
        'crop_size': args.crop_size,
        'lambda_tv': args.lambda_tv,
        'JPEG_quality': args.JPEG_quality,
        'bit_depth': args.bit_depth
    }

    EIT = EITDefense(model=model_framework, defense_name=defense_name, dataset=dataset, re_training=True,
                     training_parameters=training_parameters, device=device, **eit_params)
    EIT.defense(train_loader=raw_train_loader, valid_loader=raw_valid_loader)
Code example #6
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'

    # load the raw model / testing dataset loader
    raw_model_location = '{}{}/model/{}_raw.pt'.format('../RawModels/', dataset, dataset)
    if dataset == 'MNIST':
        raw_model = MNISTConvNet().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/', batch_size=30)
    else:
        raw_model = resnet20_cifar().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/', batch_size=25)
    raw_model.eval()

    # get predictions of the raw model on test datasets
    predicted_raw, true_label = prediction(model=raw_model, test_loader=test_loader, device=device)

    re_train_defenses = {'NAT', 'EAT', 'PAT', 'DD', 'IGR'}
    input_transformation_defenses = {'EIT', 'RT', 'PD', 'TE'}
    other_defenses = {'RC'}

    defense_name = args.defense.upper().strip()
    if defense_name in re_train_defenses:
        print('\nthe ##{}## defense is a complete defense that retrains the model'.format(defense_name))
        # load the defense-enhanced model
        defended_model_location = '{}/{}/{}_{}_enhanced.pt'.format('../DefenseEnhancedModels', defense_name, dataset, defense_name)
        defended_model = MNISTConvNet().to(device) if dataset == 'MNIST' else resnet20_cifar().to(device)
        defended_model.load(path=defended_model_location, device=device)
        defended_model.eval()
        predicted_defended, _ = prediction(model=defended_model, test_loader=test_loader, device=device)
        raw_acc, def_acc, cav, crr, csr, ccv, cos = defense_utility_measure(predicted_defended, predicted_raw, true_label)

    elif defense_name in input_transformation_defenses:
        print('\nthe ##{}## defense is a complete defense that needs to transform the images ... '.format(defense_name))
        if defense_name == 'EIT':

            from Defenses.DefenseMethods.EIT import EITDefense, TransformedDataset
            eit_params = {
                'crop_size': args.crop_size,
                'lambda_tv': args.lambda_tv,
                'JPEG_quality': args.JPEG_quality,
                'bit_depth': args.bit_depth
            }
            defended_model = MNISTConvNet().to(device) if dataset == 'MNIST' else resnet20_cifar().to(device)
            EIT = EITDefense(model=defended_model, defense_name=defense_name, dataset=dataset, re_training=False, training_parameters=None,
                             device=device, **eit_params)
            transformed_test_data_numpy, test_label_numpy = EIT.transforming_dataset(data_loader=test_loader)
            transformed_test_dataset = TransformedDataset(images=torch.from_numpy(transformed_test_data_numpy),
                                                          labels=torch.from_numpy(test_label_numpy), dataset=dataset, transform=None)
            transformed_test_loader = torch.utils.data.DataLoader(transformed_test_dataset, batch_size=100, shuffle=False)

            defended_model_location = '{}/{}/{}_{}_enhanced.pt'.format('../DefenseEnhancedModels', defense_name, dataset, defense_name)
            defended_model = defended_model.to(device)
            defended_model.load(path=defended_model_location, device=device)
            defended_model.eval()

            predicted_defended, _ = prediction(model=defended_model, test_loader=transformed_test_loader, device=device)
            raw_acc, def_acc, cav, crr, csr, ccv, cos = defense_utility_measure(predicted_defended, predicted_raw, true_label)

        elif defense_name == 'RT':
            final_size = args.resize
            assert isinstance(final_size, int)
            warnings.warn(message='For the RT defense, the #resize# parameter is specified as {}, please check ...'.format(final_size))
            predicted_defended = rt_prediction(model=raw_model, dataset=dataset, data_loader=test_loader, final_size=final_size, device=device)
            # test the utility performance of defended model
            raw_acc, def_acc, cav, crr, csr, ccv, cos = defense_utility_measure(predicted_defended, predicted_raw, true_label)
        elif defense_name == 'PD':
            epsilon = args.epsilon
            warnings.warn(message='For the PixelDefend defense, the #epsilon# parameter is specified as {}, please check ...'.format(epsilon))
            predicted_defended = pd_prediction(model=raw_model, dataset=dataset, data_loader=test_loader, epsilon=epsilon, device=device)
            raw_acc, def_acc, cav, crr, csr, ccv, cos = defense_utility_measure(predicted_defended, predicted_raw, true_label)
        else:
            level = args.level
            assert defense_name == 'TE' and isinstance(level, int)
            warnings.warn(message='For the TE defense, the #level# parameter is specified as {}, please check ...'.format(level))

            # load the defense-enhanced model (for TE)
            defended_model_location = '{}/{}/{}_{}_enhanced.pt'.format('../DefenseEnhancedModels', defense_name, dataset, defense_name)
            defended_model = MNISTConvNet(thermometer=True, level=level) if dataset == 'MNIST' else resnet20_cifar(thermometer=True, level=level)
            defended_model = defended_model.to(device)
            defended_model.load(path=defended_model_location, device=device)
            defended_model.eval()
            predicted_defended = te_prediction(model=defended_model, data_loader=test_loader, level=level, device=device)
            raw_acc, def_acc, cav, crr, csr, ccv, cos = defense_utility_measure(predicted_defended, predicted_raw, true_label)

    else:
        if defense_name == 'RC':
            print('\n##{}## defense is a region-based classification defense ... '.format(defense_name))
            from Defenses.DefenseMethods.RC import RCDefense
            num_points = 1000
            radius = args.radius
            rc = RCDefense(model=raw_model, defense_name='RC', dataset=dataset, device=device, num_points=num_points)

            predicted_defended = []
            with torch.no_grad():
                for index, (images, labels) in enumerate(test_loader):
                    rc_labels = rc.region_based_classification(samples=images, radius=radius)
                    predicted_defended.extend(rc_labels)
            predicted_defended = np.array(predicted_defended)

            # classification accuracy of defense-enhanced model
            correct_prediction_def = np.equal(predicted_defended, true_label)
            def_acc = np.mean(correct_prediction_def.astype(float))
            # classification accuracy of raw model
            correct_prediction_raw = np.equal(np.argmax(predicted_raw, axis=1), true_label)
            raw_acc = np.mean(correct_prediction_raw.astype(float))
            # Classification Accuracy Variance(CAV)
            cav = def_acc - raw_acc

            # Find the index of correct predicted examples by defence-enhanced model and raw model
            idx_def = np.squeeze(np.argwhere(correct_prediction_def == True))
            idx_raw = np.squeeze(np.argwhere(correct_prediction_raw == True))
            idx = np.intersect1d(idx_def, idx_raw, assume_unique=True)

            # Compute the Classification Rectify Ratio(CRR) & Classification Sacrifice Ratio(CSR)
            crr = (len(idx_def) - len(idx)) / len(predicted_raw)
            csr = (len(idx_raw) - len(idx)) / len(predicted_raw)
            ccv = cos = 0

        else:
            raise ValueError('{} is not supported!!!'.format(defense_name))

    print("****************************")
    print("The utility evaluation results of the {} defense for {} Dataset are as follow:".format(defense_name, dataset))
    print('Acc of Raw Model:\t\t{:.2f}%'.format(raw_acc * 100))
    print('Acc of {}-enhanced Model:\t{:.2f}%'.format(defense_name, def_acc * 100))
    print('CAV: {:.2f}%'.format(cav * 100))
    print('CRR: {:.2f}%'.format(crr * 100))
    print('CSR: {:.2f}%'.format(csr * 100))
    print('CCV: {:.2f}%'.format(ccv * 100))
    print('COS: {:.4f}'.format(cos))
    print("****************************")
Code example #7
    def defense_predication(self, DefenseModelDirs, defense_name, **kwargs):
        """

        :param DefenseModelDirs:
        :param defense_name:
        :param kwargs:
        :return:
        """
        re_train_defenses = {'NAT', 'EAT', 'PAT', 'DD', 'IGR'}
        input_transformation_defenses = {'EIT', 'RT', 'PD', 'TE'}
        other_defenses = {'RC'}

        defense_name = defense_name.upper().strip()
        assert defense_name in re_train_defenses or defense_name in input_transformation_defenses or defense_name in other_defenses

        if defense_name in re_train_defenses:
            print(
                '\n##{}## defense is a complete defense that retrains the model'
                .format(defense_name))
            # load the defense-enhanced model
            defended_model_location = '{}/{}/{}_{}_enhanced.pt'.format(
                DefenseModelDirs, defense_name, self.dataset, defense_name)
            defended_model = MNISTConvNet().to(
                self.device) if self.dataset == 'MNIST' else resnet20_cifar(
                ).to(self.device)
            defended_model.load(path=defended_model_location,
                                device=self.device)
            defended_model.eval()

            predication = predict(model=defended_model,
                                  samples=self.adv_samples,
                                  device=self.device)
            labels = torch.argmax(predication, 1).cpu().numpy()
            return labels

        elif defense_name in input_transformation_defenses:
            print(
                '\n##{}## defense is a complete defense that needs to transform the images ... '
                .format(defense_name))
            if defense_name == 'EIT':

                from Defenses.DefenseMethods.EIT import EITDefense
                eit_params = {
                    'crop_size': kwargs['crop_size'],
                    'lambda_tv': kwargs['lambda_tv'],
                    'JPEG_quality': kwargs['JPEG_quality'],
                    'bit_depth': kwargs['bit_depth']
                }
                defended_model = MNISTConvNet().to(
                    self.device
                ) if self.dataset == 'MNIST' else resnet20_cifar().to(
                    self.device)
                defended_model_location = '{}/{}/{}_{}_enhanced.pt'.format(
                    '../DefenseEnhancedModels', defense_name, self.dataset,
                    defense_name)
                defended_model = defended_model.to(self.device)
                defended_model.load(path=defended_model_location,
                                    device=self.device)
                defended_model.eval()

                EIT = EITDefense(model=defended_model,
                                 defense_name=defense_name,
                                 dataset=self.dataset,
                                 re_training=False,
                                 training_parameters=None,
                                 device=self.device,
                                 **eit_params)

                transformed_images = EIT.ensemble_input_transformations(
                    images=self.adv_samples)
                predication = predict(model=defended_model,
                                      samples=transformed_images,
                                      device=self.device)
                labels = torch.argmax(predication, 1).cpu().numpy()
                return labels

            elif defense_name == 'RT':
                assert 'rt_resize' in kwargs
                final_size = kwargs['rt_resize']
                assert isinstance(final_size, int)
                warnings.warn(
                    message=
                    'For the RT defense, the #resize# parameter is specified as {}, please check ...'
                    .format(final_size))

                from Defenses.DefenseMethods.RT import RTDefense
                rt = RTDefense(model=self.raw_model,
                               defense_name='RT',
                               dataset=self.dataset,
                               device=self.device)
                transformed_images = rt.randomization_transformation(
                    samples=self.adv_samples,
                    original_size=self.adv_samples.shape[-1],
                    final_size=final_size)
                predication = predict(model=self.raw_model,
                                      samples=transformed_images,
                                      device=self.device)
                labels = torch.argmax(predication, 1).cpu().numpy()
                return labels

            elif defense_name == 'PD':
                assert 'pd_eps' in kwargs
                epsilon = kwargs['pd_eps']
                warnings.warn(
                    message=
                    'For the PixelDefend defense, the #epsilon# parameter is specified as {}, please check ...'
                    .format(epsilon))
                from Defenses.DefenseMethods.PD import PixelDefend

                pd = PixelDefend(model=self.raw_model,
                                 defense_name='PD',
                                 dataset=self.dataset,
                                 pixel_cnn_dir='../Defenses/',
                                 device=self.device)
                purified_images = pd.de_noising_samples_batch(
                    samples=self.adv_samples, batch_size=20, eps=epsilon)

                predication = predict(model=self.raw_model,
                                      samples=purified_images,
                                      device=self.device)
                labels = torch.argmax(predication, 1).cpu().numpy()
                return labels

            else:
                assert 'te_level' in kwargs
                level = kwargs['te_level']
                assert defense_name == 'TE' and isinstance(level, int)
                warnings.warn(
                    message=
                    'For the TE defense, the #level# parameter is specified as {}, please check ...'
                    .format(level))

                # load the defense-enhanced model (for TE)
                defended_model_location = '{}/{}/{}_{}_enhanced.pt'.format(
                    '../DefenseEnhancedModels', defense_name, self.dataset,
                    defense_name)
                te_defended_model = MNISTConvNet(thermometer=True, level=level).to(self.device) if self.dataset == 'MNIST' \
                    else resnet20_cifar(thermometer=True, level=level).to(self.device)

                te_defended_model.load(path=defended_model_location,
                                       device=self.device)
                te_defended_model.eval()

                from Defenses.DefenseMethods.TE import thermometer_encoding
                therm_inputs = thermometer_encoding(samples=torch.from_numpy(
                    self.adv_samples).to(self.device),
                                                    level=level,
                                                    device=self.device)

                predication = predict(model=te_defended_model,
                                      samples=therm_inputs,
                                      device=self.device)
                labels = torch.argmax(predication, 1).cpu().numpy()
                return labels
        else:
            if defense_name == 'RC':
                print(
                    '\n##{}## defense is a region-based classification defense ... '
                    .format(defense_name))
                from Defenses.DefenseMethods.RC import RCDefense
                num_points = 1000

                assert 'rc_radius' in kwargs
                radius = kwargs['rc_radius']
                rc = RCDefense(model=self.raw_model,
                               defense_name='RC',
                               dataset=self.dataset,
                               device=self.device,
                               num_points=num_points)

                labels = rc.region_based_classification(
                    samples=self.adv_samples, radius=radius)
                return labels
            else:
                raise ValueError('{} is not supported!!!'.format(defense_name))
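
For reference, defense_predication can be called with the defense-specific keyword arguments that the branches above expect (rt_resize, pd_eps, te_level, rc_radius, and the EIT parameters). A hypothetical usage sketch follows, where evaluator is an instance of the surrounding class and all parameter values are illustrative only:

labels_nat = evaluator.defense_predication(DefenseModelDirs='../DefenseEnhancedModels', defense_name='NAT')
labels_rt = evaluator.defense_predication(DefenseModelDirs='../DefenseEnhancedModels', defense_name='RT', rt_resize=36)
labels_pd = evaluator.defense_predication(DefenseModelDirs='../DefenseEnhancedModels', defense_name='PD', pd_eps=0.3)
labels_te = evaluator.defense_predication(DefenseModelDirs='../DefenseEnhancedModels', defense_name='TE', te_level=16)
labels_rc = evaluator.defense_predication(DefenseModelDirs='../DefenseEnhancedModels', defense_name='RC', rc_radius=0.02)
labels_eit = evaluator.defense_predication(DefenseModelDirs='../DefenseEnhancedModels', defense_name='EIT',
                                           crop_size=30, lambda_tv=0.03, JPEG_quality=85, bit_depth=4)
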
Code example #8
    def __init__(self,
                 DataSet='MNIST',
                 AttackName='LLC',
                 AdvExamplesDir='../AdversarialExampleDatasets/',
                 device=torch.device('cpu')):
        """

        :param DataSet:
        :param AttackName:
        :param AdvExamplesDir:
        :param device:
        """
        self.device = device

        # check and set the support data set
        assert DataSet.upper() in ['MNIST', 'CIFAR10'
                                   ], "The data set must be MNIST or CIFAR10"
        self.dataset = DataSet.upper()

        # load the raw model
        raw_model_location = '{}{}/model/{}_raw.pt'.format(
            '../RawModels/', self.dataset, self.dataset)
        if self.dataset == 'MNIST':
            self.raw_model = MNISTConvNet().to(device)
            self.raw_model.load(path=raw_model_location, device=device)
        else:
            self.raw_model = resnet20_cifar().to(device)
            self.raw_model.load(path=raw_model_location, device=device)
        self.raw_model.eval()

        # check and set the supported attack name
        self.attack_name = AttackName.upper()
        supported_un_targeted = [
            'FGSM', 'RFGSM', 'BIM', 'PGD', 'UMIFGSM', 'DEEPFOOL', 'UAP', 'OM'
        ]
        supported_targeted = [
            'LLC', "RLLC", 'ILLC', 'JSMA', 'TMIFGSM', 'BLB', 'CW2', 'EAD'
        ]
        assert self.attack_name in supported_un_targeted or self.attack_name in supported_targeted, \
            "\nCurrently, our implementation support attacks of FGSM, RFGSM, BIM, UMIFGSM, DeepFool, LLC, RLLC, ILLC, TMIFGSM, JSMA, CW2,....\n"

        # set the Target (UA or TA) according to the AttackName
        if self.attack_name.upper() in supported_un_targeted:
            self.Targeted = False
            print('the # {} # attack is an un-targeted attack'.format(self.attack_name))
        else:
            self.Targeted = True
            print('the # {} # attack is a targeted attack'.format(self.attack_name))

        # load the adversarial examples / corresponding adversarial labels / corresponding true labels /
        self.adv_samples = np.load('{}{}/{}/{}_AdvExamples.npy'.format(
            AdvExamplesDir, self.attack_name, self.dataset,
            self.attack_name)).astype(np.float32)
        self.adv_labels = np.load('{}{}/{}/{}_AdvLabels.npy'.format(
            AdvExamplesDir, self.attack_name, self.dataset, self.attack_name))
        self.true_labels = np.load('{}{}/{}/{}_TrueLabels.npy'.format(
            AdvExamplesDir, self.attack_name, self.dataset, self.attack_name))

        # get the targets labels
        # prepare the targeted label (least likely label) for LLC RLLC and ILLC
        if self.attack_name.upper() in ['LLC', 'RLLC', 'ILLC']:
            self.targets_samples = np.load('{}{}/{}_llc.npy'.format(
                '../CleanDatasets/', self.dataset, self.dataset))
        else:
            self.targets_samples = np.load('{}{}/{}_targets.npy'.format(
                '../CleanDatasets/', self.dataset, self.dataset))
Code example #9
File: DD_Test.py Project: zjz5250/DEEPSEC
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # Get training parameters, set up model frameworks and then get the train_loader and test_loader
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    if dataset == 'MNIST':
        training_parameters = MNIST_Training_Parameters
        model_framework = MNISTConvNet().to(device)
        batch_size = training_parameters['batch_size']
        # train_loader for training the initial model
        train_loader, valid_loader = get_mnist_train_validate_loader(
            dir_name='../RawModels/MNIST/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=True)
        # raw train_loader (no augmentation) for constructing the SoftLabelDataset and then used to train the distilled model
        raw_train_loader, raw_valid_loader = get_mnist_train_validate_loader(
            dir_name='../RawModels/MNIST/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=False)
        # testing dataset loader
        test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/',
                                            batch_size=batch_size)
    else:
        training_parameters = CIFAR10_Training_Parameters
        model_framework = resnet20_cifar().to(device)
        batch_size = training_parameters['batch_size']
        # train_loader for training the initial model
        train_loader, valid_loader = get_cifar10_train_validate_loader(
            dir_name='../RawModels/CIFAR10/',
            batch_size=batch_size,
            valid_size=0.1,
            augment=True,
            shuffle=True)
        # raw train_loader (no augmentation) for constructing the SoftLabelDataset and then used to train the distilled model
        raw_train_loader, raw_valid_loader = get_cifar10_train_validate_loader(
            dir_name='../RawModels/CIFAR10/',
            batch_size=batch_size,
            valid_size=0.1,
            augment=False,
            shuffle=False)
        # testing dataset loader
        test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/',
                                              batch_size=batch_size)

    defense_name = 'DD'
    dd = DistillationDefense(model=model_framework,
                             defense_name=defense_name,
                             dataset=dataset,
                             temperature=args.temp,
                             training_parameters=training_parameters,
                             device=device)
    dd.defense(initial_flag=args.initial,
               train_loader=train_loader,
               validation_loader=valid_loader,
               raw_train=raw_train_loader,
               raw_valid=raw_valid_loader,
               test_loader=test_loader)
Code example #10
File: EAT_Test.py Project: zjz5250/DEEPSEC
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # Get training parameters, set up model frameworks and then get the train_loader and test_loader
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    if dataset == 'MNIST':
        training_parameters = MNIST_Training_Parameters
        model_framework = MNISTConvNet().to(device)
        batch_size = training_parameters['batch_size']
        train_loader, valid_loader = get_mnist_train_validate_loader(
            dir_name='../RawModels/MNIST/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=True)
        test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/',
                                            batch_size=batch_size)
    else:
        training_parameters = CIFAR10_Training_Parameters
        model_framework = resnet20_cifar().to(device)
        batch_size = training_parameters['batch_size']
        train_loader, valid_loader = get_cifar10_train_validate_loader(
            dir_name='../RawModels/CIFAR10/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=True)
        test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/',
                                              batch_size=batch_size)

    defense_name = 'EAT'
    eat_params = {'eps': args.eps, 'alpha': args.alpha}

    eat = EATDefense(model=model_framework,
                     defense_name=defense_name,
                     dataset=dataset,
                     training_parameters=training_parameters,
                     device=device,
                     **eat_params)

    # train the external models
    if args.train_externals:
        print('\nStart to train the external models ......\n')
        eat.train_external_model_group(train_loader=train_loader,
                                       validation_loader=valid_loader)

    # load the external models
    pre_train_models = eat.load_external_model_group(
        model_dir='../DefenseEnhancedModels/EAT/', test_loader=test_loader)

    eat.defense(pre_trained_models=pre_train_models,
                train_loader=train_loader,
                validation_loader=valid_loader)
Code example #11
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # prepare the dataset name, candidate num, dataset location and raw model location
    dataset = args.dataset.upper()
    num = args.number
    dataset_location = '../RawModels/{}/'.format(dataset)
    raw_model_location = '../RawModels/{}/model/{}_raw.pt'.format(
        dataset, dataset)
    print(
        "\nStarting to select {} {} Candidates Example, which are correctly classified by the Raw Model from {}\n"
        .format(num, dataset, raw_model_location))

    # load the raw model and testing dataset
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    if dataset == 'MNIST':
        raw_model = MNISTConvNet().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_mnist_test_loader(dir_name=dataset_location,
                                            batch_size=1,
                                            shuffle=False)
    else:
        raw_model = resnet20_cifar().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_cifar10_test_loader(dir_name=dataset_location,
                                              batch_size=1,
                                              shuffle=False)

    # get the successfully classified examples

    successful = []
    raw_model.eval()

    with torch.no_grad():
        for image, label in test_loader:
            image = image.to(device)
            label = label.to(device)
            output = raw_model(image)
            _, predicted = torch.max(output.data, 1)
            if predicted == label:
                _, least_likely_class = torch.min(output.data, 1)
                successful.append([image, label, least_likely_class])

    print(len(successful))
    candidates = random.sample(successful, num)

    candidate_images = []
    candidate_labels = []
    candidates_llc = []
    candidate_targets = []

    for index in range(len(candidates)):
        image = candidates[index][0].cpu().numpy()
        image = np.squeeze(image, axis=0)
        candidate_images.append(image)

        label = candidates[index][1].cpu().numpy()[0]
        llc = candidates[index][2].cpu().numpy()[0]

        # selection for the targeted label
        classes = [i for i in range(10)]
        classes.remove(label)
        target = random.sample(classes, 1)[0]

        one_hot_label = [0 for i in range(10)]
        one_hot_label[label] = 1

        one_hot_llc = [0 for i in range(10)]
        one_hot_llc[llc] = 1

        one_hot_target = [0 for i in range(10)]
        one_hot_target[target] = 1

        candidate_labels.append(one_hot_label)
        candidates_llc.append(one_hot_llc)
        candidate_targets.append(one_hot_target)

    candidate_images = np.array(candidate_images)
    candidate_labels = np.array(candidate_labels)
    candidates_llc = np.array(candidates_llc)
    candidate_targets = np.array(candidate_targets)

    if dataset not in os.listdir('./'):
        os.mkdir('./{}/'.format(dataset))
    else:
        shutil.rmtree('{}'.format(dataset))
        os.mkdir('./{}/'.format(dataset))

    np.save('./{}/{}_inputs.npy'.format(dataset, dataset), candidate_images)
    np.save('./{}/{}_labels.npy'.format(dataset, dataset), candidate_labels)
    np.save('./{}/{}_llc.npy'.format(dataset, dataset), candidates_llc)
    np.save('./{}/{}_targets.npy'.format(dataset, dataset), candidate_targets)
Code example #12
File: train_cifar10.py Project: zjz5250/DEEPSEC
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # get the training and testing dataset loaders
    train_loader, valid_loader = get_cifar10_train_validate_loader(
        dir_name='./CIFAR10/',
        batch_size=CIFAR10_Training_Parameters['batch_size'],
        valid_size=0.1,
        shuffle=True)
    test_loader = get_cifar10_test_loader(
        dir_name='./CIFAR10/',
        batch_size=CIFAR10_Training_Parameters['batch_size'])

    # set up the model and optimizer
    resnet_model = resnet20_cifar().to(device)
    optimizer = optim.Adam(resnet_model.parameters(),
                           lr=CIFAR10_Training_Parameters['lr'])

    # Training
    best_val_acc = None
    model_saver = './CIFAR10/model/CIFAR10_' + 'raw' + '.pt'
    for epoch in range(CIFAR10_Training_Parameters['num_epochs']):

        # training the model within one epoch
        train_one_epoch(model=resnet_model,
                        train_loader=train_loader,
                        optimizer=optimizer,
                        epoch=epoch,
                        device=device)
        # validation
        val_acc = validation_evaluation(model=resnet_model,
                                        validation_loader=valid_loader,
                                        device=device)

        # adjust the learning rate
        adjust_learning_rate(optimizer=optimizer, epoch=epoch)

        if not best_val_acc or round(val_acc, 4) >= round(best_val_acc, 4):
            if best_val_acc is not None:
                os.remove(model_saver)
            best_val_acc = val_acc
            resnet_model.save(name=model_saver)
        else:
            print(
                'Train Epoch{:>3}: validation dataset accuracy did not improve from {:.4f}\n'
                .format(epoch, best_val_acc))

    # Testing
    final_model = copy.deepcopy(resnet_model)
    final_model.load(path=model_saver, device=device)
    accuracy = testing_evaluation(model=final_model,
                                  test_loader=test_loader,
                                  device=device)
    print(
        'Finally, the ACCURACY of saved model [{}] on testing dataset is {:.2f}%\n'
        .format(final_model.model_name, accuracy * 100.0))
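
adjust_learning_rate is imported from the repository's training utilities and is not shown here; a typical step-decay schedule for ResNet-20 on CIFAR-10 might look like the sketch below. The milestones, base rate and decay factor are illustrative assumptions, not the repository's actual values.

def adjust_learning_rate(optimizer, epoch, initial_lr=1e-3, decay_factor=0.1, milestones=(100, 150)):
    # Illustrative step decay: multiply the base learning rate by decay_factor at each milestone epoch.
    lr = initial_lr
    for milestone in milestones:
        if epoch >= milestone:
            lr *= decay_factor
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
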
Code example #13
File: RC_Test.py Project: zjz5250/DEEPSEC
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    batch_size = 1000
    # get the training_parameters, raw_model, and test_loader
    model_location = '{}/{}/model/{}_raw.pt'.format('../RawModels', dataset, dataset)
    if dataset == 'MNIST':
        raw_model = MNISTConvNet().to(device)
        test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/', batch_size=batch_size)
    else:
        raw_model = resnet20_cifar().to(device)
        test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/', batch_size=batch_size)

    raw_model.load(path=model_location, device=device)

    defense_name = 'RC'
    rc = RCDefense(model=raw_model, defense_name=defense_name, dataset=dataset, device=device, num_points=args.num_points)

    # search the radius r
    if args.search:
        # get the validation dataset (a small split of the training dataset, valid_size=0.02)
        print('start to search the radius r using validation dataset ...')
        if dataset == 'MNIST':
            _, valid_loader = get_mnist_train_validate_loader(dir_name='../RawModels/MNIST/', batch_size=batch_size, valid_size=0.02,
                                                              shuffle=True)
        else:
            _, valid_loader = get_cifar10_train_validate_loader(dir_name='../RawModels/CIFAR10/', batch_size=batch_size, valid_size=0.02,
                                                                shuffle=True)
        radius = rc.search_best_radius(validation_loader=valid_loader, radius_min=args.radius_min, radius_max=args.radius_max,
                                       radius_step=args.radius_step)
    else:
        radius = round(args.radius, 2)
    print('######\nthe radius for RC is set or searched as: {}\n######'.format(radius))

    # calculate the accuracy of region-based classification defense on testing dataset
    print('\nStart to calculate the accuracy of region-based classification defense on testing dataset')
    raw_model.eval()
    total = 0.0
    correct = 0.0
    with torch.no_grad():
        for images, labels in test_loader:
            # apply the region-based classification on images using radius
            rc_labels = rc.region_based_classification(samples=images, radius=radius)
            rc_labels = torch.from_numpy(rc_labels)

            total += labels.size(0)
            correct += (rc_labels == labels).sum().item()
        ratio = correct / total
        print('\nTest accuracy of the {} model on the testing dataset: {:.1f}/{:.1f} = {:.2f}%\n'.format(raw_model.model_name, correct, total,
                                                                                                         ratio * 100))
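
Region-based classification (Cao & Gong) replaces a single point prediction with a majority vote over points sampled around the input. A standalone sketch of that idea is given below; it is not the repository's RCDefense.region_based_classification, and it assumes 10 output classes and inputs in [0, 1].

import numpy as np
import torch

def region_based_classification_sketch(model, samples, radius, num_points=1000, device='cpu'):
    # Illustrative sketch: majority vote over predictions on points drawn from
    # the L_inf ball of the given radius around each sample.
    model.eval()
    samples = samples.to(device)
    voted_labels = []
    with torch.no_grad():
        for sample in samples:
            # sample uniformly from the hypercube and clip to the valid pixel range
            noise = (torch.rand(num_points, *sample.shape, device=device) * 2.0 - 1.0) * radius
            neighbours = torch.clamp(sample.unsqueeze(0) + noise, 0.0, 1.0)
            predictions = model(neighbours).argmax(dim=1)
            voted_labels.append(torch.bincount(predictions, minlength=10).argmax().item())
    return np.array(voted_labels)
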