Code Example #1
File: NEAT_Test.py  Project: zuraSherl/defense_test
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # dataset: convert the dataset name to upper case (MNIST/CIFAR10)
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    # Get the training parameters, model, and train/validation/test loaders for MNIST/CIFAR10
    if dataset == 'MNIST':
        training_parameters = MNIST_Training_Parameters
        model_framework = MNISTConvNet().to(device)
        batch_size = training_parameters['batch_size']
        train_loader, valid_loader = get_mnist_train_validate_loader(
            dir_name='../data/MNIST/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=True)
        test_loader = get_mnist_test_loader(dir_name='../data/MNIST/',
                                            batch_size=batch_size)
    else:
        training_parameters = CIFAR10_Training_Parameters
        model_framework = resnet20_cifar().to(device)
        batch_size = training_parameters['batch_size']
        train_loader, valid_loader = get_cifar10_train_validate_loader(
            dir_name='../data/CIFAR10/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=True)
        test_loader = get_cifar10_test_loader(dir_name='../data/CIFAR10/',
                                              batch_size=batch_size)
    # defense_name: the defense name is NEAT
    defense_name = 'NEAT'
    # neat_params: defense hyperparameters
    neat_params = {'eps': args.eps, 'alpha': args.alpha}
    # Pass the parameters into the NEAT defense
    neat = NEATDefense(model=model_framework,
                       defense_name=defense_name,
                       dataset=dataset,
                       training_parameters=training_parameters,
                       device=device,
                       **neat_params)
    # If True, retrain the pre-defined external models
    if args.train_externals:
        print('\nStart to train the external models ......\n')
        neat.train_external_model_group(train_loader=train_loader,
                                        validation_loader=valid_loader)
    # Load the pre-trained external models
    pre_train_models = neat.load_external_model_group(
        model_dir='../DefenseEnhancedModels/NEAT/', test_loader=test_loader)
    # Run ensemble adversarial training and save the best model parameters to
    # DefenseEnhancedModels/NEAT/MNIST_NEAT_enhanced.pt (or the CIFAR10 equivalent)
    neat.defense(pre_trained_models=pre_train_models,
                 train_loader=train_loader,
                 validation_loader=valid_loader)
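
For context, the flags that main(args) reads above (gpu_index, seed, dataset, eps, alpha, train_externals) are parsed outside this listing; a minimal argparse sketch, with assumed default values, might look like this:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='NEAT defense training (sketch)')
    parser.add_argument('--dataset', type=str, default='MNIST', help='MNIST or CIFAR10')
    parser.add_argument('--gpu_index', type=str, default='0', help='value for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--seed', type=int, default=100, help='random seed (assumed default)')
    parser.add_argument('--eps', type=float, default=0.3, help='perturbation budget (assumed default)')
    parser.add_argument('--alpha', type=float, default=0.05, help='attack step size (assumed default)')
    parser.add_argument('--train_externals', action='store_true',
                        help='retrain the external model group before running the defense')
    main(parser.parse_args())
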
Code Example #2
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # Download the CIFAR10 training data, split it into training and validation sets (batched),
    # and cache it under the CIFAR10 folder
    train_loader, valid_loader = get_cifar10_train_validate_loader(
        dir_name='../data/CIFAR10/',
        batch_size=CIFAR10_Training_Parameters['batch_size'],
        valid_size=0.1,
        shuffle=True)
    # Download the CIFAR10 test set (batched) and cache it under the CIFAR10 folder
    test_loader = get_cifar10_test_loader(
        dir_name='../data/CIFAR10/',
        batch_size=CIFAR10_Training_Parameters['batch_size'])
    # Set up the model
    # ************** name of the imported model **************
    resnet_model = ResNet18().to(device)
    # Set up the optimizer
    optimizer = optim.Adam(resnet_model.parameters(),
                           lr=CIFAR10_Training_Parameters['lr'])
    # Training
    # Best validation accuracy so far
    best_val_acc = None
    # Path where the trained model parameters are saved: ./CIFAR10/model/ResNet18_raw.pt
    # ************** change the name for a different model **************
    model_saver = './CIFAR10/model/ResNet18_raw.pt'
    # Train for the configured number of epochs
    for epoch in range(CIFAR10_Training_Parameters['num_epochs']):
        # Train for one epoch
        train_one_epoch(model=resnet_model,
                        train_loader=train_loader,
                        optimizer=optimizer,
                        epoch=epoch,
                        device=device)
        # Evaluate accuracy on the validation set
        val_acc = validation_evaluation(model=resnet_model,
                                        validation_loader=valid_loader,
                                        device=device)
        # Adjust the learning rate
        adjust_learning_rate(optimizer=optimizer, epoch=epoch)
        # After each epoch, if the validation accuracy is at least the best so far (or
        # best_val_acc is still None), remove the old checkpoint, update the best accuracy,
        # and save the current model parameters to the save path
        if not best_val_acc or round(val_acc, 4) >= round(best_val_acc, 4):
            if best_val_acc is not None:
                os.remove(model_saver)
            best_val_acc = val_acc
            resnet_model.save(name=model_saver)
        # Otherwise report that the accuracy did not improve
        else:
            print(
                'Train Epoch{:>3}: validation dataset accuracy did not improve from {:.4f}\n'
                .format(epoch, best_val_acc))
    # Testing
    # Make a copy of resnet_model
    final_model = copy.deepcopy(resnet_model)
    # Load the best checkpoint into final_model
    final_model.load(path=model_saver, device=device)
    # Compute the model's accuracy on the test set
    accuracy = testing_evaluation(model=final_model,
                                  test_loader=test_loader,
                                  device=device)
    # Print the test accuracy
    print(
        'Finally, the ACCURACY of saved model [{}] on testing dataset is {:.2f}%\n'
        .format(final_model.model_name, accuracy * 100.0))
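
The adjust_learning_rate helper used above is not part of this listing; a plausible step-decay sketch (an assumption, not the project's actual schedule) is:

def adjust_learning_rate(optimizer, epoch, init_lr=1e-3, decay=0.1, step=30):
    # Decay the learning rate by `decay` every `step` epochs (hypothetical schedule).
    lr = init_lr * (decay ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
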
Code Example #3
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    # dataset: dataset name, MNIST or CIFAR10
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    # Load the raw model and the test set
    raw_model_location = '{}{}/model/{}_raw.pt'.format('../data/', dataset,
                                                       dataset)
    if dataset == 'MNIST':
        raw_model = MNIST_CNN().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_mnist_test_loader(dir_name='../data/MNIST/',
                                            batch_size=30)
    else:
        raw_model = ResNet18().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_cifar10_test_loader(dir_name='../data/CIFAR10/',
                                              batch_size=25)
    raw_model.eval()

    # Predictions of the raw model on the test set
    predicted_raw, true_label = prediction(model=raw_model,
                                           test_loader=test_loader,
                                           device=device)

    # Defenses that retrain the model
    re_train_defenses = {'NAT', 'RLT', 'RLT1', 'RLT2', 'RLT3', 'EAT', 'UAPAT'}
    # Other defenses
    other_defenses = {'NRC'}
    # defense_name: name of the defense
    defense_name = args.defense.upper().strip()
    # If this is a retraining-based defense
    if defense_name in re_train_defenses:
        print(
            '\nThe ##{}## defense is a complete defense that retrains the model'
            .format(defense_name))
        # Load the defense-enhanced model
        # defended_model_location: path of the defended model,
        # DefenseEnhancedModels/<defense_name>/CIFAR10_<defense_name>_enhanced.pt (or MNIST_...)
        defended_model_location = '{}/{}/{}_{}_enhanced.pt'.format(
            '../DefenseEnhancedModels', defense_name, dataset, defense_name)
        defended_model = MNIST_CNN().to(
            device) if dataset == 'MNIST' else ResNet18().to(device)
        defended_model.load(path=defended_model_location, device=device)
        defended_model.eval()
        # Predict labels with the defended model
        predicted_defended, _ = prediction(model=defended_model,
                                           test_loader=test_loader,
                                           device=device)
        # Compute the defense utility metrics
        raw_acc, def_acc, cav, crr, csr = defense_utility_measure(
            predicted_defended, predicted_raw, true_label)
    else:
        if defense_name == 'NRC':
            print(
                '\nThe ##{}## defense is a region-based classification defense ... '
                .format(defense_name))
            from Defenses.DefenseMethods.NRC import NRCDefense
            num_points = 1000
            radius = args.radius
            mean = args.mean
            std = args.std
            nrc = NRCDefense(model=raw_model,
                             defense_name='NRC',
                             dataset=dataset,
                             device=device,
                             num_points=num_points)
            predicted_defended = []
            with torch.no_grad():
                for index, (images, labels) in enumerate(test_loader):
                    nrc_labels = nrc.region_based_classification(
                        samples=images, radius=radius, mean=mean, std=std)
                    predicted_defended.extend(nrc_labels)
            predicted_defended = np.array(predicted_defended)
            correct_prediction_def = np.equal(predicted_defended, true_label)
            def_acc = np.mean(correct_prediction_def.astype(float))
            correct_prediction_raw = np.equal(np.argmax(predicted_raw, axis=1),
                                              true_label)
            raw_acc = np.mean(correct_prediction_raw.astype(float))
            # Classification Accuracy Variance(CAV)
            cav = def_acc - raw_acc
            # Find the index of correct predicted examples by defence-enhanced model and raw model
            idx_def = np.squeeze(np.argwhere(correct_prediction_def == True))
            idx_raw = np.squeeze(np.argwhere(correct_prediction_raw == True))
            idx = np.intersect1d(idx_def, idx_raw, assume_unique=True)
            crr = (len(idx_def) - len(idx)) / len(predicted_raw)
            csr = (len(idx_raw) - len(idx)) / len(predicted_raw)
        else:
            raise ValueError('{} is not supported!!!'.format(defense_name))
    # Print the defense utility metrics
    print("****************************")
    print(
        "The utility evaluation results of the {} defense on the {} dataset are as follows:"
        .format(defense_name, dataset))
    print('Acc of Raw Model:\t\t{:.2f}%'.format(raw_acc * 100))
    print('Acc of {}-enhanced Model:\t{:.2f}%'.format(defense_name,
                                                      def_acc * 100))
    print('CAV: {:.2f}%'.format(cav * 100))
    print('CRR: {:.2f}%'.format(crr * 100))
    print('CSR: {:.2f}%'.format(csr * 100))
    print("****************************")
Code Example #4
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # dataset: dataset name, MNIST or CIFAR10
    # num: number of clean samples to select
    # dataset_location: dataset directory, ../data/CIFAR10/ (or MNIST)
    # raw_model_location: model path, ../data/CIFAR10/model/CIFAR10_raw.pt (or MNIST)
    dataset = args.dataset.upper()
    num = args.number
    # ***************** location where the dataset is stored *****************
    dataset_location = '../data/{}/'.format(dataset)
    raw_model_location = '../data/{}/model/{}_raw.pt'.format(dataset, dataset)
    print(
        "\nStarting to select {} {} candidate examples that are correctly classified by the raw model from {}\n"
        .format(num, dataset, raw_model_location))
    # Load the raw model and the testing dataset
    # raw_model: the model
    # test_loader: the test set
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    if dataset == 'MNIST':
        raw_model = MNIST_CNN().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_mnist_test_loader(dir_name=dataset_location,
                                            batch_size=1,
                                            shuffle=False)
    else:
        raw_model = ResNet18().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_cifar10_test_loader(dir_name=dataset_location,
                                              batch_size=1,
                                              shuffle=False)
    # Collect the test examples that are classified correctly
    # successful: run the test set through the model and keep the correctly predicted images,
    # their labels, and the label with the smallest softmax output (the least-likely class)
    successful = []
    raw_model.eval()
    with torch.no_grad():
        for image, label in test_loader:
            image = image.to(device)
            label = label.to(device)
            output = raw_model(image)
            _, predicted = torch.max(output.data, 1)
            if predicted == label:
                _, least_likely_class = torch.min(output.data, 1)
                successful.append([image, label, least_likely_class])
    print(len(successful))
    # Randomly select num correctly classified images
    candidates = random.sample(successful, num)

    candidate_images = []
    candidate_labels = []
    candidates_llc = []
    candidate_targets = []
    for index in range(len(candidates)):
        # Split the selected sample into its image, label, and least-likely label
        image = candidates[index][0].cpu().numpy()
        image = np.squeeze(image, axis=0)
        candidate_images.append(image)
        label = candidates[index][1].cpu().numpy()[0]
        llc = candidates[index][2].cpu().numpy()[0]
        # Build the labels 0-9, remove the true label, and randomly pick one as the target
        classes = [i for i in range(10)]
        classes.remove(label)
        target = random.sample(classes, 1)[0]
        # Convert the random target label, the least-likely label, and the true label to one-hot vectors
        one_hot_label = [0 for i in range(10)]
        one_hot_label[label] = 1
        one_hot_llc = [0 for i in range(10)]
        one_hot_llc[llc] = 1
        one_hot_target = [0 for i in range(10)]
        one_hot_target[target] = 1
        candidate_labels.append(one_hot_label)
        candidates_llc.append(one_hot_llc)
        candidate_targets.append(one_hot_target)
    # Images
    candidate_images = np.array(candidate_images)
    # One-hot true labels for the images
    candidate_labels = np.array(candidate_labels)
    # One-hot least-likely-class labels for the images
    candidates_llc = np.array(candidates_llc)
    # Random one-hot target labels (different from the true labels)
    candidate_targets = np.array(candidate_targets)
    # Create (or recreate) the CIFAR10/ or MNIST/ folder
    if dataset not in os.listdir('./'):
        os.mkdir('./{}/'.format(dataset))
    else:
        shutil.rmtree('{}'.format(dataset))
        os.mkdir('./{}/'.format(dataset))
    # Save the images, labels, least-likely labels, and target labels to
    # clean_datasets/CIFAR10/CIFAR10_inputs.npy etc. (or the MNIST equivalents)
    np.save('./{}/{}_inputs.npy'.format(dataset, dataset), candidate_images)
    np.save('./{}/{}_labels.npy'.format(dataset, dataset), candidate_labels)
    np.save('./{}/{}_llc.npy'.format(dataset, dataset), candidates_llc)
    np.save('./{}/{}_targets.npy'.format(dataset, dataset), candidate_targets)
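
The saved arrays can be loaded back with np.load; for example, for MNIST (paths follow the save calls above):

import numpy as np

inputs = np.load('./MNIST/MNIST_inputs.npy')    # candidate images, shape (num, C, H, W)
labels = np.load('./MNIST/MNIST_labels.npy')    # one-hot true labels, shape (num, 10)
llc = np.load('./MNIST/MNIST_llc.npy')          # one-hot least-likely-class labels
targets = np.load('./MNIST/MNIST_targets.npy')  # one-hot random target labels
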
Code Example #5
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # dataset: convert the dataset name to upper case (MNIST/CIFAR10)
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    # batch_size: each batch contains 1000 samples
    batch_size = 1000
    # Get the MNIST/CIFAR10 model and test set
    model_location = '{}/{}/model/{}_raw.pt'.format('../data', dataset,
                                                    dataset)
    if dataset == 'MNIST':
        raw_model = MNIST_CNN().to(device)
        test_loader = get_mnist_test_loader(dir_name='../data/MNIST/',
                                            batch_size=batch_size)
    else:
        raw_model = ResNet18().to(device)
        test_loader = get_cifar10_test_loader(dir_name='../data/CIFAR10/',
                                              batch_size=batch_size)

    # Load the MNIST/CIFAR10 model parameters
    raw_model.load(path=model_location, device=device)
    # defense_name: the defense name is NRC
    defense_name = 'NRC'
    # Pass the parameters into the NRC defense
    nrc = NRCDefense(model=raw_model,
                     defense_name=defense_name,
                     dataset=dataset,
                     device=device,
                     num_points=args.num_points)

    # If searching for the optimal radius
    if args.search:
        # get the validation dataset (10% with the training dataset)
        print('start to search the radius r using validation dataset ...')
        # Get the MNIST/CIFAR10 validation set
        if dataset == 'MNIST':
            _, valid_loader = get_mnist_train_validate_loader(
                dir_name='../data/MNIST/',
                batch_size=batch_size,
                valid_size=0.02,
                shuffle=True)
        else:
            _, valid_loader = get_cifar10_train_validate_loader(
                dir_name='../data/CIFAR10/',
                batch_size=batch_size,
                valid_size=0.02,
                shuffle=True)
        # radius: the optimal radius found on the validation set
        radius = nrc.search_best_radius(validation_loader=valid_loader,
                                        radius_min=args.radius_min,
                                        radius_max=args.radius_max,
                                        radius_step=args.radius_step)
    # Otherwise use the radius given on the command line (default 0.01)
    else:
        radius = round(args.radius, 2)
    print(
        '######\nthe radius for NRC is set or searched as: {}\n######'.format(
            radius))

    # Compute the classification accuracy of the NRC defense on the test set
    print(
        '\nStart to calculate the accuracy of region-based classification defense on testing dataset'
    )
    raw_model.eval()
    total = 0.0
    correct = 0.0
    with torch.no_grad():
        for images, labels in test_loader:
            nrc_labels = nrc.region_based_classification(samples=images,
                                                         radius=radius,
                                                         mean=args.mean,
                                                         std=args.std)
            nrc_labels = torch.from_numpy(nrc_labels)
            total += labels.size(0)
            correct += (nrc_labels == labels).sum().item()
        ratio = correct / total
        print(
            '\nTest accuracy of the {} model on the testing dataset: {:.1f}/{:.1f} = {:.2f}%\n'
            .format(raw_model.model_name, correct, total, ratio * 100))
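
region_based_classification is implemented inside NRCDefense and is not shown here. Below is a simplified sketch of the usual region-based classification idea (a majority vote over points sampled uniformly in a small hypercube around each input); the function name and parameters are illustrative assumptions, and mean/std, presumably the dataset normalization statistics, are omitted for brevity.

import torch

def region_based_classification_sketch(model, samples, radius, num_points, num_classes, device):
    # For each input, sample num_points noisy copies inside an L-infinity ball of the given
    # radius and take a majority vote over the model's predictions on those copies.
    model.eval()
    predictions = []
    with torch.no_grad():
        for x in samples:
            x = x.unsqueeze(0).to(device)
            noise = torch.empty((num_points,) + x.shape[1:], device=device).uniform_(-radius, radius)
            votes = model(x + noise).argmax(dim=1)
            predictions.append(torch.bincount(votes, minlength=num_classes).argmax().item())
    return predictions
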