Example #1
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    batch_size = 200
    # Set up the model framework and then get the test_loader
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    model_location = '{}/{}/model/{}_raw.pt'.format('../RawModels', dataset, dataset)
    if dataset == 'MNIST':
        raw_model = MNISTConvNet().to(device)
        test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/',
                                            batch_size=batch_size)
    else:
        raw_model = resnet20_cifar().to(device)
        test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/',
                                              batch_size=batch_size)
    raw_model.load(path=model_location, device=device)

    defense_name = 'RT'
    rt = RTDefense(model=raw_model,
                   defense_name=defense_name,
                   dataset=dataset,
                   device=device)

    # predict on the testing dataset using the randomization transformation defense
    raw_model.eval()
    total = 0.0
    correct = 0.0
    with torch.no_grad():
        for index, (images, labels) in enumerate(test_loader):
            # input images first go through the randomization transformation layer and then the resulting images are fed into the original model
            transformed_images = rt.randomization_transformation(
                samples=images,
                original_size=images.shape[-1],
                final_size=args.resize)
            outputs = raw_model(transformed_images)

            labels = labels.to(device)
            _, predicted = torch.max(outputs.data, 1)
            total = total + labels.size(0)
            correct = correct + (predicted == labels).sum().item()
        ratio = correct / total
        print(
            '\nTest accuracy of the {} model on the testing dataset: {:.1f}/{:.1f} = {:.2f}%\n'
            .format(raw_model.model_name, correct, total, ratio * 100))
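The script above reads gpu_index, seed, dataset, and resize from args, but the command-line interface itself is not shown. A minimal argparse driver along these lines would be enough to invoke it; the flag names and defaults here are assumptions, not taken from the original repository.

if __name__ == '__main__':
    import argparse
    # Hypothetical CLI wrapper; flag names and defaults are assumptions.
    parser = argparse.ArgumentParser(description='Evaluate the RT (randomization transformation) defense')
    parser.add_argument('--dataset', type=str, default='MNIST', help="'MNIST' or 'CIFAR10'")
    parser.add_argument('--gpu_index', type=str, default='0', help='value assigned to CUDA_VISIBLE_DEVICES')
    parser.add_argument('--seed', type=int, default=100, help='random seed for reproducibility')
    parser.add_argument('--resize', type=int, default=36, help='final image size after the randomization transformation')
    main(parser.parse_args())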
Example #2
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'

    # load the raw model / testing dataset loader
    raw_model_location = '{}{}/model/{}_raw.pt'.format('../RawModels/', dataset, dataset)
    if dataset == 'MNIST':
        raw_model = MNISTConvNet().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/', batch_size=30)
    else:
        raw_model = resnet20_cifar().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/', batch_size=25)
    raw_model.eval()

    # get predictions of the raw model on test datasets
    predicted_raw, true_label = prediction(model=raw_model, test_loader=test_loader, device=device)

    re_train_defenses = {'NAT', 'EAT', 'PAT', 'DD', 'IGR'}
    input_transformation_defenses = {'EIT', 'RT', 'PD', 'TE'}
    other_defenses = {'RC'}

    defense_name = args.defense.upper().strip()
    if defense_name in re_train_defenses:
        print('\nthe ##{}## defense is a complete defense that retrains the model'.format(defense_name))
        # load the defense-enhanced model
        defended_model_location = '{}/{}/{}_{}_enhanced.pt'.format('../DefenseEnhancedModels', defense_name, dataset, defense_name)
        defended_model = MNISTConvNet().to(device) if dataset == 'MNIST' else resnet20_cifar().to(device)
        defended_model.load(path=defended_model_location, device=device)
        defended_model.eval()
        predicted_defended, _ = prediction(model=defended_model, test_loader=test_loader, device=device)
        raw_acc, def_acc, cav, crr, csr, ccv, cos = defense_utility_measure(predicted_defended, predicted_raw, true_label)

    elif defense_name in input_transformation_defenses:
        print('\nthe ##{}## defense is a complete defense that needs to transform the input images ... '.format(defense_name))
        if defense_name == 'EIT':

            from Defenses.DefenseMethods.EIT import EITDefense, TransformedDataset
            eit_params = {
                'crop_size': args.crop_size,
                'lambda_tv': args.lambda_tv,
                'JPEG_quality': args.JPEG_quality,
                'bit_depth': args.bit_depth
            }
            defended_model = MNISTConvNet().to(device) if dataset == 'MNIST' else resnet20_cifar().to(device)
            EIT = EITDefense(model=defended_model, defense_name=defense_name, dataset=dataset, re_training=False, training_parameters=None,
                             device=device, **eit_params)
            transformed_test_data_numpy, test_label_numpy = EIT.transforming_dataset(data_loader=test_loader)
            transformed_test_dataset = TransformedDataset(images=torch.from_numpy(transformed_test_data_numpy),
                                                          labels=torch.from_numpy(test_label_numpy), dataset=dataset, transform=None)
            transformed_test_loader = torch.utils.data.DataLoader(transformed_test_dataset, batch_size=100, shuffle=False)

            defended_model_location = '{}/{}/{}_{}_enhanced.pt'.format('../DefenseEnhancedModels', defense_name, dataset, defense_name)
            defended_model = defended_model.to(device)
            defended_model.load(path=defended_model_location, device=device)
            defended_model.eval()

            predicted_defended, _ = prediction(model=defended_model, test_loader=transformed_test_loader, device=device)
            raw_acc, def_acc, cav, crr, csr, ccv, cos = defense_utility_measure(predicted_defended, predicted_raw, true_label)

        elif defense_name == 'RT':
            final_size = args.resize
            assert isinstance(final_size, int)
            warnings.warn(message='For the RT defense, the #resize# parameter is specified as {}, please check ...'.format(final_size))
            predicted_defended = rt_prediction(model=raw_model, dataset=dataset, data_loader=test_loader, final_size=final_size, device=device)
            # test the utility performance of defended model
            raw_acc, def_acc, cav, crr, csr, ccv, cos = defense_utility_measure(predicted_defended, predicted_raw, true_label)
        elif defense_name == 'PD':
            epsilon = args.epsilon
            warnings.warn(message='For the PixelDefend defense, the #epsilon# parameter is specified as {}, please check ...'.format(epsilon))
            predicted_defended = pd_prediction(model=raw_model, dataset=dataset, data_loader=test_loader, epsilon=epsilon, device=device)
            raw_acc, def_acc, cav, crr, csr, ccv, cos = defense_utility_measure(predicted_defended, predicted_raw, true_label)
        else:
            level = args.level
            assert defense_name == 'TE' and isinstance(level, int)
            warnings.warn(message='For the TE defense, the #level# parameter is specified as {}, please check ...'.format(level))

            # load the defense-enhanced model (for TE)
            defended_model_location = '{}/{}/{}_{}_enhanced.pt'.format('../DefenseEnhancedModels', defense_name, dataset, defense_name)
            defended_model = MNISTConvNet(thermometer=True, level=level) if dataset == 'MNIST' else resnet20_cifar(thermometer=True, level=level)
            defended_model = defended_model.to(device)
            defended_model.load(path=defended_model_location, device=device)
            defended_model.eval()
            predicted_defended = te_prediction(model=defended_model, data_loader=test_loader, level=level, device=device)
            raw_acc, def_acc, cav, crr, csr, ccv, cos = defense_utility_measure(predicted_defended, predicted_raw, true_label)

    else:
        if defense_name == 'RC':
            print('\nthe ##{}## defense is a region-based classification defense ... '.format(defense_name))
            from Defenses.DefenseMethods.RC import RCDefense
            num_points = 1000
            radius = args.radius
            rc = RCDefense(model=raw_model, defense_name='RC', dataset=dataset, device=device, num_points=num_points)

            predicted_defended = []
            with torch.no_grad():
                for index, (images, labels) in enumerate(test_loader):
                    rc_labels = rc.region_based_classification(samples=images, radius=radius)
                    predicted_defended.extend(rc_labels)
            predicted_defended = np.array(predicted_defended)

            # classification accuracy of defense-enhanced model
            correct_prediction_def = np.equal(predicted_defended, true_label)
            def_acc = np.mean(correct_prediction_def.astype(float))
            # classification accuracy of raw model
            correct_prediction_raw = np.equal(np.argmax(predicted_raw, axis=1), true_label)
            raw_acc = np.mean(correct_prediction_raw.astype(float))
            # Classification Accuracy Variance (CAV)
            cav = def_acc - raw_acc

            # Find the indices of the examples correctly predicted by the defense-enhanced model and by the raw model
            idx_def = np.squeeze(np.argwhere(correct_prediction_def == True))
            idx_raw = np.squeeze(np.argwhere(correct_prediction_raw == True))
            idx = np.intersect1d(idx_def, idx_raw, assume_unique=True)

            # Compute the Classification Rectify Ratio (CRR) & Classification Sacrifice Ratio (CSR)
            crr = (len(idx_def) - len(idx)) / len(predicted_raw)
            csr = (len(idx_raw) - len(idx)) / len(predicted_raw)
            ccv = cos = 0

        else:
            raise ValueError('{} is not supported!!!'.format(defense_name))

    print("****************************")
    print("The utility evaluation results of the {} defense for {} Dataset are as follow:".format(defense_name, dataset))
    print('Acc of Raw Model:\t\t{:.2f}%'.format(raw_acc * 100))
    print('Acc of {}-enhanced Model:\t{:.2f}%'.format(defense_name, def_acc * 100))
    print('CAV: {:.2f}%'.format(cav * 100))
    print('CRR: {:.2f}%'.format(crr * 100))
    print('CSR: {:.2f}%'.format(csr * 100))
    print('CCV: {:.2f}%'.format(ccv * 100))
    print('COS: {:.4f}'.format(cos))
    print("****************************")
Example #3
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # get the training and testing dataset loaders
    train_loader, valid_loader = get_mnist_train_validate_loader(
        dir_name='./MNIST/',
        batch_size=MNIST_Training_Parameters['batch_size'],
        valid_size=0.1,
        shuffle=True)
    test_loader = get_mnist_test_loader(
        dir_name='./MNIST/',
        batch_size=MNIST_Training_Parameters['batch_size'])

    # set up the model and optimizer
    mnist_model = MNISTConvNet().to(device)
    optimizer = optim.SGD(mnist_model.parameters(),
                          lr=MNIST_Training_Parameters['learning_rate'],
                          momentum=MNIST_Training_Parameters['momentum'],
                          weight_decay=MNIST_Training_Parameters['decay'],
                          nesterov=True)

    # Training
    best_val_acc = None
    model_saver = './MNIST/model/MNIST_raw.pt'
    for epoch in range(MNIST_Training_Parameters['num_epochs']):

        # training the model within one epoch
        train_one_epoch(model=mnist_model,
                        train_loader=train_loader,
                        optimizer=optimizer,
                        epoch=epoch,
                        device=device)
        # validation
        val_acc = validation_evaluation(model=mnist_model,
                                        validation_loader=valid_loader,
                                        device=device)

        if best_val_acc is None or round(val_acc, 4) >= round(best_val_acc, 4):
            if best_val_acc is not None:
                os.remove(model_saver)
            best_val_acc = val_acc
            mnist_model.save(name=model_saver)
        else:
            print(
                'Train Epoch{:>3}: validation dataset accuracy did not improve from {:.4f}\n'
                .format(epoch, best_val_acc))

    # Testing
    final_model = copy.deepcopy(mnist_model)
    final_model.load(path=model_saver, device=device)
    accuracy = testing_evaluation(model=final_model,
                                  test_loader=test_loader,
                                  device=device)
    print(
        'Finally, the ACCURACY of the saved model [{}] on the testing dataset is {:.2f}%\n'
        .format(final_model.model_name, accuracy * 100.0))
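train_one_epoch, validation_evaluation, and testing_evaluation are imported training utilities whose bodies are not shown here. Purely as an illustration of what the validation step boils down to (not the repository's actual helper), a plain top-1 accuracy pass would look like this:

import torch

def validation_evaluation_sketch(model, validation_loader, device):
    # Illustrative only: top-1 accuracy on the validation split.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in validation_loader:
            images, labels = images.to(device), labels.to(device)
            predictions = model(images).argmax(dim=1)
            correct += (predictions == labels).sum().item()
            total += labels.size(0)
    return correct / total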
Example #4
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # Get training parameters, set up the model framework and then get the train_loader and test_loader
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    if dataset == 'MNIST':
        training_parameters = MNIST_Training_Parameters
        model_framework = MNISTConvNet().to(device)
        batch_size = training_parameters['batch_size']
        # train_loader for training the initial model
        train_loader, valid_loader = get_mnist_train_validate_loader(
            dir_name='../RawModels/MNIST/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=True)
        # raw (unshuffled) train_loader used to construct the SoftLabelDataset for training the distilled model
        raw_train_loader, raw_valid_loader = get_mnist_train_validate_loader(
            dir_name='../RawModels/MNIST/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=False)
        # testing dataset loader
        test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/',
                                            batch_size=batch_size)
    else:
        training_parameters = CIFAR10_Training_Parameters
        model_framework = resnet20_cifar().to(device)
        batch_size = training_parameters['batch_size']
        # train_loader for training the initial model
        train_loader, valid_loader = get_cifar10_train_validate_loader(
            dir_name='../RawModels/CIFAR10/',
            batch_size=batch_size,
            valid_size=0.1,
            augment=True,
            shuffle=True)
        # raw train_loader (no augmentation, no shuffling) used to construct the SoftLabelDataset for training the distilled model
        raw_train_loader, raw_valid_loader = get_cifar10_train_validate_loader(
            dir_name='../RawModels/CIFAR10/',
            batch_size=batch_size,
            valid_size=0.1,
            augment=False,
            shuffle=False)
        # testing dataset loader
        test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/',
                                              batch_size=batch_size)

    defense_name = 'DD'
    dd = DistillationDefense(model=model_framework,
                             defense_name=defense_name,
                             dataset=dataset,
                             temperature=args.temp,
                             training_parameters=training_parameters,
                             device=device)
    dd.defense(initial_flag=args.initial,
               train_loader=train_loader,
               validation_loader=valid_loader,
               raw_train=raw_train_loader,
               raw_valid=raw_valid_loader,
               test_loader=test_loader)
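The temperature passed via args.temp controls how soft the distillation labels are. As a reminder of the underlying formula only (DistillationDefense's internals are not shown here), the soft labels used by defensive distillation come from a temperature-scaled softmax of the initial model's logits:

import torch.nn.functional as F

def soft_labels_sketch(logits, temperature):
    # softmax(z / T): a larger T yields a softer, higher-entropy label distribution.
    return F.softmax(logits / temperature, dim=1)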
Example #5
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # Get training parameters, set up model frameworks and then get the train_loader and test_loader
    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    if dataset == 'MNIST':
        training_parameters = MNIST_Training_Parameters
        model_framework = MNISTConvNet().to(device)
        batch_size = training_parameters['batch_size']
        train_loader, valid_loader = get_mnist_train_validate_loader(
            dir_name='../RawModels/MNIST/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=True)
        test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/',
                                            batch_size=batch_size)
    else:
        training_parameters = CIFAR10_Training_Parameters
        model_framework = resnet20_cifar().to(device)
        batch_size = training_parameters['batch_size']
        train_loader, valid_loader = get_cifar10_train_validate_loader(
            dir_name='../RawModels/CIFAR10/',
            batch_size=batch_size,
            valid_size=0.1,
            shuffle=True)
        test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/',
                                              batch_size=batch_size)

    defense_name = 'EAT'
    eat_params = {'eps': args.eps, 'alpha': args.alpha}

    eat = EATDefense(model=model_framework,
                     defense_name=defense_name,
                     dataset=dataset,
                     training_parameters=training_parameters,
                     device=device,
                     **eat_params)

    # train the external models
    if args.train_externals:
        print('\nStart to train the external models ......\n')
        eat.train_external_model_group(train_loader=train_loader,
                                       validation_loader=valid_loader)

    # load the external models
    pre_train_models = eat.load_external_model_group(
        model_dir='../DefenseEnhancedModels/EAT/', test_loader=test_loader)

    eat.defense(pre_trained_models=pre_train_models,
                train_loader=train_loader,
                validation_loader=valid_loader)
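The eps and alpha hyperparameters above parameterize the adversarial examples that ensemble adversarial training mixes into its training batches. As a hedged illustration of the kind of single-step perturbation involved (not EATDefense's actual crafting code), an FGSM step with budget eps looks like this:

import torch
import torch.nn as nn

def fgsm_perturb_sketch(model, images, labels, eps):
    # Single-step FGSM: x_adv = clip(x + eps * sign(grad_x loss), 0, 1).
    images = images.clone().detach().requires_grad_(True)
    loss = nn.CrossEntropyLoss()(model(images), labels)
    loss.backward()
    return torch.clamp(images + eps * images.grad.sign(), 0.0, 1.0).detach()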
Example #6
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # prepare the dataset name, candidate num, dataset location and raw model location
    dataset = args.dataset.upper()
    num = args.number
    dataset_location = '../RawModels/{}/'.format(dataset)
    raw_model_location = '../RawModels/{}/model/{}_raw.pt'.format(
        dataset, dataset)
    print(
        "\nStarting to select {} {} candidate examples that are correctly classified by the raw model loaded from {}\n"
        .format(num, dataset, raw_model_location))

    # load the raw model and testing dataset
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    if dataset == 'MNIST':
        raw_model = MNISTConvNet().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_mnist_test_loader(dir_name=dataset_location,
                                            batch_size=1,
                                            shuffle=False)
    else:
        raw_model = resnet20_cifar().to(device)
        raw_model.load(path=raw_model_location, device=device)
        test_loader = get_cifar10_test_loader(dir_name=dataset_location,
                                              batch_size=1,
                                              shuffle=False)

    # get the successfully classified examples
    successful = []
    raw_model.eval()

    with torch.no_grad():
        for image, label in test_loader:
            image = image.to(device)
            label = label.to(device)
            output = raw_model(image)
            _, predicted = torch.max(output.data, 1)
            if predicted == label:
                _, least_likely_class = torch.min(output.data, 1)
                successful.append([image, label, least_likely_class])

    # number of test examples correctly classified by the raw model
    print(len(successful))
    candidates = random.sample(successful, num)

    candidate_images = []
    candidate_labels = []
    candidates_llc = []
    candidate_targets = []

    for index in range(len(candidates)):
        image = candidates[index][0].cpu().numpy()
        image = np.squeeze(image, axis=0)
        candidate_images.append(image)

        label = candidates[index][1].cpu().numpy()[0]
        llc = candidates[index][2].cpu().numpy()[0]

        # selection for the targeted label
        classes = [i for i in range(10)]
        classes.remove(label)
        target = random.sample(classes, 1)[0]

        one_hot_label = [0 for i in range(10)]
        one_hot_label[label] = 1

        one_hot_llc = [0 for i in range(10)]
        one_hot_llc[llc] = 1

        one_hot_target = [0 for i in range(10)]
        one_hot_target[target] = 1

        candidate_labels.append(one_hot_label)
        candidates_llc.append(one_hot_llc)
        candidate_targets.append(one_hot_target)

    candidate_images = np.array(candidate_images)
    candidate_labels = np.array(candidate_labels)
    candidates_llc = np.array(candidates_llc)
    candidate_targets = np.array(candidate_targets)

    # recreate a clean output directory for the selected candidates
    if os.path.exists('./{}/'.format(dataset)):
        shutil.rmtree('./{}/'.format(dataset))
    os.mkdir('./{}/'.format(dataset))

    np.save('./{}/{}_inputs.npy'.format(dataset, dataset), candidate_images)
    np.save('./{}/{}_labels.npy'.format(dataset, dataset), candidate_labels)
    np.save('./{}/{}_llc.npy'.format(dataset, dataset), candidates_llc)
    np.save('./{}/{}_targets.npy'.format(dataset, dataset), candidate_targets)
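The saved .npy files are consumed later by the attack scripts; a minimal loader that mirrors the np.save calls above (the function name is mine) would be:

import numpy as np

def load_candidates_sketch(dataset):
    # labels, llc and targets are stored one-hot encoded; inputs are raw image arrays
    inputs = np.load('./{}/{}_inputs.npy'.format(dataset, dataset))
    labels = np.load('./{}/{}_labels.npy'.format(dataset, dataset))
    llc = np.load('./{}/{}_llc.npy'.format(dataset, dataset))
    targets = np.load('./{}/{}_targets.npy'.format(dataset, dataset))
    return inputs, labels, llc, targets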
Example #7
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    dataset = args.dataset.upper()
    assert dataset == 'MNIST' or dataset == 'CIFAR10'
    batch_size = 1000
    # get the training_parameters, raw_model, and test_loader
    model_location = '{}/{}/model/{}_raw.pt'.format('../RawModels', dataset, dataset)
    if dataset == 'MNIST':
        raw_model = MNISTConvNet().to(device)
        test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/', batch_size=batch_size)
    else:
        raw_model = resnet20_cifar().to(device)
        test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/', batch_size=batch_size)

    raw_model.load(path=model_location, device=device)

    defense_name = 'RC'
    rc = RCDefense(model=raw_model, defense_name=defense_name, dataset=dataset, device=device, num_points=args.num_points)

    # search the radius r
    if args.search:
        # get the validation dataset (a 2% split carved out of the training dataset)
        print('start to search the radius r using validation dataset ...')
        if dataset == 'MNIST':
            _, valid_loader = get_mnist_train_validate_loader(dir_name='../RawModels/MNIST/', batch_size=batch_size, valid_size=0.02,
                                                              shuffle=True)
        else:
            _, valid_loader = get_cifar10_train_validate_loader(dir_name='../RawModels/CIFAR10/', batch_size=batch_size, valid_size=0.02,
                                                                shuffle=True)
        radius = rc.search_best_radius(validation_loader=valid_loader, radius_min=args.radius_min, radius_max=args.radius_max,
                                       radius_step=args.radius_step)
    else:
        radius = round(args.radius, 2)
    print('######\nthe radius for RC is set or searched as: {}\n######'.format(radius))

    # calculate the accuracy of region-based classification defense on testing dataset
    print('\nStart to calculate the accuracy of region-based classification defense on testing dataset')
    raw_model.eval()
    total = 0.0
    correct = 0.0
    with torch.no_grad():
        for images, labels in test_loader:
            # apply the region-based classification on images using radius
            rc_labels = rc.region_based_classification(samples=images, radius=radius)
            rc_labels = torch.from_numpy(rc_labels)

            total += labels.size(0)
            correct += (rc_labels == labels).sum().item()
        ratio = correct / total
        print('\nTest accuracy of the {} model on the testing dataset: {:.1f}/{:.1f} = {:.2f}%\n'.format(raw_model.model_name, correct, total,
                                                                                                         ratio * 100))
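region_based_classification is supplied by RCDefense and its body is not shown here. Conceptually (this is a sketch of the published region-based classification idea, not the repository's implementation), it takes a majority vote over the model's predictions on num_points copies of each input sampled uniformly from an L_inf hypercube of size radius:

import torch
import torch.nn.functional as F

def region_based_classification_sketch(model, samples, radius, num_points, device, num_classes=10):
    # Majority vote over `num_points` noisy copies drawn from the L_inf ball of size `radius`.
    model.eval()
    samples = samples.to(device)
    votes = torch.zeros(samples.size(0), num_classes, device=device)
    with torch.no_grad():
        for _ in range(num_points):
            noise = torch.empty_like(samples).uniform_(-radius, radius)
            predictions = model(torch.clamp(samples + noise, 0.0, 1.0)).argmax(dim=1)
            votes += F.one_hot(predictions, num_classes=num_classes).float()
    return votes.argmax(dim=1).cpu().numpy()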