def main():
    args = Args().get_args()
    kwargs = vars(args)
    checkpoint = torch.load(args.checkpoint)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])

    attacker = SmoothAttack(base_classifier)
    smoothed_classifier = Smooth(base_classifier,
                                 get_num_classes(args.dataset), args.sigma)

    dataset = get_dataset(args.dataset, 'test')
    average_nat = []
    average_adv = []

    j_header('index', 'nat_y', 'adv_y', 'nat_rad', 'adv_rad', 'success')
    figure = FigureSaver()
    for i in range(0, len(dataset), args.skip):
        (x, label) = dataset[i]
        x = x.cuda()
        first_x = x.data

        nat_pred, nat_rad = smoothed_classifier.certify(
            x, args.N0, args.N, args.alpha, args.batch)
        if nat_pred == -1:
            continue
        if args.dataset == DATASETS[0]:  # ImageNet
            targets = [j for j in range(0, 1000, 100) if j != label]
        else:
            targets = [j for j in range(10) if j != label]
        best_rad = -10.0
        best_image = None
        best_target = -1

        for target in targets:
            adv_x = attacker.perturb(x=first_x, y=target, **kwargs)
            # To run the Wasserstein attack instead, uncomment the following
            # line and switch the attacker to its Wasserstein variant:
            # adv_x = attacker.perturb(x=first_x, y=target, eps=args.sigma, steps=args.steps, batch=args.batch)
            adv_pred, adv_rad = smoothed_classifier.certify(
                adv_x, args.N0, 2 * args.N0, args.alpha, args.batch)
            adv_suc = (adv_pred != label) and (adv_pred != -1) and (nat_pred != -1)
            adv_rad = adv_rad if adv_suc else -adv_rad

            if adv_rad > best_rad:
                best_rad = adv_rad
                best_image = adv_x.data
                best_target = target

        figure.save(best_image, i, 'best={}'.format(best_target))
        figure.save(first_x, i, 'natural')
        best_pred, best_rad = smoothed_classifier.certify(
            best_image, args.N0, args.N, args.alpha, args.batch)
        j_print(i, label, best_target, nat_rad, best_rad)
        average_adv.append(best_rad)
        average_nat.append(nat_rad)
    average_nat = np.array(average_nat)
    average_adv = np.array(average_adv)
    print('Average nat radii {}, Average adv radii {}'.format(
        average_nat.mean(), average_adv.mean()))
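
For context, the `Smooth` wrapper shared by these examples implements randomized smoothing (Cohen et al., 2019): it queries the base classifier under Gaussian noise, takes a majority vote, and certifies an L2 radius, abstaining with -1 when the vote is not statistically significant. The sketch below is illustrative only; the helper names and the scipy/statsmodels confidence bounds are assumptions, not the actual `Smooth` implementation.

# Minimal sketch of the randomized-smoothing logic behind `Smooth`
# (Cohen et al., 2019). Helper names and the scipy/statsmodels bounds
# are assumptions, not the real implementation.
import torch
from scipy.stats import binomtest, norm
from statsmodels.stats.proportion import proportion_confint


def sample_noisy_counts(f, x, num_classes, sigma, n, batch_size):
    """Count class votes of the base classifier f under Gaussian noise."""
    counts = torch.zeros(num_classes, dtype=torch.long)
    with torch.no_grad():
        remaining = n
        while remaining > 0:
            b = min(batch_size, remaining)
            noisy = x.unsqueeze(0) + sigma * torch.randn(b, *x.shape,
                                                         device=x.device)
            preds = f(noisy).argmax(dim=1).cpu()
            counts += torch.bincount(preds, minlength=num_classes)
            remaining -= b
    return counts


def smooth_predict(f, x, num_classes, sigma, n, alpha, batch_size):
    """Majority-vote prediction; returns -1 (abstain), mirroring the
    == -1 checks in the loop above."""
    counts = sample_noisy_counts(f, x, num_classes, sigma, n, batch_size)
    nA, nB = counts.topk(2).values.tolist()
    if binomtest(nA, nA + nB, 0.5).pvalue > alpha:
        return -1
    return int(counts.argmax())


def certified_radius(nA, n, alpha, sigma):
    """Certified L2 radius R = sigma * Phi^{-1}(pA_lower); -1 on abstain."""
    pA_lower = proportion_confint(nA, n, alpha=2 * alpha, method="beta")[0]
    return sigma * norm.ppf(pA_lower) if pA_lower > 0.5 else -1.0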
Example #2
def test(epoch, patch, patch_shape):
    netClassifier.eval()
    cor = 0
    total = 0
    smoothed_classifier = Smooth(netClassifier, 10, opt.sigma)
    for batch_idx, (data, labels) in enumerate(test_loader):
        if labels.item() == target:
            continue
        if torch.cuda.is_available():
            data = data.cuda()
            labels = labels.cuda()
        data, labels = Variable(data), Variable(labels)
        data = data[:, [2, 1, 0], :, :]  # rgb to bgr

        prediction = netClassifier(data)

        total += 1

        # transform patch
        data_shape = data.data.cpu().numpy().shape
        if patch_type == 'circle':
            patch, mask, patch_shape = circle_transform(
                patch, data_shape, patch_shape, image_size)
        elif patch_type == 'square':
            patch, mask = square_transform(patch, data_shape, patch_shape,
                                           image_size)
        patch, mask = torch.FloatTensor(patch), torch.FloatTensor(mask)
        if torch.cuda.is_available():
            patch, mask = patch.cuda(), mask.cuda()
        patch, mask = Variable(patch), Variable(mask)

        adv_x = torch.mul((1 - mask), data) + torch.mul(mask, patch)
        adv_x = torch.clamp(adv_x, min_out, max_out)

        #adv_label = netClassifier(adv_x).data.max(1)[1][0]
        ori_label = labels.data[0]

        if epoch == opt.epochs:
            prediction = smoothed_classifier.predict(adv_x, opt.N, opt.alpha,
                                                     opt.batch)
            cor += int(prediction == int(labels))

            # log the prediction and whether it was correct
            #print("{}\t{}\t{}\t{}\t{}".format(labels, prediction, cor, time_elapsed), file=f, flush=True)
        masked_patch = torch.mul(mask, patch)
        patch = masked_patch.data.cpu().numpy()
        new_patch = np.zeros(patch_shape)
        for i in range(new_patch.shape[0]):
            for j in range(new_patch.shape[1]):
                new_patch[i][j] = submatrix(patch[i][j])

        patch = new_patch

    if epoch == opt.epochs:
        print("final accuracy is ", cor / total)
    else:
        print("continue to run")
Example #3
def test(epoch, patch, patch_shape):
    netClassifier.eval()
    cor = 0
    total = 0
    smoothed_classifier = Smooth(netClassifier, 16, opt.sigma)
    for batch_idx, (data, labels) in enumerate(test_loader):
        if labels.item() == target:
            continue
        if torch.cuda.is_available():
            data = data.cuda()
            labels = labels.cuda()
        data, labels = Variable(data), Variable(labels)

        prediction = netClassifier(data)

        # only compute adversarial examples on inputs that are originally classified correctly
        # if prediction.data.max(1)[1][0] != labels.data[0]:
        #     continue

        total += 1

        # transform patch
        data_shape = data.data.cpu().numpy().shape
        if patch_type == 'circle':
            patch, mask, patch_shape = circle_transform(
                patch, data_shape, patch_shape, image_size)
        elif patch_type == 'square':
            patch, mask = square_transform(patch, data_shape, patch_shape,
                                           image_size)
        patch, mask = torch.FloatTensor(patch), torch.FloatTensor(mask)
        if torch.cuda.is_available():
            patch, mask = patch.cuda(), mask.cuda()
        patch, mask = Variable(patch), Variable(mask)

        adv_x = torch.mul((1 - mask), data) + torch.mul(mask, patch)
        adv_x = torch.clamp(adv_x, min_out, max_out)

        ori_label = labels.data[0]

        if epoch == opt.epochs:
            prediction = smoothed_classifier.predict(adv_x, opt.N, opt.alpha,
                                                     opt.batch)
            cor += int(prediction == int(labels))
            #print(total)
            if total % 100 == 0:
                print(cor / total * 100)
        masked_patch = torch.mul(mask, patch)
        patch = masked_patch.data.cpu().numpy()
        new_patch = np.zeros(patch_shape)
        for i in range(new_patch.shape[0]):
            for j in range(new_patch.shape[1]):
                new_patch[i][j] = submatrix(patch[i][j])

        patch = new_patch

    print("The final accuracy is ", cor / total * 100)
Example #4
                    default=100000,
                    help="number of samples to use")
parser.add_argument("--alpha",
                    type=float,
                    default=0.001,
                    help="failure probability")
args = parser.parse_args()

if __name__ == "__main__":
    # load the base classifier
    checkpoint = torch.load(args.base_classifier)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier,
                                 get_num_classes(args.dataset), args.sigma)

    # prepare output file
    f = open(args.outfile, 'w')

    # iterate through the dataset
    dataset = get_dataset(args.dataset, args.split)
    print("idx\tlabel\tpredict\tcorrect\tscore\ttime", flush=True)
    print("idx\tlabel\tpredict\tcorrect\tscore\ttime", file=f, flush=True)
    for i in range(len(dataset)):

        # only certify every args.skip examples, and stop after args.max examples
        if i % args.skip != 0:
            continue
        if i == args.max:
            break
Example #5
                                       shuffle=True)
        for x in ['train', 'val', 'test']
    }

    dataset_sizes = {
        x: len(image_datasets[x])
        for x in ['train', 'val', 'test']
    }
    class_names = image_datasets['train'].classes

    print("success1")

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    print(len(image_datasets["test"]))
    smoothed_classifier = Smooth(base_classifier, 10, args.sigma)

    # prepare output file
    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset

    dataset = image_datasets["test"]

    glass1 = cv2.imread(
        '/home/research/tongwu/glass/models/dataprepare/silhouette.png')
    glass = transforms.ToTensor()(glass1)

    # eps     = [0, 0.5 , 1  , 1.5 , 2  , 2.5 , 3  ]
    # alpha   = [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3]
Example #6

    target_test_loader = torch.utils.data.DataLoader(
        dataset=dataset_target_test,
        batch_size=batch_size,
        shuffle=False,
        num_workers=8
    )


    checkpoint = torch.load(args.base_classifier)
    base_classifier = CNN(in_channels=3, target=True).to(device)
    base_classifier.load_state_dict(checkpoint['model'])

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier, num_classes=10, sigma=args.sigma)

    # prepare output file
    # f = open(args.outfile, 'w')
    # print("idx\tlabel\tpredict\tradius\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset
    n_total = 0
    n_correct = 0
    thresh_list = [0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
    correct_list = [0] * 7
    for i, data in enumerate(target_test_loader):
        if i % 100 == 0:
            print(i)
        # if i > 100:
        #     break
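        # The thresh_list/correct_list bookkeeping above suggests the
        # truncated loop body certifies each example and tallies, per
        # radius threshold, how often the prediction is correct with a
        # certified radius of at least that threshold. A hypothetical
        # continuation (assuming the loader yields batches of size 1 and
        # the args.N0/args.N/args.alpha/args.batch names used in the
        # other examples, which this snippet does not show):
        x, label = data
        prediction, radius = smoothed_classifier.certify(
            x[0].to(device), args.N0, args.N, args.alpha, args.batch)
        n_total += 1
        if prediction == int(label):
            n_correct += 1
            for t_idx, t in enumerate(thresh_list):
                if radius >= t:
                    correct_list[t_idx] += 1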
Example #7
                        help="number of samples to use")
    parser.add_argument("--alpha",
                        type=float,
                        default=0.001,
                        help="failure probability")
    args = parser.parse_args()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tcorrect\ttime", file=f, flush=True)
    batch_size = 1
    dataloaders, dataset_sizes = data_process(batch_size)
    model = VGG_16()
    model.load_state_dict(torch.load('../donemodel/' + args.model))
    model.to(device)
    smoothed_classifier = Smooth(model, 10, args.sigma)

    eps = [0.5, 1, 1.5, 2, 2.5, 3]  # eps is the epsilon of the l_2 bound
    alpha = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3]  # alpha is the step size
    itera = [20, 20, 20, 20, 20, 20]  # iterations to find the optimum
    restart = [
        1, 1, 1, 1, 1, 1
    ]  # number of random restarts; since this is just a standard check of the
    # model, we do not use multiple restarts, but you can change that if you
    # want. Dropping some of these hyperparameter settings can speed things up.

    for i in range(len(eps)):
        cor = 0
        tot = 0
        for k in dataloaders['test']:
            (x, label) = k
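
The hyperparameter lists above parameterize an L2-bounded iterative attack, but the snippet is truncated before the attack itself. For illustration, a standard single-restart L2 PGD step under those hyperparameters might look like the sketch below; `model`, the cross-entropy loss, and 4D `(B, C, H, W)` inputs are assumptions, not this repository's attack code.

import torch
import torch.nn.functional as F


def l2_pgd(model, x, label, eps, alpha, iters):
    """Sketch of L2 PGD: normalized gradient ascent on the loss, with
    projection back into the L2 ball of radius eps around x."""
    delta = torch.zeros_like(x)
    for _ in range(iters):
        delta.requires_grad_(True)
        loss = F.cross_entropy(model(x + delta), label)
        grad = torch.autograd.grad(loss, delta)[0]
        with torch.no_grad():
            # step along the L2-normalized gradient
            g_norm = grad.flatten(1).norm(dim=1).clamp_min(1e-12)
            delta = delta + alpha * grad / g_norm.view(-1, 1, 1, 1)
            # project back into the eps-ball
            d_norm = delta.flatten(1).norm(dim=1).clamp_min(1e-12)
            delta = delta * (eps / d_norm).clamp(max=1.0).view(-1, 1, 1, 1)
    return (x + delta).detach()

Inside the loop over `dataloaders['test']`, one would then evaluate something like `smoothed_classifier.predict(l2_pgd(model, x.to(device), label.to(device), eps[i], alpha[i], itera[i]), args.N, args.alpha, args.batch)` and compare the result against `label`.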
Example #8
                    help="failure probability")
parser.add_argument("--confidence_measure",
                    choices=["pred_score", "margin"],
                    default="pred_score",
                    help="which confidence notion to use")
args = parser.parse_args()

if __name__ == "__main__":
    # load the base classifier
    checkpoint = torch.load(args.base_classifier)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier,
                                 get_num_classes(args.dataset), args.sigma,
                                 args.confidence_measure)

    # prepare output file
    f = open(args.outfile, 'w')
    print(
        "idx\tlabel\tpredict\texp_cdf_00\texp_cdf_25\texp_cdf_50\texp_cdf_75\texp_cdf_100\texp_cdf_125\texp_cdf_150\t"
        "exp_00\texp_25\texp_50\texp_75\texp_100\texp_125\texp_150\tcorrect\ttime",
        file=f,
        flush=True)

    # iterate through the dataset
    dataset = get_dataset(args.dataset, args.split)
    for i in range(len(dataset)):

        # only certify every args.skip examples, and stop after args.max examples
Example #9
    ############ data init ###################
    print("reading data ... ")
    test_loader = get_testing_data(batch_size=1) # each time, we perform sampling for one point.
    ##########################################

    ############ model init ##################
    print("initializing model ... ")
    print("arch : ",args.arch)
    if args.arch == 'resnet-110':
        myNet = resnet.resnet(depth=110,num_classes=10)
        data_normalizer = get_normalize_layer('cifar10')
        myNet = torch.nn.Sequential(data_normalizer,myNet)
    else:
        print("[Error] : Invalid Architecture")
        exit(0)
    print("checkpoint : ",args.model_path)
    model_dict = torch.load(args.model_path)
    myNet.load_state_dict(model_dict)
    print("device : ",device)
    myNet = torch.nn.DataParallel(myNet)
    myNet.to(device)

    print("noise level : ",args.noise_sd)
    print("initializing smooth model ...")
    myNet = Smooth(base_classifier=myNet,num_classes=n_class,sigma=args.noise_sd)
    ############################################

    certify(test_loader,myNet,args)
Example #10

    dataset = get_dataset(args.dataset, args.split)
    for i in range(len(dataset)):

        # only certify every args.skip examples, and stop after args.max examples
        if i % args.skip != 0:
            continue
        if i == args.max:
            break

        (x, label) = dataset[i]

        # smooth the classifier with this sigma
        if not args.fix_sig_smooth:
            args.sigma = sigma_test[i].item()
        print('sigma is: ', args.sigma)
        smoothed_classifier = Smooth(model, get_num_classes(args.dataset),
                                     args.sigma)
        # now certify with the same exact procedure as before
        before_time = time()
        # certify the prediction of g around x
        x = x.cuda()
        prediction, radius = smoothed_classifier.certify(
            x, args.N0, args.N, args.alpha, args.batch)
        after_time = time()
        correct = int(prediction == label)
        print(radius)
        time_elapsed = str(
            datetime.timedelta(seconds=(after_time - before_time)))
        print("{}\t{}\t{}\t{:.3}\t{}\t{:.3}\t{}".format(
            i, label, prediction, radius, correct, args.sigma, time_elapsed),
              file=f,
              flush=True)
Example #11
    weight_05conv_mixatten = '/media/unknown/Data/PLP/fast_adv/defenses/weights/shape_0.5_cifar10_mixed_Attention/cifar10acc0.8434999763965607_130.pth'
    weight_1conv_mixatten = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/1MixedAttention_mixed_attention_cifar10_ep_25_val_acc0.7080.pth'
    weight_shape_alp = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/shape_ALP_cifar10_ep_79_val_acc0.7625.pth'
    weight_attention = '/media/unknown/Data/PLP/fast_adv/defenses/weights/cifar10_Attention/cifar10acc0.8729999780654907_120.pth'
    weight_025conv_mixatten_ALP = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/0.25Mixed+ALP_cifar10_ep_85_val_acc0.8650.pth'
    weight_smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/2random_smooth_cifar10_ep_120_val_acc0.8510.pth'
    weight_05smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/shape_0.5_random/cifar10acc0.6944999784231186_50.pth'
    weight_025smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/0.25random_smooth_cifar10_ep_146_val_acc0.8070.pth'
    weight_1smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/1random_smooth_cifar10_ep_107_val_acc0.5380.pth'

    model_file = weight_025smooth
    model_dict = torch.load(model_file)
    model.load_state_dict(model_dict)

    # create the smoothed classifier g
    smoothed_classifier = Smooth(model, 10, 0.25)

    # prepare output file
    f = open('out_certify_025_smo100000', 'w')
    print("idx\tlabel\tpredict\tradius\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset
    #dataset = get_dataset(args.dataset, args.split)
    test_transform = transforms.Compose([
        transforms.ToTensor(),
    ])
    print('56')
    dataset = data.Subset(
        CIFAR10(args.data, train=True, transform=test_transform,
                download=True), list(range(48000, 50000)))
    for i in range(len(dataset)):