import numpy as np
import torch
# Project-local helpers (Args, Smooth, SmoothAttack, get_architecture,
# get_dataset, get_num_classes, DATASETS, FigureSaver, j_header, j_print)
# are assumed importable from the surrounding repository.

def main():
    args = Args().get_args()
    kwargs = vars(args)
    checkpoint = torch.load(args.checkpoint)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])

    attacker = SmoothAttack(base_classifier)
    smoothed_classifier = Smooth(base_classifier,
                                 get_num_classes(args.dataset), args.sigma)

    dataset = get_dataset(args.dataset, 'test')
    average_nat = []
    average_adv = []

    j_header('index', 'nat_y', 'adv_y', 'nat_rad', 'adv_rad', 'success')
    figure = FigureSaver()
    for i in range(0, len(dataset), args.skip):
        (x, label) = dataset[i]
        x = x.cuda()
        first_x = x.data

        nat_pred, nat_rad = smoothed_classifier.certify(
            x, args.N0, args.N, args.alpha, args.batch)
        if nat_pred == -1:
            continue
        if args.dataset == DATASETS[0]:  # ImageNet
            targets = [j for j in range(0, 1000, 100) if j != label]
        else:
            targets = [j for j in range(10) if j != label]
        best_rad = -10.0
        best_image = None
        best_target = -1

        for target in targets:
            adv_x = attacker.perturb(x=first_x, y=target, **kwargs)
            # To run the Wasserstein attack instead, uncomment the following
            # line and switch the attacker to its Wasserstein variant:
            # adv_x = attacker.perturb(x=first_x, y=target, eps=args.sigma, steps=args.steps, batch=args.batch)
            adv_pred, adv_rad = smoothed_classifier.certify(
                adv_x, args.N0, 2 * args.N0, args.alpha, args.batch)
            adv_suc = (adv_pred != label) and (adv_pred != -1) and (nat_pred != -1)
            adv_rad = adv_rad if adv_suc else -adv_rad

            if adv_rad > best_rad:
                best_rad = adv_rad
                best_image = adv_x.data
                best_target = target

        figure.save(best_image, i, 'best={}'.format(best_target))
        figure.save(first_x, i, 'natural')
        best_pred, best_rad = smoothed_classifier.certify(
            best_image, args.N0, args.N, args.alpha, args.batch)
        j_print(i, label, best_target, nat_rad, best_rad,
                int(best_pred != label))  # final column: attack success flag
        average_adv.append(best_rad)
        average_nat.append(nat_rad)
    average_nat = np.array(average_nat)
    average_adv = np.array(average_adv)
    print('Average nat radii {}, Average adv radii {}'.format(
        average_nat.mean(), average_adv.mean()))
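
The j_header and j_print helpers above are project-local and not shown in the excerpt. A minimal sketch of tab-separated stand-ins, assuming they do nothing more than format columns (hypothetical, not the project's actual implementation):

# Hypothetical stand-ins for the project-local j_header / j_print helpers
# used above; assumed to print tab-separated columns.
def j_header(*cols):
    print('\t'.join(str(c) for c in cols), flush=True)

def j_print(*vals):
    print('\t'.join(str(v) for v in vals), flush=True)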
Example #2
def test(epoch, patch, patch_shape):
    netClassifier.eval()
    cor = 0
    total = 0
    smoothed_classifier = Smooth(netClassifier, 10, opt.sigma)
    for batch_idx, (data, labels) in enumerate(test_loader):
        if labels.item() == target:
            continue
        if torch.cuda.is_available():
            data = data.cuda()
            labels = labels.cuda()
        data, labels = Variable(data), Variable(labels)
        data = data[:, [2, 1, 0], :, :]  # rgb to bgr

        prediction = netClassifier(data)

        total += 1

        # transform patch
        data_shape = data.data.cpu().numpy().shape
        if patch_type == 'circle':
            patch, mask, patch_shape = circle_transform(
                patch, data_shape, patch_shape, image_size)
        elif patch_type == 'square':
            patch, mask = square_transform(patch, data_shape, patch_shape,
                                           image_size)
        patch, mask = torch.FloatTensor(patch), torch.FloatTensor(mask)
        if torch.cuda.is_available():
            patch, mask = patch.cuda(), mask.cuda()
        patch, mask = Variable(patch), Variable(mask)

        adv_x = torch.mul((1 - mask), data) + torch.mul(mask, patch)
        adv_x = torch.clamp(adv_x, min_out, max_out)

        #adv_label = netClassifier(adv_x).data.max(1)[1][0]
        ori_label = labels.data[0]

        if epoch == opt.epochs:

            prediction = smoothed_classifier.predict(adv_x, opt.N, opt.alpha,
                                                     opt.batch)
            cor += int(prediction == int(labels))

            # log the prediction and whether it was correct
            #print("{}\t{}\t{}\t{}".format(labels, prediction, cor, time_elapsed), file=f, flush=True)
        masked_patch = torch.mul(mask, patch)
        patch = masked_patch.data.cpu().numpy()
        new_patch = np.zeros(patch_shape)
        for i in range(new_patch.shape[0]):
            for j in range(new_patch.shape[1]):
                new_patch[i][j] = submatrix(patch[i][j])

        patch = new_patch

    if epoch == opt.epochs:
        print("final accuracy is ", cor / total)
    else:
        print("continue to run")
Example #3
def test(epoch, patch, patch_shape):
    netClassifier.eval()
    cor = 0
    total = 0
    smoothed_classifier = Smooth(netClassifier, 16, opt.sigma)
    for batch_idx, (data, labels) in enumerate(test_loader):
        if labels.item() == target:
            continue
        if torch.cuda.is_available():
            data = data.cuda()
            labels = labels.cuda()
        data, labels = Variable(data), Variable(labels)

        prediction = netClassifier(data)

        # only compute adversarial examples on examples that are originally classified correctly
        # if prediction.data.max(1)[1][0] != labels.data[0]:
        #     continue

        total += 1

        # transform patch
        data_shape = data.data.cpu().numpy().shape
        if patch_type == 'circle':
            patch, mask, patch_shape = circle_transform(
                patch, data_shape, patch_shape, image_size)
        elif patch_type == 'square':
            patch, mask = square_transform(patch, data_shape, patch_shape,
                                           image_size)
        patch, mask = torch.FloatTensor(patch), torch.FloatTensor(mask)
        if torch.cuda.is_available():
            patch, mask = patch.cuda(), mask.cuda()
        patch, mask = Variable(patch), Variable(mask)

        adv_x = torch.mul((1 - mask), data) + torch.mul(mask, patch)
        adv_x = torch.clamp(adv_x, min_out, max_out)

        ori_label = labels.data[0]

        if epoch == opt.epochs:

            prediction = smoothed_classifier.predict(adv_x, opt.N, opt.alpha,
                                                     opt.batch)
            cor += int(prediction == int(labels))
            #print(total)
            if total % 100 == 0:
                print(cor / total * 100)
        masked_patch = torch.mul(mask, patch)
        patch = masked_patch.data.cpu().numpy()
        new_patch = np.zeros(patch_shape)
        for i in range(new_patch.shape[0]):
            for j in range(new_patch.shape[1]):
                new_patch[i][j] = submatrix(patch[i][j])

        patch = new_patch

    print("The final accuracy is ", cor / total * 100)
Example #4
parser.add_argument("--N",
                    type=int,
                    default=100000,
                    help="number of samples to use")
parser.add_argument("--alpha",
                    type=float,
                    default=0.001,
                    help="failure probability")
args = parser.parse_args()

if __name__ == "__main__":
    # load the base classifier
    checkpoint = torch.load(args.base_classifier)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier,
                                 get_num_classes(args.dataset), args.sigma)

    # prepare output file
    f = open(args.outfile, 'w')

    # iterate through the dataset
    dataset = get_dataset(args.dataset, args.split)
    print("idx\tlabel\tpredict\tcorrect\tscore\ttime", flush=True)
    print("idx\tlabel\tpredict\tcorrect\tscore\ttime", file=f, flush=True)
    for i in range(len(dataset)):

        # only certify every args.skip examples, and stop after args.max examples
        if i % args.skip != 0:
            continue
        if i == args.max:
            break
parser.add_argument("--skip", type=int, default=1, help="how many examples to skip")
parser.add_argument("--max", type=int, default=-1, help="stop after this many examples")
parser.add_argument("--split", choices=["train", "test"], default="test", help="train or test set")
parser.add_argument("--N0", type=int, default=100)
parser.add_argument("--N", type=int, default=100000, help="number of samples to use")
parser.add_argument("--alpha", type=float, default=0.001, help="failure probability")
args = parser.parse_args()

if __name__ == "__main__":
    # load the base classifier
    checkpoint = torch.load(args.base_classifier)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier, get_num_classes(args.dataset), args.sigma)

    # prepare output file
    if not os.path.exists(os.path.dirname(args.outfile)):
        os.makedirs(os.path.dirname(args.outfile))
    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tradius\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset
    dataset = get_dataset(args.dataset, args.split)
    for i in range(len(dataset)):

        # only certify every args.skip examples, and stop after args.max examples
        if i % args.skip != 0:
            continue
        if i == args.max:
            break
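
Both of these scripts stop the excerpt at the loop head. A minimal sketch of the loop body they share, assuming the Smooth.certify signature used in Example #1; the timing and row formatting are assumptions chosen to match the header printed above:

import time
import datetime

# Hypothetical loop body: certify one example and log a tab-separated row.
(x, label) = dataset[i]
x = x.cuda()
before_time = time.time()
prediction, radius = smoothed_classifier.certify(
    x, args.N0, args.N, args.alpha, args.batch)
correct = int(prediction == label)
elapsed = str(datetime.timedelta(seconds=(time.time() - before_time)))
print("{}\t{}\t{}\t{:.3f}\t{}\t{}".format(
    i, label, prediction, radius, correct, elapsed), file=f, flush=True)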
Example #6
    dataloaders = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=32,
                                       shuffle=True)
        for x in ['train', 'val', 'test']
    }

    dataset_sizes = {
        x: len(image_datasets[x])
        for x in ['train', 'val', 'test']
    }
    class_names = image_datasets['train'].classes

    print("success1")

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    print(len(image_datasets["test"]))
    smoothed_classifier = Smooth(base_classifier, 10, args.sigma)

    # prepare output file
    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset

    dataset = image_datasets["test"]

    glass1 = cv2.imread(
        '/home/research/tongwu/glass/models/dataprepare/silhouette.png')
    glass = transforms.ToTensor()(glass1)

    # eps     = [0, 0.5 , 1  , 1.5 , 2  , 2.5 , 3  ]
    # alpha   = [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3]

Example #7

    target_test_loader = torch.utils.data.DataLoader(
        dataset=dataset_target_test,
        batch_size=batch_size,
        shuffle=False,
        num_workers=8
    )


    checkpoint = torch.load(args.base_classifier)
    base_classifier = CNN(in_channels=3, target=True).to(device)
    base_classifier.load_state_dict(checkpoint['model'])

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier, num_classes= 10, sigma=args.sigma)

    # prepare output file
    # f = open(args.outfile, 'w')
    # print("idx\tlabel\tpredict\tradius\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset
    n_total = 0
    n_correct = 0
    thresh_list = [0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
    correct_list = [0] * 7
    for i,data in enumerate(target_test_loader):
        if i % 100 == 0:
            print(i)
        # if i > 100:
        #     break
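
The loop body is cut off here. A sketch of how thresh_list and correct_list would plausibly be used: certify each test point and, when the smoothed prediction is correct, count it at every l_2 radius threshold its certified radius clears. The certify parameters (N0, N, alpha, batch) are assumptions:

# Hypothetical continuation of the loop over target_test_loader.
x, y = data
x, y = x.to(device), y.to(device)
prediction, radius = smoothed_classifier.certify(
    x.squeeze(0), args.N0, args.N, args.alpha, args.batch)
n_total += 1
if prediction == int(y):
    n_correct += 1
    for t_idx, t in enumerate(thresh_list):
        if radius >= t:
            correct_list[t_idx] += 1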
Example #8
                        help="number of samples to use")
    parser.add_argument("--alpha",
                        type=float,
                        default=0.001,
                        help="failure probability")
    args = parser.parse_args()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tcorrect\ttime", file=f, flush=True)
    batch_size = 1
    dataloaders, dataset_sizes = data_process(batch_size)
    model = VGG_16()
    model.load_state_dict(torch.load('../donemodel/' + args.model))
    model.to(device)
    smoothed_classifier = Smooth(model, 10, args.sigma)

    eps = [0.5, 1, 1.5, 2, 2.5, 3]  # eps is the epsilon of the l_2 bound
    alpha = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3]  # alpha is the learning rate
    itera = [20, 20, 20, 20, 20, 20]  # iterations to find the optimum
    restart = [1, 1, 1, 1, 1, 1]  # number of random restarts; since this is
    # just a standard sanity check of the model, we do not use multiple
    # restarts, but you can change that if you want. Dropping some
    # hyperparameters could speed things up.

    for i in range(len(eps)):
        cor = 0
        tot = 0
        for k in dataloaders['test']:
            (x, label) = k
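
The excerpt ends as the inner loop begins. A sketch of how the eps/alpha/itera/restart settings would drive an l_2 PGD attack before querying the smoothed classifier; pgd_l2 is a placeholder name, not a function shown in the excerpt:

# Hypothetical continuation of the inner loop: attack with the i-th
# hyperparameter setting, then score the smoothed prediction.
x, label = x.to(device), label.to(device)
adv_x = pgd_l2(model, x, label, eps=eps[i], alpha=alpha[i],
               steps=itera[i], restarts=restart[i])
prediction = smoothed_classifier.predict(
    adv_x.squeeze(0), args.N, args.alpha, args.batch)
cor += int(prediction == int(label))
tot += 1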
Example #9
                    help="failure probability")
parser.add_argument("--confidence_measure",
                    choices=["pred_score", "margin"],
                    default="pred_score",
                    help="which confidence notion to use")
args = parser.parse_args()

if __name__ == "__main__":
    # load the base classifier
    checkpoint = torch.load(args.base_classifier)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier,
                                 get_num_classes(args.dataset), args.sigma,
                                 args.confidence_measure)

    # prepare output file
    f = open(args.outfile, 'w')
    print(
        "idx\tlabel\tpredict\texp_cdf_00\texp_cdf_25\texp_cdf_50\texp_cdf_75\texp_cdf_100\texp_cdf_125\texp_cdf_150\t"
        "exp_00\texp_25\texp_50\texp_75\texp_100\texp_125\texp_150\tcorrect\ttime",
        file=f,
        flush=True)

    # iterate through the dataset
    dataset = get_dataset(args.dataset, args.split)
    for i in range(len(dataset)):

        # only certify every args.skip examples, and stop after args.max examples
Example #10
parser.add_argument("--N",
                    type=int,
                    default=200000,
                    help="number of samples to use")
parser.add_argument("--alpha",
                    type=float,
                    default=0.001,
                    help="failure probability")
args = parser.parse_args()

if __name__ == "__main__":
    # load the base classifier
    checkpoint = torch.load(args.base_classifier)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier,
                                 get_num_classes(args.dataset), args.sigma)

    save_directory = os.path.dirname(args.outfile)
    if not os.path.exists(save_directory):
        os.makedirs(save_directory)

    # prepare output file
    f = open(args.outfile, 'a')
    print(
        "idx\tlabel\tcount\tpredict\tradiusR\tradiusG\tradiusB\tradius_L1\tradius_LInf\tradius_L2\tradius_cohen\tcorrect\ttime",
        file=f,
        flush=True)

    # iterate through the dataset
    dataset = get_dataset(args.dataset, args.split)
    for i in range(args.start, args.start + (args.skip * args.max), args.skip):
Example #11
parser.add_argument('--scale_down', default=1, type=int, help="factor to scale each dimension down by")

args = parser.parse_args()

if __name__ == "__main__":
    # load the base classifier
    checkpoint = torch.load(args.base_classifier)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])
    base_classifier.eval()
    # create the smoothed classifier g
    if args.scale_down != 1:
        base_classifier_orig = base_classifier
        base_classifier = lambda x: base_classifier_orig(
            torch.nn.functional.interpolate(x, scale_factor=args.scale_down))

    smoothed_classifier = Smooth(base_classifier, get_num_classes(args.dataset), args.sigma, p=args.p)

    # prepare output file
    f = open(args.outfile, 'w')
    if args.p == 2:
        print("idx\tlabel\tpredict\tcount\tany_iid_distribution_bound\tgeneralized_gaussian_bound_over_c\texact_radius\tcorrect\ttime", file=f, flush=True)
    else:
        print("idx\tlabel\tpredict\tcount\tany_iid_distribution_bound\tgeneralized_gaussian_bound_over_c\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset
    if args.scale_down == 1 or args.dataset == "imagenet":
        dataset = get_dataset(args.dataset, args.split)
    else:
        dataset = datasets.CIFAR10("./dataset_cache", train=False, download=True, transform=transforms.Compose([
            transforms.Resize(int(32/args.scale_down)),
            transforms.ToTensor()
Example #12
    ############ data init ###################
    print("reading data ... ")
    test_loader = get_testing_data(batch_size=1) # each time, we perform sampling for one point.
    ##########################################

    ############ model init ##################
    print("initializing model ... ")
    print("arch : ",args.arch)
    if args.arch == 'resnet-110':
        myNet = resnet.resnet(depth=110,num_classes=10)
        data_normalizer = get_normalize_layer('cifar10')
        myNet = torch.nn.Sequential(data_normalizer,myNet)
    else:
        print("[Error] : Invalid Architecture")
        exit(0)
    print("checkpoint : ",args.model_path)
    model_dict = torch.load(args.model_path)
    myNet.load_state_dict(model_dict)
    print("device : ",device)
    myNet = torch.nn.DataParallel(myNet)
    myNet.to(device)

    print("noise level : ",args.noise_sd)
    print("initializing smooth model ...")
    myNet = Smooth(base_classifier=myNet,num_classes=n_class,sigma=args.noise_sd)
    ############################################

    certify(test_loader, myNet, args)
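
The certify function invoked here is project-local and not shown. A minimal sketch under the assumption that it loops over the loader and calls Smooth.certify on each point (the argument names N0, N, alpha, and batch are assumptions):

# Hypothetical sketch of the certify driver; assumes the loader yields
# (image, label) batches of size one.
def certify(loader, smoothed, args):
    for idx, (x, y) in enumerate(loader):
        x = x.squeeze(0).to(device)
        prediction, radius = smoothed.certify(
            x, args.N0, args.N, args.alpha, args.batch)
        print("{}\t{}\t{}\t{:.3f}\t{}".format(
            idx, int(y), prediction, radius, int(prediction == int(y))),
            flush=True)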

Example #13
    
    # dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=32,
    #                                               shuffle=True)
    #                for x in ['train', 'val', 'test']}

    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val', 'test']}
    class_names = image_datasets['train'].classes

    print("success1")

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    print(len(image_datasets["test"]))
    smoothed_classifier = Smooth(base_classifier, 10, args.sigma)

    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tradius\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset
    dataset = image_datasets["test"]
    for i in range(len(dataset)):

        # only certify every args.skip examples, and stop after args.max examples
        if i % args.skip != 0:
            continue
        if i == args.max:
            break
Example #14
    weight_05conv_mixatten = '/media/unknown/Data/PLP/fast_adv/defenses/weights/shape_0.5_cifar10_mixed_Attention/cifar10acc0.8434999763965607_130.pth'
    weight_1conv_mixatten = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/1MixedAttention_mixed_attention_cifar10_ep_25_val_acc0.7080.pth'
    weight_shape_alp = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/shape_ALP_cifar10_ep_79_val_acc0.7625.pth'
    weight_attention = '/media/unknown/Data/PLP/fast_adv/defenses/weights/cifar10_Attention/cifar10acc0.8729999780654907_120.pth'
    weight_025conv_mixatten_ALP = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/0.25Mixed+ALP_cifar10_ep_85_val_acc0.8650.pth'
    weight_smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/2random_smooth_cifar10_ep_120_val_acc0.8510.pth'
    weight_05smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/shape_0.5_random/cifar10acc0.6944999784231186_50.pth'
    weight_025smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/0.25random_smooth_cifar10_ep_146_val_acc0.8070.pth'
    weight_1smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/1random_smooth_cifar10_ep_107_val_acc0.5380.pth'

    model_file = weight_025smooth
    model_dict = torch.load(model_file)
    model.load_state_dict(model_dict)

    # create the smoothed classifier g
    smoothed_classifier = Smooth(model, 10, 0.25)

    # prepare output file
    f = open('out_certify_025_smo100000', 'w')
    print("idx\tlabel\tpredict\tradius\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset
    #dataset = get_dataset(args.dataset, args.split)
    test_transform = transforms.Compose([
        transforms.ToTensor(),
    ])
    print('56')
    dataset = data.Subset(
        CIFAR10(args.data, train=True, transform=test_transform,
                download=True), list(range(48000, 50000)))
    for i in range(len(dataset)):