    def attack(self, img, model, target=None, pixel_count=1,
               maxiter=75, popsize=400, verbose=False, plot=False):
        # Change the target class based on whether this is a targeted attack or not
        targeted_attack = target is not None
        target_class = target if targeted_attack else self.y_test[img, 0]

        # Define bounds for a flat vector of (x, y, value) triples; for more
        # pixels, repeat this layout. This variant works on grayscale images,
        # so each pixel carries a single intensity instead of r,g,b.
        dim_x, dim_y = self.dimensions
        bounds = [(0, dim_x), (0, dim_y), (0, 256)] * pixel_count

        # Population multiplier, in terms of the size of the perturbation vector x
        popmul = max(1, popsize // len(bounds))

        # Format the predict/callback functions for the differential evolution algorithm
        predict_fn = lambda xs: self.predict_classes(
            xs, self.x_test[img], target_class, model, target is None)
        callback_fn = lambda x, convergence: self.attack_success(
            x, self.x_test[img], target_class, model, targeted_attack, verbose)
        # Call Scipy's Implementation of Differential Evolution
        attack_result = differential_evolution(
            predict_fn, bounds, maxiter=maxiter, popsize=popmul,
            recombination=1, atol=-1, callback=callback_fn, polish=False)

        # Calculate some useful statistics to return from this function
        attack_image = helper.perturb_image(attack_result.x, self.x_test[img])[0]
        # Use the L2 norm of the perturbation to measure the size of the change
        L2_img = np.array(attack_image - self.x_test[img]).flatten()
        L2_norm = np.sqrt(np.sum(np.square(L2_img)))

        prior_probs = model.predict(np.array([self.x_test[img]]))[0]
        predicted_probs = model.predict(np.array([attack_image]))[0]
        predicted_class = np.argmax(predicted_probs)
        actual_class = self.y_test[img, 0]
        success = predicted_class != actual_class
        cdiff = prior_probs[actual_class] - predicted_probs[actual_class]

        # Show the best attempt at a solution (successful or not)
        if plot:
            helper.plot_image(attack_image, actual_class, self.class_names, predicted_class)

        return [model.name, pixel_count, img, actual_class, predicted_class, success, cdiff,
                prior_probs, predicted_probs, attack_result.x, L2_norm]
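Every snippet on this page leans on helper.perturb_image, which is not shown here. A minimal sketch of what it might look like for the grayscale (x, y, value) layout used above, assuming img is a 2-D array; the index clamping and (x, y) ordering are assumptions:

import numpy as np

def perturb_image(xs, img):
    # Accept a single perturbation vector or a whole population of them
    xs = np.atleast_2d(xs)
    # One copy of the image per candidate perturbation
    imgs = np.tile(img, (len(xs), 1, 1))
    for x, out in zip(xs, imgs):
        # Each pixel is a flat (x, y, value) triple; split and apply
        for x_pos, y_pos, value in x.reshape(-1, 3):
            # Clamp indices since the DE bounds are inclusive at the top
            i = min(int(x_pos), img.shape[0] - 1)
            j = min(int(y_pos), img.shape[1] - 1)
            out[i, j] = value
    return imgs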
Example #2
    def predict_classes(self, xs, img, target_class, model, minimize=True):
        # Perturb the image with the given pixel(s) x and get the prediction of the model
        imgs_perturbed = helper.perturb_image(xs, img)
        predictions = model.predict(imgs_perturbed)[:, target_class]
        # This function should always be minimized, so return its complement
        # if needed. (The original returned a tuple with imgs_perturbed as
        # well, which differential_evolution cannot minimize.)
        return predictions if minimize else 1 - predictions
    def attack(self, img, model, target=None, pixel_count=1,
               maxiter=75, popsize=400, verbose=False, plot=False, preprocessing_cb=None):
        """
        @img: index to the image you want to attack
        @model: the model to attack
        @target: the index to the target you want to aim for
        @pixel_count: how many pixels to have in your attack
        @maxiter: maximum number of iterations on optimization
        @popsize: size of the population to use at each iteration of the optimization
        @verbose: boolean, controls printing
        @plot: boolean, whether to plot the final results
        """
        # Change the target class based on whether this is a targeted attack or not
        targeted_attack = target is not None
        target_class = target if targeted_attack else self.y_test[img, 0]

        # Define bounds for a flat vector of x,y,r,g,b values
        # For more pixels, repeat this layout
        dim_x, dim_y = self.dimensions
        bounds = [(0, dim_x), (0, dim_y), (0, 255), (0, 255), (0, 255)] * pixel_count

        # Population multiplier, in terms of the size of the perturbation vector x
        popmul = max(1, popsize // len(bounds))

        # Format the predict/callback functions for the differential evolution algorithm
        predict_fn = lambda xs: self.predict_classes(
            xs, self.x_test[img], target_class, model, target is None, preprocessing_cb=preprocessing_cb)
        callback_fn = lambda x, convergence: self.attack_success(
            x, self.x_test[img], target_class, model, targeted_attack, verbose, preprocessing_cb=preprocessing_cb)

        # Call Scipy's Implementation of Differential Evolution
        attack_result = differential_evolution(
            predict_fn, bounds, maxiter=maxiter, popsize=popmul,
            recombination=1, atol=-1, callback=callback_fn, polish=False)

        # Calculate some useful statistics to return from this function
        attack_image = helper.perturb_image(attack_result.x, self.x_test[img].copy())[0]

        if preprocessing_cb is not None:
            original_img = preprocessing_cb(self.x_test[img].copy())
            attack_image = preprocessing_cb(attack_image.copy())
        else:
            original_img = self.x_test[img].copy()

        prior_probs = model.predict(np.array([original_img]))[0]
        predicted_probs = model.predict(np.array([attack_image]))[0]
        predicted_class = np.argmax(predicted_probs)
        actual_class = self.y_test[img, 0]
        success = predicted_class != actual_class
        cdiff = prior_probs[actual_class] - predicted_probs[actual_class]

        # Show the best attempt at a solution (successful or not)
        if plot:
            helper.plot_image(attack_image, actual_class, self.class_names, predicted_class)

        return [model.name, pixel_count, img, actual_class, predicted_class, success, cdiff, prior_probs, predicted_probs, attack_result.x]
    def predict_classes(self, xs, img, target_class, model, minimize=True, preprocessing_cb=None):
        # Perturb the image with the given pixel(s) x and get the prediction of the model
        imgs_perturbed = helper.perturb_image(xs, img.copy())

        if preprocessing_cb is not None:
            imgs_perturbed = preprocessing_cb(imgs_perturbed)
        predictions = model.predict(imgs_perturbed)[:, target_class]

        # This function should always be minimized, so return its complement if needed
        return predictions if minimize else 1 - predictions
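The preprocessing_cb hook lets differential evolution search in raw pixel space while the model sees preprocessed inputs. A minimal sketch of such a callback, assuming a model that expects inputs scaled to [-1, 1] (the exact transform depends on your model):

import numpy as np

def preprocessing_cb(imgs):
    # Scale raw [0, 255] pixel values to [-1, 1], MobileNet-style
    return np.asarray(imgs, dtype=np.float32) / 127.5 - 1.0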
Example #5
    def attack(self,
               img,
               model,
               pixel_count=1,
               maxiter=125,
               popsize=400,
               plot=False):
        # Untargeted attack: try to move the prediction away from the true class
        target_class = self.y_test[img, 0]

        # Define bounds for a flat vector of x,y,r,g,b values
        # For more pixels, repeat this layout
        dim_x, dim_y = self.dimensions
        bounds = [(0, dim_x), (0, dim_y), (0, 256), (0, 256),
                  (0, 256)] * pixel_count

        # Population multiplier, in terms of the size of the perturbation vector x
        popmul = max(1, popsize // len(bounds))

        # Format the predict/callback functions for the differential evolution algorithm
        # Minimize the model's confidence in the true class of the perturbed
        # image. (The original returned the whole probability vector, which
        # differential_evolution cannot minimize.)
        predict_fn = lambda xs: model.predict(
            helper.perturb_image(xs, self.x_test[img]))[0, target_class]
        callback_fn = lambda x, convergence: self.attack_success(
            x, self.x_test[img], target_class, model)

        # Call Scipy's Implementation of Differential Evolution
        attack_result = differential_evolution(predict_fn,
                                               bounds,
                                               maxiter=maxiter,
                                               popsize=popmul,
                                               recombination=1,
                                               atol=-1,
                                               callback=callback_fn,
                                               polish=False)

        # Calculate some useful statistics to return from this function
        attack_image = helper.perturb_image(attack_result.x,
                                            self.x_test[img])[0]
        prior_probs = model.predict(np.array([self.x_test[img]]))[0]
        predicted_probs = model.predict(np.array([attack_image]))[0]
        predicted_class = np.argmax(predicted_probs)
        actual_class = self.y_test[img, 0]
        success = predicted_class != actual_class
        cdiff = prior_probs[actual_class] - predicted_probs[actual_class]

        # Show the best attempt at a solution (successful or not)
        if plot:
            helper.plot_image(attack_image, actual_class, self.class_names,
                              predicted_class)

        return [
            model.name, pixel_count, img, actual_class, predicted_class,
            success, cdiff, prior_probs, predicted_probs, attack_result.x
        ]
    def attack_success(self, x, img, target_class, model, targeted_attack=False, verbose=False):
        # Perturb the image with the given pixel(s) and get the prediction of the model
        attack_image = helper.perturb_image(x, img)

        confidence = model.predict(attack_image)[0]
        predicted_class = np.argmax(confidence)
        
        # If the prediction is what we want (misclassification or 
        # targeted classification), return True
        if verbose:
            print('Confidence:', confidence[target_class])
        if ((targeted_attack and predicted_class == target_class) or
            (not targeted_attack and predicted_class != target_class)):
            return True
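attack_success doubles as the differential_evolution callback: scipy halts the search as soon as the callback returns a truthy value, which is why the function returns True on success and (implicitly) None otherwise. A self-contained toy showing that early-stopping contract:

import numpy as np
from scipy.optimize import differential_evolution

def objective(x):
    return float(np.sum(x ** 2))

def callback(xk, convergence):
    # Returning True stops the optimization early, just as
    # attack_success does once the attack has already succeeded
    return objective(xk) < 1e-3

result = differential_evolution(objective, bounds=[(-5, 5)] * 2,
                                maxiter=75, callback=callback, polish=False)
print(result.x, result.fun)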
    def predict_classes(self, xs, img, target_class, model, minimize=True):
        # Perturb the image with the given pixel(s) x and get the prediction of the model
        imgs_perturbed = helper.perturb_image(xs, img)

        target_class = int(target_class)
        predictions = model.predict(imgs_perturbed)[:, target_class]
        # This function should always be minimized, so return its complement if needed
        return predictions if minimize else 1 - predictions
def attack(img,
           model,
           target=None,
           pixel_count=1,
           maxiter=75,
           popsize=400,
           verbose=False):
    # Change the target class based on whether this is a targeted attack or not
    targeted_attack = target is not None
    target_class = target if targeted_attack else y_test[img, 0]

    # Define bounds for a flat vector of x,y,r,g,b values
    # For more pixels, repeat this layout
    bounds = [(0, 32), (0, 32), (0, 256), (0, 256), (0, 256)] * pixel_count

    # Population multiplier, in terms of the size of the perturbation vector x
    popmul = max(1, popsize // len(bounds))

    # Format the predict/callback functions for the differential evolution algorithm
    predict_fn = lambda xs: predict_classes(xs, x_test[img], target_class,
                                            model, target is None)
    callback_fn = lambda x, convergence: attack_success(
        x, img, target_class, model, targeted_attack, verbose)

    # Call Scipy's Implementation of Differential Evolution
    attack_result = differential_evolution(predict_fn,
                                           bounds,
                                           maxiter=maxiter,
                                           popsize=popmul,
                                           recombination=1,
                                           atol=-1,
                                           callback=callback_fn,
                                           polish=False)

    # Calculate some useful statistics to return from this function
    attack_image = perturb_image(attack_result.x, x_test[img])[0]
    prior_probs = model.predict_one(x_test[img])
    predicted_probs = model.predict_one(attack_image)
    predicted_class = np.argmax(predicted_probs)
    actual_class = y_test[img, 0]
    success = predicted_class != actual_class
    cdiff = prior_probs[actual_class] - predicted_probs[actual_class]

    # Show the best attempt at a solution (successful or not)
    # helper.plot_image(attack_image, actual_class, class_names, predicted_class)

    return [
        model.name, pixel_count, img, actual_class, predicted_class, success,
        cdiff, prior_probs, predicted_probs, attack_result.x
    ]
    def attack_success(self, x, img, target_class, model, targeted_attack=False, verbose=False, preprocessing_cb=None):
        # Perturb the image with the given pixel(s) and get the prediction of the model
        attack_image = helper.perturb_image(x, img.copy())

        if preprocessing_cb is not None:
            attack_image = preprocessing_cb(attack_image)

        confidence = model.predict(attack_image)[0]
        predicted_class = np.argmax(confidence)

        # If the prediction is what we want (misclassification or
        # targeted classification), return True
        if verbose:
            print('Confidence {}: x={}'.format(confidence[target_class], x))
        if ((targeted_attack and predicted_class == target_class) or
            (not targeted_attack and predicted_class != target_class)):
            return True
    def attack(self, img, model, target=None, pixel_count=1,
               maxiter=75, popsize=400, verbose=False, plot=False):
        # Change the target class based on whether this is a targeted attack or not
        targeted_attack = target is not None
        target_class = target if targeted_attack else self.y_test[img, 0]

        # Define bounds for a flat vector of x,y,r,g,b values
        # For more pixels, repeat this layout
        dim_x, dim_y = self.dimensions
        bounds = [(0, dim_x), (0, dim_y), (0, 256), (0, 256), (0, 256)] * pixel_count
        
        # Population multiplier, in terms of the size of the perturbation vector x
        popmul = max(1, popsize // len(bounds))
        
        # Format the predict/callback functions for the differential evolution algorithm
        predict_fn = lambda xs: self.predict_classes(
            xs, self.x_test[img], target_class, model, target is None)
        callback_fn = lambda x, convergence: self.attack_success(
            x, self.x_test[img], target_class, model, targeted_attack, verbose)
        
        # Call Scipy's Implementation of Differential Evolution
        attack_result = differential_evolution(
            predict_fn, bounds, maxiter=maxiter, popsize=popmul,
            recombination=1, atol=-1, callback=callback_fn, polish=False)

        # Calculate some useful statistics to return from this function
        attack_image = helper.perturb_image(attack_result.x, self.x_test[img])[0]
        prior_probs = model.predict(np.array([self.x_test[img]]))[0]
        predicted_probs = model.predict(np.array([attack_image]))[0]
        predicted_class = np.argmax(predicted_probs)
        actual_class = self.y_test[img, 0]
        success = predicted_class != actual_class
        cdiff = prior_probs[actual_class] - predicted_probs[actual_class]

        # Show the best attempt at a solution (successful or not)
        if plot:
            helper.plot_image(attack_image, actual_class, self.class_names, predicted_class)

        return [model.name, pixel_count, img, actual_class, predicted_class, success, cdiff, prior_probs, predicted_probs, attack_result.x]
model = resnet
labels = labels.reshape(10, 1)
result = single_pixel_attack(images,
                             labels,
                             class_names,
                             images_id,
                             model,
                             pixel_count=10,
                             verbose=True,
                             plot=True)
print("Perturbed pixel coordinates:", result[10])

# The adversarial image is the first element of the returned list
adversarial = result[0]
# Get the clean image's values at the sensitive pixels
clean_rgb = clean_pixels(result[10], images[images_id])
# Filter the sensitive pixels: write the clean values back into the
# adversarial image to produce a defended image
defense_image = helper.perturb_image(np.array(clean_rgb), adversarial)[0]
# Plot the defended image
helper.plot_image(defense_image)
# Plot the sensitive pixels
helper.plot_image(adversarial - defense_image)

# Plot the adversarial perturbation and the sensitive pixels side by side
plt.figure()
plt.subplot(1, 2, 1)
plt.xticks([])
plt.yticks([])
adv_images = adversarial - images[images_id]
plt.imshow(adv_images.astype(np.uint8))
plt.subplot(1, 2, 2)
sen_points = adversarial - defense_image
plt.imshow(sen_points.astype(np.uint8))
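clean_pixels is called above but never defined on this page. A hypothetical sketch of what it could do, assuming the (x, y, r, g, b) perturbation layout and the same (x, y) indexing as perturb_image:

import numpy as np

def clean_pixels(perturbation, clean_img):
    # Hypothetical helper: overwrite the r,g,b entries of each perturbed
    # pixel with the clean image's values at the same coordinates, so that
    # applying the result with helper.perturb_image undoes the attack there
    pixels = np.array(perturbation, dtype=float).reshape(-1, 5)
    for p in pixels:
        x_pos, y_pos = int(p[0]), int(p[1])
        p[2:] = clean_img[x_pos, y_pos]
    return pixels.flatten()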
    print("For Image @ index {}, True label {}".format(target_img_idx,
                                                       names[target_img_idx]))

    preprocessed_tgt_image = mobilenet_preprocess_input(
        processed_images[target_img_idx, ].copy())
    prediction = model.predict(np.expand_dims(preprocessed_tgt_image, axis=0))
    # helper.plot_image(images[target_img_idx,])

    print("Model predictions")
    predictions = mobilenet_decode_predictions(prediction)[0]
    for item in predictions:
        print(item)

    # Change one pixel
    changed_pixel = np.array([200, 100, 255, 0, 255])
    attack_image = helper.perturb_image(
        changed_pixel, processed_images[target_img_idx, ].copy())[0]

    preprocessed_attack_image = mobilenet_preprocess_input(attack_image.copy())
    prediction = model.predict(
        np.expand_dims(preprocessed_attack_image, axis=0))

    print("Attacked Image prediction")
    predictions = mobilenet_decode_predictions(prediction)[0]
    for item in predictions:
        print(item)

    # -----------------------------------------------------------------------------------
    # Single Image Attack
    # -----------------------------------------------------------------------------------
    print("Single Image Attack ...")
    start_time = datetime.now()
    def predict_classes(self, xs, img, target_class, model, minimize=True):
        # Perturb the image with the given pixel(s) x and get the prediction of the model
        imgs_perturbed = helper.perturb_image(xs, img)
        predictions = model.predict(imgs_perturbed)[:, target_class]
        # This function should always be minimized, so return its complement if needed
        return predictions if minimize else 1 - predictions
Example #15
network_stats, correct_imgs = helper.evaluate_models(models, images, labels)
correct_imgs = pd.DataFrame(
    correct_imgs, columns=['name', 'img', 'label', 'confidence', 'pred'])
network_stats = pd.DataFrame(network_stats,
                             columns=['name', 'accuracy', 'param_count'])

print(network_stats)
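helper.evaluate_models is also not shown here. Given the DataFrame columns above, a plausible re-implementation (the confidence bookkeeping and use of count_params are assumptions):

import numpy as np

def evaluate_models(models, images, labels):
    # Score each model and record the correctly classified images
    network_stats, correct_imgs = [], []
    for model in models:
        predictions = model.predict(images)
        predicted_classes = np.argmax(predictions, axis=1)
        correct = predicted_classes == labels[:, 0]
        network_stats.append(
            [model.name, float(np.mean(correct)), model.count_params()])
        for i in np.flatnonzero(correct):
            correct_imgs.append([model.name, i, labels[i, 0],
                                 predictions[i, predicted_classes[i]],
                                 predicted_classes[i]])
    return network_stats, correct_imgs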

pixel = np.array([16, 20, 0, 255, 255])
model = resnet

image_id = 1
true_class = labels[image_id, 0]
prior_confidence = model.predict_one(images[image_id])[true_class]
confidence = helper.predict_classes(pixel, images[image_id], true_class,
                                    model)[0]

print('Confidence in true class', class_names[true_class], 'is', confidence)
print('Prior confidence was', prior_confidence)
helper.plot_image(helper.perturb_image(pixel, images[image_id])[0])

result = attack(images,
                labels,
                class_names,
                image_id,
                model,
                pixel_count=3,
                verbose=True,
                plot=True)
print(result[10])
Example #16
def single_pixel_attack(images,
                        labels,
                        class_names,
                        img_id,
                        model,
                        target=None,
                        pixel_count=1,
                        maxiter=75,
                        popsize=400,
                        verbose=False,
                        plot=False,
                        dimensions=(32, 32)):
    # Change the target class based on whether this is a targeted attack or not
    targeted_attack = target is not None
    target_class = target if targeted_attack else labels[img_id, 0]

    ack = PixelAttacker([model], (images, labels), class_names)

    # Define bounds for a flat vector of x,y,r,g,b values
    # For more pixels, repeat this layout
    dim_x, dim_y = dimensions
    bounds = [(0, dim_x), (0, dim_y), (0, 256), (0, 256),
              (0, 256)] * pixel_count

    # Population multiplier, in terms of the size of the perturbation vector x
    popmul = max(1, popsize // len(bounds))

    # Format the predict/callback functions for the differential evolution algorithm
    def predict_fn(xs):
        return ack.predict_classes(xs, images[img_id], target_class, model,
                                   target is None)

    def callback_fn(x, convergence):
        return ack.attack_success(x, images[img_id], target_class, model,
                                  targeted_attack, verbose)

    # Call Scipy's Implementation of Differential Evolution
    attack_result = differential_evolution(predict_fn,
                                           bounds,
                                           maxiter=maxiter,
                                           popsize=popmul,
                                           recombination=1,
                                           atol=-1,
                                           callback=callback_fn,
                                           polish=False)

    # Calculate some useful statistics to return from this function
    attack_image = helper.perturb_image(attack_result.x, images[img_id])[0]
    prior_probs = model.predict(np.array([images[img_id]]))[0]
    predicted_probs = model.predict(np.array([attack_image]))[0]
    predicted_class = np.argmax(predicted_probs)
    actual_class = labels[img_id, 0]
    success = predicted_class != actual_class
    cdiff = prior_probs[actual_class] - predicted_probs[actual_class]

    # Show the best attempt at a solution (successful or not)
    if plot:
        helper.plot_image(attack_image, actual_class, class_names,
                          predicted_class)

    return [
        attack_image, model.name, pixel_count, img_id, actual_class,
        predicted_class, success, cdiff, prior_probs, predicted_probs,
        attack_result.x
    ]
Example #17
# Load the model
model = keras.applications.ResNet50()

#processed_images = preprocess_input(images.copy()) # Prepare the image for the model
processed_images = images
predictions = model.predict(
    processed_images)  # Get the predicted probabilities for each class
label = decode_predictions(
    predictions)  # Convert the probabilities to class labels

print(label)

# Perturb the image
pixel = np.array([112, 112, 255, 255, 0])  # pixel = x,y,r,g,b
image_perturbed = helper.perturb_image(pixel, images)
helper.plot_image(image_perturbed[0])

#preprocessed_perturbed = preprocess_input(image_perturbed.copy())
preprocessed_perturbed = image_perturbed
predictions = model.predict(preprocessed_perturbed)
label = decode_predictions(predictions)

print(label)
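The commented-out preprocess_input calls suggest these images are already in the model's expected range. If you start from raw RGB arrays instead, ResNet50 normally wants its own preprocessing; a sketch of the standard tf.keras pattern:

from tensorflow.keras.applications.resnet50 import (ResNet50, preprocess_input,
                                                    decode_predictions)

model = ResNet50()
processed = preprocess_input(images.copy())  # channel-wise mean subtraction
predictions = model.predict(processed)
print(decode_predictions(predictions))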
'''
# Attack
# Should output /device:GPU:0
K.tensorflow_backend._get_available_gpus()
models = [model]

test = processed_images, np.array([labels])