Beispiel #1
0
def get_at_model(sess, x_train, y_train, x_test, y_test):
    """Adversarially train a vanilla model with FGSM and report test accuracies.

    NOTE(review): several names used below (`x`, `y`, `batch_size`,
    `train_params`, `model_at`) are not defined in this function --
    presumably module-level placeholders/config in the original file;
    confirm before reuse.  In particular `model_at` is returned but is
    never assigned here.
    """
    x_train, y_train, x_test, y_test = process_data(x_train, y_train, x_test, y_test)
    model = get_vanilla_model()
    # Wrap the Keras model so cleverhans attacks can drive it.
    wrap = KerasModelWrapper(model)
    fgsm = FastGradientMethod(wrap, sess=sess)
    fgsm_params = {'eps': 0.3}
    # Symbolic adversarial examples built from the placeholder `x`.
    adv_x = fgsm.generate(x, **fgsm_params)
    # Treat the attack as a constant: no gradients flow through adv_x.
    adv_x = tf.stop_gradient(adv_x)
    """
    def evaluate_2():
        # Accuracy of adversarially trained model on legitimate test inputs
        eval_params = {'batch_size': batch_size}
        accuracy = model_eval(sess, x, y, model_at(x), x_test, y_test,
                              args=eval_params)
        print('Test accuracy on legitimate examples: %0.4f' % accuracy)

        # Accuracy of the adversarially trained model on adversarial examples
        accuracy = model_eval(sess, x, y, model_at(adv_x_at), x_test,
                              y_test, args=eval_params)
        print('Test accuracy on adversarial examples: %0.4f' % accuracy)
    """
    # Adversarial training: the loss mixes clean and adversarial predictions.
    model_train(sess, x, y, model(x), x_train, y_train,
                 predictions_adv=model(adv_x), evaluate=None,#evaluate_2,
                 args=train_params, save=False)
    eval_par = {'batch_size': batch_size}
    # NOTE(review): x_test/y_test are reshaped and rescaled again here even
    # though process_data() already ran above -- possible double
    # preprocessing; confirm against process_data's behavior.
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
    x_test = x_test.astype('float32')
    x_test /= 255
    y_test = keras.utils.to_categorical(y_test, 10)
    acc = model_eval(sess, x, y, model(x), x_test, y_test, args=eval_par)
    print('Test accuracy on test examples: %0.4f\n' % acc)
    acc = model_eval(sess, x, y, model(adv_x), x_test, y_test, args=eval_par)
    print('Test accuracy on adversarial examples: %0.4f\n' % acc)

    return model_at
Beispiel #2
0
def get_DeepFool_adversarial(targeted, xs, classifier, batch_size):
    """Generate untargeted DeepFool adversarial examples for `xs`, batch-wise.

    Parameters
    ----------
    targeted : bool
        Must be False; DeepFool is inherently untargeted.
    xs : np.ndarray
        Input samples with the batch dimension first.
    classifier : keras Model
        Model under attack.
    batch_size : int
        Number of samples attacked per generate_np call.

    Returns
    -------
    np.ndarray of adversarial examples with the same leading dimension as `xs`.
    """
    # Targeted DeepFool attack not possible
    if targeted:
        print('DeepFool attack cannot be targeted.')
        exit()

    wrap = KerasModelWrapper(classifier)
    attack = DeepFool(wrap, sess=K.get_session())
    attack_params = {
        'overshoot': 0.02,
        'max_iter': 50,
        'nb_candidate': 2,
        'clip_min': -5,
        'clip_max': 5
    }

    # FIX: use a ceiling batch count so a trailing partial batch is no longer
    # silently dropped when xs.shape[0] is not a multiple of batch_size.
    n_batches = (xs.shape[0] + batch_size - 1) // batch_size

    batches = []
    for ii in range(n_batches):
        print('ITER', ii)
        batch = xs[ii * batch_size:(ii + 1) * batch_size]
        batches.append(attack.generate_np(batch, **attack_params))
    return np.concatenate(batches, axis=0)
Beispiel #3
0
def FGSM(model, x, y):
    """Craft FGSM adversarial examples for inputs `x` with labels `y`."""
    session = K.get_session()
    wrapper = KerasModelWrapper(model)
    attack = FastGradientMethod(wrapper, sess=session)
    attack_kwargs = {'y': y, 'eps': 0.2, 'clip_min': 0., 'clip_max': 1.}
    return attack.generate_np(x, **attack_kwargs)
Beispiel #4
0
def generate_jsma_examples(sess, model, x, y, X, Y, attack_params, verbose,
                           attack_log_fpath):
    """
    Targeted attack, with target classes in Y.

    NOTE(review): despite the docstring, `Y` is never passed as `y_target`
    below, so JSMA actually runs untargeted here -- confirm intent.
    Parameters `x`, `y`, `attack_params`, `verbose` and `attack_log_fpath`
    are kept for interface compatibility but are unused (as before).

    Returns an np.ndarray of adversarial examples, one row per sample in X.
    """
    # Fix: removed dead locals (Y_target, nb_classes) that were assigned
    # but never used.
    jsma = SaliencyMapMethod(KerasModelWrapper(model), back='tf', sess=sess)
    jsma_params = {'theta': 1., 'gamma': 0.1, 'clip_min': 0., 'clip_max': 1.}

    adv_x_list = []

    with click.progressbar(range(0, len(X)),
                           file=sys.stderr,
                           show_pos=True,
                           width=40,
                           bar_template='  [%(bar)s] JSMA Attacking %(info)s',
                           fill_char='>',
                           empty_char='-') as bar:
        # Loop over the samples we want to perturb into adversarial examples;
        # JSMA is expensive, so attack one sample at a time.
        for sample_ind in bar:
            sample = X[sample_ind:(sample_ind + 1)]

            adv_x = jsma.generate_np(sample, **jsma_params)
            adv_x_list.append(adv_x)

    return np.vstack(adv_x_list)
Beispiel #5
0
	def train(self, X_tr, y_tr, X_val, y_val, dataObject, model):
		"""Train `model` with data augmentation and optional adversarial
		hardening attacks parsed from FLAGS.attack (comma-separated).

		NOTE(review): when multiple attacks are listed, the first entry is
		skipped (`attacks[1:]`) -- presumably a leading placeholder in the
		FLAGS string; confirm against how FLAGS.attack is populated.
		"""
		datagen = dataObject.data_generator()
		datagen.fit(X_tr)
		attacks = FLAGS.attack.split(',')
		if len(attacks) > 1:
			attacks = attacks[1:]
			attack_params = []
			# One shared cleverhans wrapper feeds all hardening attacks.
			clever_wrapper = KerasModelWrapper(model)
			for attack in attacks:
				attack_params.append(helpers.get_appropriate_attack(FLAGS.dataset, dataObject.get_range(), attack, clever_wrapper, common.sess, harden=True, attack_type="None"))
		else:
			attack_params=None
		# Step-wise LR schedule; overridden below when early stopping or
		# LR-plateau handling is enabled.
		def scheduler(epoch):
			if epoch <= 75:
				return 0.1
			if epoch <= 115:
				return 0.01
			return 0.001
		early_stop = None
		if FLAGS.early_stopping:
			print("Early stopping activated")
			early_stop = (0.005, 20) # min_delta, patience
		lr_plateau = None
		if FLAGS.lr_plateau:
			print("Dynamic LR activated")
			lr_plateau = (0.001, 0.1, 10, 0.005) # min_lr, factor, patience, min_delta
		if FLAGS.lr_plateau or FLAGS.early_stopping:
			print("LR scheduler disabled")
			scheduler = None # Override scheduler
		helpers.customTrainModel(model, X_tr, y_tr, X_val, y_val, datagen, self.nb_epochs, scheduler, self.batch_size, attacks=attack_params, early_stop=early_stop, lr_plateau=lr_plateau)
Beispiel #6
0
    def __init__(self,
                 model,
                 source_samples=2,
                 binary_search_steps=5,
                 cw_learning_rate=5e-3,
                 confidence=0,
                 attack_iterations=1000,
                 attack_initial_const=1e-2):
        """Set up a Carlini-Wagner L2 attack around `model`.

        Parameters
        ----------
        model : project wrapper exposing `.model` (keras model), `.sess`,
            `.input_ph` and `.num_classes` -- assumed from usage; confirm.
        source_samples : int
            CW batch size.
        binary_search_steps, cw_learning_rate, confidence,
        attack_iterations, attack_initial_const :
            Standard CarliniWagnerL2 hyperparameters, stored in
            `self.cw_params` for later generate calls.
        """
        super(Attack, self).__init__()

        model_wrap = KerasModelWrapper(model.model)
        self.model = model_wrap
        self.sess = model.sess

        self.x = model.input_ph
        self.y = Input(shape=(model.num_classes, ), dtype='float32')

        self.cw = CarliniWagnerL2(self.model, sess=self.sess)
        # Fix: `abort_early` was previously a dead local while the dict
        # hard-coded True; wire the flag through (value unchanged).
        abort_early = True
        self.cw_params = {
            'binary_search_steps': binary_search_steps,
            'y': None,
            'abort_early': abort_early,
            'max_iterations': attack_iterations,
            'learning_rate': cw_learning_rate,
            'batch_size': source_samples,
            'initial_const': attack_initial_const,
            'confidence': confidence,
            'clip_min': 0.0,
        }
def adversarial_noise(model,
                      image,
                      target_class,
                      noise_limit,
                      sess,
                      confidence=0.99,
                      eps=1.0,
                      max_iter=200):
    """Iteratively apply targeted FGSM until the model assigns at least
    `confidence` probability to `target_class`, or `max_iter` is reached.

    `noise_limit` is accepted but unused, matching the original signature.
    Returns the final image with the batch dimension stripped.
    """
    batch = np.expand_dims(image, axis=0)
    onehot_target = to_categorical(np.array([target_class]), num_classes=1000)

    attack = FastGradientMethod(KerasModelWrapper(model), sess=sess)
    attack_kwargs = {
        'eps': eps,
        'clip_min': 0.0,
        'clip_max': 255.0,
        'y_target': onehot_target
    }

    perturbed = batch
    for _ in range(max_iter):
        perturbed = attack.generate_np(perturbed, **attack_kwargs)
        current_confidence = model.predict(prep_image(perturbed))[0][target_class]
        print(decode_predictions(model.predict(prep_image(perturbed)), top=3)[0])
        print(current_confidence)
        # Stop as soon as the target class is confidently predicted.
        if current_confidence > confidence:
            break

    return np.reshape(perturbed, perturbed.shape[1:])
def attacks(model, X_test, Y_test):
	"""Evaluate `model`'s accuracy on FGSM adversarial versions of X_test."""
	from keras import backend as K

	K.set_learning_phase(1)
	config = tf.ConfigProto()
	config.gpu_options.allow_growth = True

	with tf.Session(config=config) as sess:
		# Placeholders shaped after the evaluation data.
		x = tf.placeholder(tf.float32, shape=(None, *X_test.shape[1:]))
		y = tf.placeholder(tf.float32, shape=(None, *Y_test.shape[1:]))

		attack = FastGradientMethod(KerasModelWrapper(model), sess=sess)
		adv_x = attack.generate(x, **{'eps': 0.3,
		                              'clip_min': 0.,
		                              'clip_max': 1.})
		# Consider the attack to be constant: stop gradients through it.
		adv_x = tf.stop_gradient(adv_x)
		preds_adv = model(adv_x)

		initialize_uninitialized_global_variables(sess)
		acc = model_eval(sess, x, y, preds_adv, X_test, Y_test,
		                 args={'batch_size': 10})

	return acc
def train_sub_no_augmn(data, target_model, sess):
    """Train a substitute model on samples labeled by the black-box target.

    Parameters
    ----------
    data : array-like
        Unlabeled samples used both for black-box labeling and training.
    target_model : object
        Black-box model queried through `bbox_predict`.
    sess : tf Session
        Session used for black-box prediction.

    Returns
    -------
    KerasModelWrapper around the trained substitute model.
    """
    print("Loading a substitute model...")

    # Placeholder for querying the black-box at its expected input size.
    x = tf.placeholder(tf.float32,
                       shape=(None, BBOX_IMAGE_SIZE, BBOX_IMAGE_SIZE,
                              NUM_OF_CHANNELS))

    model = get_model_category_by_id(SUBSTITUTE_MODEL_ID)
    model_sub = KerasModelWrapper(model)

    print("Substitute model loaded")

    print("Labeling samples...")
    # Label the training data with the black-box's predictions.
    labels = bbox_predict(target_model, data, sess, x, batch_size=1)
    print("Samples labeled")

    print("Training a substitute model...")
    train_gen = TransferGenerator(data=data,
                                  labels=labels,
                                  num_classes=NB_CLASSES,
                                  batch_size=BATCH_SIZE,
                                  image_size=SUB_IMAGE_SIZE)
    model_sub.model.fit_generator(generator=train_gen,
                                  epochs=NUM_EPOCHS,
                                  verbose=1)
    # Fix: corrected log-message typo ("Subsitute" -> "Substitute").
    print("Substitute model trained")

    return model_sub
Beispiel #10
0
def generate_adversarial(original_input, method, model,
                         target=None, target_class=None, sess=None, **kwargs):
    """Generate adversarial examples for `original_input` using the named attack.

    method : one of 'fgsm', 'jsma', 'cw', 'bim'.
    target : pre-built one-hot target labels; derived from `target_class`
        when omitted.
    target_class : integer class index for a single input.
    Raises Exception when `method` is not supported.
    """
    # Lazily build the attack registry once and cache it on the function object.
    if not hasattr(generate_adversarial, "attack_types"):
        generate_adversarial.attack_types = {
            'fgsm': FastGradientMethod,
            'jsma': SaliencyMapMethod,
            'cw': CarliniWagnerL2,
            'bim': BasicIterativeMethod
        }

    if sess is None:
        sess = K.get_session()

    if method in generate_adversarial.attack_types:
        attacker = generate_adversarial.attack_types[method](KerasModelWrapper(model), sess)
    else:
        raise Exception("Method not supported")

    if type(original_input) is list:
        original_input = np.asarray(original_input)
    else:
        # Single-input path: add the batch dimension.
        original_input = np.asarray([original_input])

        # Wrap a scalar target class into a list so the one-hot construction
        # below works.  NOTE(review): on the list-input path, target_class is
        # presumably expected to already be a sequence -- a scalar there
        # would make len(target_class) fail; confirm callers.
        if target_class is not None:
            target_class = [target_class]

    if target is None and target_class is not None:
        # One-hot encode the requested target classes.
        target = np.zeros((len(target_class), model.output_shape[1]))
        target[np.arange(len(target_class)), target_class] = 1

    if target is not None:
        kwargs['y_target'] = target

    return attacker.generate_np(original_input, **kwargs)
Beispiel #11
0
def untargeted_attack(model, images):
    """Run an untargeted DeepFool attack on `images` and return the results."""
    session = backend.get_session()
    deepfool = DeepFool(KerasModelWrapper(model), back='tf', sess=session)
    return deepfool.generate_np(images)
Beispiel #12
0
def main(argv=None):
    """Adversarially train (harden) a model on the dataset's blackbox split
    and save the result to FLAGS.save_here."""
    dataObject = data_load.get_appropriate_data(FLAGS.dataset)(None, None)
    datagen = dataObject.data_generator()
    (X_train, Y_train), (X_test, Y_test) = dataObject.get_blackbox_data()
    (X_validation, Y_validation) = dataObject.get_validation_data()
    datagen.fit(X_train)
    # MNIST gets a fixed 10-class LeNet; every other dataset a ResNet.
    n_classes, is_mnist = Y_train.shape[1], (FLAGS.dataset == "mnist")
    if is_mnist:
        model, _ = lenet.lenet_network(n_classes=10, is_mnist=is_mnist)
    else:
        model, _ = resnet.residual_network(n_classes=n_classes,
                                           stack_n=FLAGS.stack_n,
                                           mnist=is_mnist,
                                           get_logits=False)
    # Attack used for adversarial training (harden=True).
    attack, attack_params = helpers.get_appropriate_attack(
        FLAGS.dataset,
        dataObject.get_range(),
        FLAGS.attack_name,
        KerasModelWrapper(model),
        common.sess,
        harden=True,
        attack_type="None")
    # NOTE(review): densenet.scheduler is used even for lenet/resnet models
    # -- presumably a shared LR schedule; confirm.
    helpers.customTrainModel(model,
                             X_train,
                             Y_train,
                             X_validation,
                             Y_validation,
                             datagen,
                             FLAGS.nb_epochs,
                             densenet.scheduler,
                             FLAGS.batch_size,
                             attacks=[(attack, attack_params)])
    model.save(FLAGS.save_here)
Beispiel #13
0
 def __init__(self):
     # Initialize the (project-local) Model base class first.
     Model.__init__(self)
     from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
     # Pretrained ImageNet VGG16, wrapped for cleverhans attacks.
     self.keras_model = VGG16(weights='imagenet')
     self.model = KerasModelWrapper(self.keras_model)
     # Keep Keras' VGG16 preprocessing/decoding helpers alongside the model.
     self.preprocess_input = preprocess_input
     self.decode_predictions = decode_predictions
Beispiel #14
0
def fgsm_attack_iter(model, x_input, input_img, sess, n):
    """Apply FGSM iteratively `n` times, feeding each result back as input.

    Side effect: appends the original image and each intermediate
    adversarial image to the module-level `imgs_stamp_tf` list (preserved
    from the original implementation).

    Returns
    -------
    (adv_image, attack_time) : the final image and elapsed seconds.
    """
    import time

    wrap = KerasModelWrapper(model)
    imgs_stamp_tf.append(input_img)

    fgsm = FastGradientMethod(wrap)
    fgsm_params = {'eps': 0.1, 'clip_min': -1., 'clip_max': 1.}
    start_time = time.time()
    x_adv = fgsm.generate(x_input, **fgsm_params)
    # Fix: seed the loop with the original image so n == 0 no longer raises
    # UnboundLocalError; for n >= 1 the behavior is unchanged.
    adv_image = input_img
    for _ in range(n):
        adv_image = sess.run(x_adv, feed_dict={x_input: adv_image})
        imgs_stamp_tf.append(adv_image)

    attack_time = time.time() - start_time
    return adv_image, attack_time
def main(argv=None):
    """Craft adversarial examples for a saved model and report the fooled rate.

    FLAGS.mode selects which data split is attacked ('harden' or 'attack');
    results are optionally saved to FLAGS.save_here as .npy files.
    """
    dataObject = data_load.get_appropriate_data(FLAGS.dataset)(None, None)
    datagen = dataObject.data_generator()
    # Fix: corrected 'atack_X' typo so both names used below are initialized.
    attack_X, attack_Y = None, None
    if FLAGS.mode == "harden":
        attack_X, attack_Y = dataObject.get_hardening_data()
    elif FLAGS.mode == "attack":
        attack_X, attack_Y = dataObject.get_attack_data()
    else:
        # Fix: removed the unreachable exit() that followed this raise.
        raise Exception("Invalid mode specified!")
    n_classes, model = attack_Y.shape[1], load_model(FLAGS.model)
    attack, attack_params = helpers.get_appropriate_attack(
        FLAGS.dataset,
        dataObject.get_range(),
        FLAGS.attack_name,
        KerasModelWrapper(model),
        common.sess,
        harden=True,
        attack_type="None")
    perturbed_X = helpers.performBatchwiseAttack(attack_X, attack,
                                                 attack_params,
                                                 FLAGS.batch_size)
    # Fooled rate = 1 - accuracy on the perturbed inputs.
    fooled_rate = 1 - model.evaluate(
        perturbed_X, attack_Y, batch_size=FLAGS.batch_size)[1]
    print("\nError on adversarial examples: %f" % (fooled_rate))
    if FLAGS.save_here:
        np.save(FLAGS.save_here + "_x.npy", perturbed_X)
        np.save(FLAGS.save_here + "_y.npy", attack_Y)
Beispiel #16
0
def generate_model():
    """Build and compile a CIFAR-style CNN, returned wrapped for cleverhans."""
    net = Sequential()
    # First conv stage carries the input shape.
    net.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=(32,32,3)))
    net.add(Activation('relu'))
    net.add(Convolution2D(32, 3, 3, border_mode='same'))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Dropout(0.25))
    # Two more conv stages with doubling widths.
    for width in (64, 128):
        net.add(Convolution2D(width, 3, 3, border_mode='same'))
        net.add(Activation('relu'))
        net.add(Convolution2D(width, 3, 3, border_mode='same'))
        net.add(Activation('relu'))
        net.add(MaxPooling2D(pool_size=(2, 2)))
        net.add(Dropout(0.25))
    net.add(Flatten())
    # Fully connected head.
    for units in (512, 256):
        net.add(Dense(units))
        net.add(Activation('relu'))
        net.add(Dropout(0.5))
    net.add(Dense(10, activation='softmax'))
    # Compile the model
    net.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

    return KerasModelWrapper(net)
Beispiel #17
0
def generate(sess,
             model,
             data_feeder,
             source,
             target,
             adv_dump_dir,
             nb_samples,
             perturbation=0.35):
    """Craft targeted FGSM evasion samples from class `source` toward class
    `target` and persist the (clean, adversarial) pairs under `adv_dump_dir`.
    """
    attack = FastGradientMethod(KerasModelWrapper(model.model), sess=sess)
    attack_kwargs = {
        'eps': perturbation,
        'clip_min': 0.0,
        'clip_max': 1.0,
        'y_target': data_feeder.get_labels(target, nb_samples)
    }

    craft_data = data_feeder.get_evasion_craft_data(source_class=source,
                                                    total_count=nb_samples)
    adv_data = attack.generate_np(craft_data, **attack_kwargs)

    # Commit the crafted pairs for later analysis.
    writer = AdversarialWriterEvasion(source_class=source,
                                      target_class=target,
                                      attack_params={'eps': perturbation},
                                      adversarial_data_path=adv_dump_dir)
    writer.batch_put(craft_data, adv_data)
    writer.commit()
 def _load_model(self, path):
     """Load a Keras model from `path` and wrap it for cleverhans.

     Returns the loaded keras model.  NOTE(review): when `path` does not
     exist, `self.model` may be unset and the return raises AttributeError
     -- same as the original behavior; confirm callers handle it.
     """
     if os.path.exists(path):
         self.model = load_model(path)
         self.cleverhans_model = KerasModelWrapper(self.model)
         print("loaded keras model from ", path)
     else:
         # Fix: corrected typo in the log message ("not exit" -> "does not exist").
         print("keras model path does not exist", path)
     return self.model
def prep_bbox():
    """Load the black-box model from disk and return it wrapped for cleverhans."""
    root_dir = os.path.dirname(os.path.dirname(__file__))
    model_path = os.path.join(root_dir, BBOX_MODEL_PATH)
    # Load without compiling, then compile with the project's age metric.
    bbox_model = load_model(model_path, compile=False)
    bbox_model.compile(optimizer=Adam(), loss="categorical_crossentropy", metrics=[age_mae])
    wrapped = KerasModelWrapper(bbox_model)
    print("Model loaded")
    return wrapped
Beispiel #20
0
 def __init__(self, model, x=None, y=None):
     # :param: x: placeholder for inputs
     # :param: y: placeholder for labels
     # Keep a handle on the raw Keras model and wrap it for cleverhans.
     self.keras_model = model
     model_wrap = KerasModelWrapper(model)
     # Logits at the input placeholder; softmax yields class probabilities.
     self.predictions = model_wrap.get_logits(x)
     self.probs = tf.nn.softmax(logits=self.predictions)
     # Per-example cross-entropy against the label placeholder.
     # NOTE(review): with x/y defaulting to None this fails unless both
     # placeholders are supplied -- confirm callers always pass them.
     self.loss = tf.nn.softmax_cross_entropy_with_logits(
         labels=y, logits=self.predictions)
    def __init__(self, n_feats, n_classes, model, sess, lbl_enc, ord):
        # `ord` (norm order used by attacks, presumably) shadows the builtin
        # but is kept for interface compatibility.
        self.sess = sess
        self.model = model
        self.ord = ord
        # Input/label placeholders for flat feature vectors.
        self.x = tf.placeholder(tf.float32, shape=(None, n_feats))
        self.y = tf.placeholder(tf.float32, shape=(None, n_classes))

        # cleverhans wrapper around the Keras model.
        self.wrap = KerasModelWrapper(model)
        # NOTE(review): assumed to be a label encoder from the name
        # `lbl_enc`; confirm against callers.
        self.lbl_enc = lbl_enc
Beispiel #22
0
def build_attack(model, sess, eps=0.3, clip_min=0.0, clip_max=1.0):
    """Create a PGD attack for `model` together with its adversarial
    accuracy metric and adversarial loss."""
    pgd = ProjectedGradientDescent(KerasModelWrapper(model), sess=sess)

    # Parameters shared by the metric and the loss builders.
    pgd_params = dict(eps=eps, clip_min=clip_min, clip_max=clip_max)
    adv_acc_metric = get_adversarial_acc_metric(model, pgd, pgd_params)
    adv_loss = get_adversarial_loss(model, pgd, pgd_params)
    return pgd, adv_acc_metric, adv_loss
Beispiel #23
0
def lbfgs(model):
    """Return a closure that perturbs batches of X in place with LBFGS."""
    lbfgs_attack = LBFGS(KerasModelWrapper(model), sess=session)

    def attack(X):
        # Walk the data in fixed-size chunks; each chunk is replaced in place.
        for start in tqdm(range(0, len(X), CHILD_BATCH_SIZE), desc=f'LBFGS: ', file=sys.stdout, leave=False):
            chunk = X[start:start + CHILD_BATCH_SIZE]
            adv = lbfgs_attack.generate(tf.convert_to_tensor(chunk), batch_size=len(chunk), max_iterations=4, binary_search_steps=3)
            X[start:start + CHILD_BATCH_SIZE] = session.run(adv)
    return attack
Beispiel #24
0
def fgsm(model):
    """Return a closure that perturbs batches of X in place with FGSM."""
    fgsm_attack = FastGradientMethod(KerasModelWrapper(model), sess=session)

    def attack(X, eps):
        # Walk the data in fixed-size chunks; each chunk is replaced in place.
        for start in tqdm(range(0, len(X), CHILD_BATCH_SIZE), desc=f'FGSM: ', file=sys.stdout, leave=False):
            chunk = tf.convert_to_tensor(X[start:start + CHILD_BATCH_SIZE])
            X[start:start + CHILD_BATCH_SIZE] = session.run(fgsm_attack.generate(chunk, eps=eps))
    return attack
Beispiel #25
0
def mim(model):
    """Return a closure that perturbs batches of X in place with MIM."""
    mim_attack = MomentumIterativeMethod(KerasModelWrapper(model), sess=session)

    def attack(X, eps):
        # Walk the data in fixed-size chunks; each chunk is replaced in place.
        for start in tqdm(range(0, len(X), CHILD_BATCH_SIZE), desc=f'MIM: ', file=sys.stdout, leave=False):
            chunk = tf.convert_to_tensor(X[start:start + CHILD_BATCH_SIZE])
            X[start:start + CHILD_BATCH_SIZE] = session.run(mim_attack.generate(chunk, eps=eps, eps_iter=eps * 0.2))
    return attack
Beispiel #26
0
def build_attack(model, sess, eps=0.3, clip_min=0.0, clip_max=1.0):
    """Create an FGSM attack for `model` together with its adversarial
    accuracy metric and adversarial loss."""
    fgsm = FastGradientMethod(KerasModelWrapper(model), sess=sess)

    # Parameters shared by the metric and the loss builders.
    fgsm_params = dict(eps=eps, clip_min=clip_min, clip_max=clip_max)
    adv_acc_metric = get_adversarial_acc_metric(model, fgsm, fgsm_params)
    adv_loss = get_adversarial_loss(model, fgsm, fgsm_params)
    return fgsm, adv_acc_metric, adv_loss
Beispiel #27
0
 def lbfgs(X, which):
     """Perturb X batch-wise with LBFGS and return a modified copy.

     NOTE(review): `eps` is not an obvious LBFGS.generate parameter in
     cleverhans -- it may be ignored or rejected depending on the installed
     version; confirm.
     """
     wrapped = LBFGS(KerasModelWrapper(which.model), sess=session)
     # Work on a copy so the caller's array is untouched.
     X = X.copy()
     for i in tqdm(range(0, len(X), CHILD_BATCH_SIZE),
                   desc=f'batch: ',
                   leave=False):
         tensor = tf.convert_to_tensor(X[i:i + CHILD_BATCH_SIZE])
         tensor = wrapped.generate(tensor, eps=0.1)
         X[i:i + CHILD_BATCH_SIZE] = session.run(tensor)
     return X
Beispiel #28
0
def init_attack(model, attack_params_dict, sess):
    """
    Initialize the adversarial attack using the cleverhans toolbox

    Parameters
    ----------
    model : Keras Model
        The model to attack

    attack_params_dict : dict
        Self-defined dictionary specifying the attack and its parameters

    sess : Session
        The current tf session

    Returns
    -------
    attack : cleverhans Attack
        The Attack object

    attack_params
        Dictionary with the value of the attack parameters, valid to generate
        adversarial examples with cleverhans.
    """
    # Wrapper for the Keras model
    wrapper = KerasModelWrapper(model)
    attack_name = attack_params_dict['attack']

    # Most attacks use the caller's batch size; SPSA requires batch size 1.
    batch_size = None

    if attack_name == 'fgsm':
        attack = FastGradientMethod(wrapper, sess=sess)
        attack_params = {'eps': attack_params_dict['eps'],
                         'clip_min': 0.,
                         'clip_max': 1.}
    elif attack_name == 'spsa':
        attack = SPSA(wrapper, sess=sess)
        attack_params = {'epsilon': attack_params_dict['eps'],
                         'num_steps': attack_params_dict['n_steps']}
        batch_size = 1
    elif attack_name == 'deepfool':
        attack = DeepFool(wrapper, sess=sess)
        attack_params = {'clip_min': 0., 'clip_max': 1.}
    elif attack_name == 'pgd':
        attack = ProjectedGradientDescent(wrapper, sess=sess)
        attack_params = {'eps': attack_params_dict['eps'],
                         'eps_iter': attack_params_dict['eps_iter'],
                         'nb_iter': attack_params_dict['n_steps'],
                         'clip_min': 0.,
                         'clip_max': 1.}
    elif attack_name == 'carlini':
        attack = CarliniWagnerL2(wrapper, sess=sess)
        attack_params = {'clip_min': 0., 'clip_max': 1.}
    else:
        raise NotImplementedError()

    return attack, attack_params, batch_size
Beispiel #29
0
def attack(img, d, eps):
    """Run one targeted FGSM step on `img` against model `d` and return the
    perturbed image without its batch dimension."""
    target_onehot = to_categorical([TARGET_CLASS], num_classes=1000)
    fgsm = FastGradientMethod(KerasModelWrapper(d), sess=sess)
    fgsm_kwargs = {'eps': eps, 'clip_min': -1.0, 'clip_max': 1.0,
                   'y_target': target_onehot}
    perturbed = fgsm.generate_np(img.copy(), **fgsm_kwargs)
    return perturbed[0]
Beispiel #30
0
def bim_attack(train_data, model, sess):
    """Generate Basic Iterative Method adversarial examples for `train_data`."""
    bim = BasicIterativeMethod(KerasModelWrapper(model), sess=sess)
    bim_kwargs = dict(eps_iter=0.01, nb_iter=10, clip_min=0., clip_max=1.)
    return bim.generate_np(train_data, **bim_kwargs)