Example #1
def get_logits_over_interval(sess,
                             model,
                             x_data,
                             fgsm_params,
                             min_epsilon=-10.,
                             max_epsilon=10.,
                             num_points=21):
    """Get logits when the input is perturbed in an interval in adv direction.

    Args:
        sess: TensorFlow session.
        model: Model for which we wish to get logits.
        x_data: Numpy array corresponding to a single data point
                of shape [height, width, channels].
        fgsm_params: Parameters for generating adversarial examples.
        min_epsilon: Minimum value of epsilon over the interval.
        max_epsilon: Maximum value of epsilon over the interval.
        num_points: Number of points used to interpolate.

    Returns:
        Numpy array containing logits.

    Raises:
        ValueError if min_epsilon is larger than max_epsilon.
    """
    # Get the height, width and number of channels
    height = x_data.shape[0]
    width = x_data.shape[1]
    channels = x_data.shape[2]
    size = height * width * channels

    import numpy as np
    import tensorflow as tf
    from nmutant_attack.attacks import FastGradientMethod

    x_data = np.expand_dims(x_data, axis=0)

    # Define the data placeholder
    x = tf.placeholder(dtype=tf.float32,
                       shape=[1, height, width, channels],
                       name='x')
    # Define adv_x
    fgsm = FastGradientMethod(model, sess=sess)
    adv_x = fgsm.generate(x, **fgsm_params)

    if min_epsilon > max_epsilon:
        raise ValueError('Minimum epsilon must not be larger than maximum epsilon')

    # eta is the adversarial perturbation (adv_x - x) after L2 normalization;
    # lin_batch sweeps x + epsilon * eta for num_points values of epsilon
    # spaced evenly over [min_epsilon, max_epsilon].
    eta = tf.nn.l2_normalize(adv_x - x, dim=0)
    epsilon = tf.reshape(
        tf.lin_space(float(min_epsilon), float(max_epsilon), num_points),
        (num_points, 1, 1, 1))
    lin_batch = x + epsilon * eta
    logits = model.get_logits(lin_batch)
    with sess.as_default():
        log_prob_adv_array = sess.run(logits, feed_dict={x: x_data})
    return log_prob_adv_array
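
A minimal usage sketch for get_logits_over_interval, assuming an open TensorFlow session, a model object exposing get_logits, and a test set X_test are already available from the surrounding project; the attack parameters and epsilon range below are illustrative only:

# Hypothetical usage (sess, model and X_test are assumed to exist):
fgsm_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.}
logits = get_logits_over_interval(sess, model, X_test[0], fgsm_params,
                                   min_epsilon=-10., max_epsilon=10.,
                                   num_points=21)
print(logits.shape)  # (num_points, nb_classes)
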
Example #2
def fgsm(datasets,
         sample,
         model_name,
         store_path,
         step_size='0.3',
         batch_size=256,
         epoch=9,
         mu=False,
         mu_var='gf',
         de=False,
         attack='fgsm'):
    """
    :param datasets
    :param sample: inputs to attack
    :param target: the class want to generate
    :param nb_classes: number of output classes
    :return:
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)

    print(epoch)
    # sess, preds, x, y, model, feed_dict = model_load(datasets, model_name, epoch=epoch)
    sess, preds, x, y, model, feed_dict = model_load(datasets,
                                                     model_name,
                                                     epoch=epoch,
                                                     mu=mu,
                                                     mu_var=mu_var,
                                                     de=de,
                                                     attack=attack)

    ###########################################################################
    # Craft adversarial examples using the FGSM approach
    ###########################################################################
    # Initialize the Fast Gradient Sign Method (FGSM) attack object and
    # graph
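    # FGSM perturbs each input by eps * sign(gradient of the loss w.r.t. the
    # input) and clips the result back into [clip_min, clip_max].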
    '''
    if 'mnist' == datasets:
        #sample = np.asarray([np.asarray(imread(sample_path)).reshape(28,28,1)]).astype('float32')
        #sample = preprocess_image_1(sample)
        print('1')
    elif 'cifar10' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(32,32,3)]).astype('float32')
        sample = preprocess_image_1(sample)
    elif 'svhn' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(32,32,3)]).astype('float32')
        sample = preprocess_image_1(sample)
    #print(sample.shape)
    '''

    probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict)

    if sample.shape[0] == 1:
        # keep an array even for a single input so the index-based filtering
        # below works uniformly
        current_class = np.array([np.argmax(probabilities)])
    else:
        current_class = np.argmax(probabilities, axis=1)

    if not os.path.exists(store_path):
        os.makedirs(store_path)

    # Keep only the inputs that the model already classifies correctly
    # before the attack.
    acc_pre_index = []
    for i in range(0, sample.shape[0]):
        if current_class[i] == np.argmax(Y_test[i]):
            acc_pre_index.append(i)

    sample_acc = np.zeros(shape=(len(acc_pre_index), input_shape[1],
                                 input_shape[2], input_shape[3]),
                          dtype='float32')
    probabilities_acc = np.zeros(shape=(len(acc_pre_index), nb_classes),
                                 dtype='float32')
    current_class_acc = np.zeros(shape=(len(acc_pre_index)), dtype=int)

    for i in range(0, len(acc_pre_index)):
        sample_acc[i] = sample[acc_pre_index[i]]
        probabilities_acc[i] = probabilities[acc_pre_index[i]]
        current_class_acc[i] = current_class[acc_pre_index[i]]
    print('Start generating adv. example')
    #print(float(step_size))
    # The FGSM parameters are identical for mnist, cifar10 and svhn
    if datasets in ('mnist', 'cifar10', 'svhn'):
        fgsm_params = {'eps': float(step_size), 'clip_min': 0., 'clip_max': 1.}
    fgsm = FastGradientMethod(model, sess=sess)
    adv_x = fgsm.generate(x, **fgsm_params)

    nb_batches = int(math.ceil(float(sample_acc.shape[0]) / batch_size))
    suc = 0
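    # Craft adversarial examples batch by batch; an attack counts as
    # successful when the predicted label of the adversarial example differs
    # from the originally (correctly) predicted class.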
    for batch in range(nb_batches):
        #start, end = batch_indices(batch, sample_acc.shape[0], batch_size)
        print(batch)
        start = batch * batch_size
        end = (batch + 1) * batch_size
        if end > sample_acc.shape[0]:
            end = sample_acc.shape[0]
        adv = sess.run(adv_x,
                       feed_dict={
                           x: sample_acc[start:end],
                           y: probabilities_acc[start:end]
                       })

        # adv_img_deprocessed = deprocess_image_1(adv)
        # adv is a float array in [0, 1]; it is stored below with np.save
        # rather than being written out as an image.

        # Check if success was achieved
        #probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict)
        new_class_label = model_argmax(
            sess, x, preds, adv,
            feed=feed_dict)  # Predicted class of the generated adversary
        for i in range(0, len(new_class_label)):
            j = batch * batch_size + i
            if new_class_label[i] != current_class_acc[j]:
                suc += 1
                path = store_path + '/' + str(acc_pre_index[j]) + '_' + str(
                    time.time() * 1000) + '_' + str(
                        current_class_acc[j]) + '_' + str(new_class_label[i])
                #path = store_path + '/' + str(acc_pre_index[j]) + '_' + str(time.time()*1000) + '_' + str(current_class_acc[j]) + '_' + str(new_class_label[i])+'.png'
                np.save(path, adv[i])
                #print(adv[i].shape)
                #imsave(path, adv[i])
    # Close TF session
    sess.close()

    return suc, len(acc_pre_index)
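
A hedged usage sketch for fgsm; the dataset name matches the branches inside the function, the model name and store path are placeholders, and the project's get_data loader is assumed to be importable at the call site:

# Hypothetical call (model_name and store_path are placeholders):
X_train, Y_train, X_test, Y_test = get_data('mnist')
suc, total = fgsm(datasets='mnist',
                  sample=X_test[:100],
                  model_name='lenet1',
                  store_path='adv_result/mnist/fgsm',
                  step_size='0.3',
                  batch_size=256)
print('successful adversarial examples: %d out of %d' % (suc, total))
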
Example #3
def blackbox(datasets,
             sample,
             model_name,
             submodel_name,
             store_path,
             step_size=0.3,
             batch_size=256):
    """
    the black-box attack from arxiv.org/abs/1602.02697
    :param datasets
    :param sample: inputs to attack
    :param target: the class want to generate
    :param nb_classes: number of output classes
    :return:
    """
    # Simulate the black-box model locally
    # You could replace this by a remote labeling API for instance
    print("Preparing the black-box model.")
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)
    sess, bbox_preds, x, y, model, feed_dict = model_load(datasets, model_name)

    # Train substitute using method from https://arxiv.org/abs/1602.02697
    print("Preparing the substitute model.")
    model_sub, preds_sub = sub_model_load(sess, datasets, submodel_name,
                                          model_name)

    ###########################################################################
    # Craft adversarial examples using the Blackbox approach
    ###########################################################################
    # Initialize the Fast Gradient Sign Method (FGSM) attack object.
    '''
    if 'mnist' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(28,28,1)]).astype('float32')
        sample = preprocess_image_1(sample)
    elif 'cifar10' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(32,32,3)]).astype('float32')
        sample = preprocess_image_1(sample)
    elif 'svhn' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(32,32,3)]).astype('float32')
        sample = preprocess_image_1(sample)
    '''

    probabilities = model_prediction(sess, x, model(x), sample, feed=feed_dict)
    if sample.shape[0] == 1:
        # keep an array even for a single input so the index-based filtering
        # below works uniformly
        current_class = np.array([np.argmax(probabilities)])
    else:
        current_class = np.argmax(probabilities, axis=1)

    if not os.path.exists(store_path):
        os.makedirs(store_path)

    # Keep only the inputs that the black-box model already classifies
    # correctly before the attack.
    acc_pre_index = []
    for i in range(0, sample.shape[0]):
        if current_class[i] == np.argmax(Y_test[i]):
            acc_pre_index.append(i)

    sample_acc = np.zeros(shape=(len(acc_pre_index), input_shape[1],
                                 input_shape[2], input_shape[3]),
                          dtype='float32')
    probabilities_acc = np.zeros(shape=(len(acc_pre_index), nb_classes),
                                 dtype='float32')
    current_class_acc = np.zeros(shape=(len(acc_pre_index)), dtype=int)

    for i in range(0, len(acc_pre_index)):
        sample_acc[i] = sample[acc_pre_index[i]]
        probabilities_acc[i] = probabilities[acc_pre_index[i]]
        current_class_acc[i] = current_class[acc_pre_index[i]]

    # The FGSM parameters are identical for mnist, cifar10 and svhn
    if datasets in ('mnist', 'cifar10', 'svhn'):
        fgsm_par = {
            'eps': step_size,
            'ord': np.inf,
            'clip_min': 0.,
            'clip_max': 1.
        }
    fgsm = FastGradientMethod(model_sub, sess=sess)

    # Craft adversarial examples using the substitute
    x_adv_sub = fgsm.generate(x, **fgsm_par)

    nb_batches = int(math.ceil(float(sample_acc.shape[0]) / batch_size))
    suc = 0
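    # Adversarial examples are crafted on the substitute model batch by batch
    # and then labeled by the black-box model; a changed label counts as a
    # successful transfer attack.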
    for batch in range(nb_batches):
        #start, end = batch_indices(batch, sample_acc.shape[0], batch_size)
        print(batch)
        start = batch * batch_size
        end = (batch + 1) * batch_size
        if end > sample_acc.shape[0]:
            end = sample_acc.shape[0]
        adv = sess.run(x_adv_sub,
                       feed_dict={
                           x: sample_acc[start:end],
                           y: probabilities_acc[start:end]
                       })
        adv_img_deprocessed = deprocess_image_1(adv)

        # Check if success was achieved
        #probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict)
        new_class_label = model_argmax(
            sess, x, model(x), adv,
            feed=feed_dict)  # Predicted class of the generated adversary
        for i in range(0, len(new_class_label)):
            j = batch * batch_size + i
            if new_class_label[i] != current_class_acc[j]:
                suc += 1
                path = store_path + '/' + str(j) + '_' + str(
                    time.time() * 1000) + '_' + str(
                        current_class_acc[j]) + '_' + str(
                            new_class_label[i]) + '.png'
                imsave(path, adv_img_deprocessed[i])
    # Close TF session
    sess.close()

    return suc, len(acc_pre_index)
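
A similar hedged sketch for blackbox; the black-box and substitute model names and the output directory are placeholders assumed to refer to models already trained by the surrounding project:

# Hypothetical call (model names and store_path are placeholders):
X_train, Y_train, X_test, Y_test = get_data('mnist')
suc, total = blackbox(datasets='mnist',
                      sample=X_test[:100],
                      model_name='lenet1',
                      submodel_name='lenet5',
                      store_path='adv_result/mnist/blackbox',
                      step_size=0.3,
                      batch_size=256)
print('successful adversarial examples: %d out of %d' % (suc, total))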