Example 1
def main(args):
    assert args.dataset in ['mnist', 'cifar', 'svhn'], \
        "Dataset parameter must be either 'mnist', 'cifar' or 'svhn'"
    assert args.attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2', 'all', 'cw-lid'], \
        "Attack parameter must be either 'fgsm', 'bim-a', 'bim-b', " \
        "'jsma', 'cw-l2', 'all' or 'cw-lid' for attacking LID detector"
    model_file = os.path.join(PATH_DATA, "model_%s.h5" % args.dataset)
    # model_file = "../data_v1/model_%s.h5" % args.dataset
    print(model_file)
    assert os.path.isfile(model_file), \
        'model file not found... must first train model using train_model.py.'
    if args.dataset == 'svhn' and args.attack == 'cw-l2':
        assert args.batch_size == 16, \
            "svhn has 26032 test images, so the batch_size for the cw-l2 attack " \
            "should be 16; otherwise the last batch will raise an error!"

    print('Dataset: %s. Attack: %s' % (args.dataset, args.attack))
    # Create TF session, set it as Keras backend
    sess = tf.Session()
    K.set_session(sess)
    if args.attack == 'cw-l2' or args.attack == 'cw-lid':
        warnings.warn("Important: remove the softmax layer for cw attacks!")
        # use softmax=False to load without softmax layer
        model = get_model(args.dataset, softmax=False)
        model.compile(loss=cross_entropy,
                      optimizer='adadelta',
                      metrics=['accuracy'])
        model.load_weights(model_file)
    else:
        model = load_model(model_file)

    _, _, X_test, Y_test = get_data(args.dataset)
    _, acc = model.evaluate(X_test,
                            Y_test,
                            batch_size=args.batch_size,
                            verbose=0)
    print("Accuracy on the test set: %0.2f%%" % (100 * acc))

    if args.attack == 'cw-lid':  # breaking LID detector - test
        X_test = X_test[:1000]
        Y_test = Y_test[:1000]

    if args.attack == 'all':
        # Craft every attack type in turn (only 'fgsm' is listed in this example)
        for attack in ['fgsm']:
            craft_one_type(sess, model, X_test, Y_test, args.dataset, attack,
                           args.batch_size)
    else:
        # Craft one specific attack type
        craft_one_type(sess, model, X_test, Y_test, args.dataset, args.attack,
                       args.batch_size)
    print('Adversarial samples crafted and saved to %s ' % PATH_DATA)
    sess.close()
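
The main() above only reads args.dataset, args.attack and args.batch_size, so an entry point along the following lines can drive it. This is a minimal sketch: the flag names and the default batch size are assumptions, not taken from the original script.

# Hypothetical entry point for main() above; flag names and the default
# batch size are assumptions -- main() only needs dataset, attack and batch_size.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', required=True,
                        help="'mnist', 'cifar' or 'svhn'")
    parser.add_argument('--attack', required=True,
                        help="'fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2', 'all' or 'cw-lid'")
    parser.add_argument('--batch_size', type=int, default=256)
    main(parser.parse_args())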
Example 2
        atck = 'cw_pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)),
                 "wb")  # binary mode so pickle also works under Python 3
    else:
        f = open(os.path.join(args.log_dir, "custom.p"), "wb")
    # Save the crafted adversarial inputs and their labels for later use
    pickle.dump({"adv_input": adv_x_samples, "adv_labels": adv_y_samples}, f)
    f.close()


with tf.Session() as sess:

    dataset = 'mnist'
    K.set_session(sess)
    K.set_image_data_format('channels_first')

    _, _, X_test, Y_test = get_data(dataset)
    num_samples = np.shape(X_test)[0]
    num_rand_samples = 1328
    # Draw random test indices (np.random.randint samples with replacement)
    random_samples = np.random.randint(0, num_samples, num_rand_samples)
    # Re-order each image from channels_last (28, 28, 1) to channels_first (1, 28, 28)
    new_X_test = np.zeros((num_rand_samples, 1, 28, 28))
    for i, sample_no in enumerate(random_samples):
        new_X_test[i, 0, :, :] = X_test[sample_no, :, :, 0]
    new_Y_test = Y_test[random_samples, :]

    # Binary mode so the pickled arrays also load cleanly under Python 3
    f = open(os.path.join(args.log_dir, 'Random_Test_%s_.p' % dataset), 'wb')
    pickle.dump({"adv_input": new_X_test, "adv_labels": new_Y_test}, f)
    f.close()
    if args.attack == 'cw-l2' or args.attack == 'all':
        # No softmax for the Carlini-Wagner attack
        pytorch_network = Net()
        pytorch_network.load_state_dict(torch.load(args_ckpt))
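
The pickles written in this example can be read back with a small helper for inspection. The loader below is a hypothetical sketch: the function name is made up, but the 'Random_Test_mnist_.p' file name and the "adv_input"/"adv_labels" keys match the dump calls above.

# Hypothetical loader for the pickles written above; the key names and the
# 'Random_Test_mnist_.p' file name come from the dump calls in this example.
import os
import pickle

def load_pickled_samples(log_dir, name='Random_Test_mnist_.p'):
    # Open in binary mode to match how pickle wrote the file
    with open(os.path.join(log_dir, name), 'rb') as fh:
        data = pickle.load(fh)
    return data['adv_input'], data['adv_labels']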