def main(argv=None):
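    # Craft adversarial examples against a saved Keras model with the configured
    # CleverHans attack, report how often they fool it, and optionally save them.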
    dataObject = data_load.get_appropriate_data(FLAGS.dataset)(None, None)
    datagen = dataObject.data_generator()
    attack_X, attack_Y = None, None
    if FLAGS.mode == "harden":
        attack_X, attack_Y = dataObject.get_hardening_data()
    elif FLAGS.mode == "attack":
        attack_X, attack_Y = dataObject.get_attack_data()
    else:
        raise Exception("Invalid mode specified!")
    n_classes, model = attack_Y.shape[1], load_model(FLAGS.model)
    attack, attack_params = helpers.get_appropriate_attack(
        FLAGS.dataset,
        dataObject.get_range(),
        FLAGS.attack_name,
        KerasModelWrapper(model),
        common.sess,
        harden=True,
        attack_type="None")
    perturbed_X = helpers.performBatchwiseAttack(attack_X, attack,
                                                 attack_params,
                                                 FLAGS.batch_size)
    fooled_rate = 1 - model.evaluate(
        perturbed_X, attack_Y, batch_size=FLAGS.batch_size)[1]
    print("\nError on adversarial examples: %f" % (fooled_rate))
    if FLAGS.save_here:
        np.save(FLAGS.save_here + "_x.npy", perturbed_X)
        np.save(FLAGS.save_here + "_y.npy", attack_Y)
Example #2
def main(argv=None):
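    # Build a LeNet (MNIST) or ResNet model and train it with the configured attack
    # passed into the custom training loop (presumably for adversarial training).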
    dataObject = data_load.get_appropriate_data(FLAGS.dataset)(None, None)
    datagen = dataObject.data_generator()
    (X_train, Y_train), (X_test, Y_test) = dataObject.get_blackbox_data()
    (X_validation, Y_validation) = dataObject.get_validation_data()
    datagen.fit(X_train)
    n_classes, is_mnist = Y_train.shape[1], (FLAGS.dataset == "mnist")
    if is_mnist:
        model, _ = lenet.lenet_network(n_classes=10, is_mnist=is_mnist)
    else:
        model, _ = resnet.residual_network(n_classes=n_classes,
                                           stack_n=FLAGS.stack_n,
                                           mnist=is_mnist,
                                           get_logits=False)
    attack, attack_params = helpers.get_appropriate_attack(
        FLAGS.dataset,
        dataObject.get_range(),
        FLAGS.attack_name,
        KerasModelWrapper(model),
        common.sess,
        harden=True,
        attack_type="None")
    helpers.customTrainModel(model,
                             X_train,
                             Y_train,
                             X_validation,
                             Y_validation,
                             datagen,
                             FLAGS.nb_epochs,
                             densenet.scheduler,
                             FLAGS.batch_size,
                             attacks=[(attack, attack_params)])
    model.save(FLAGS.save_here)
Example #3
def main(argv=None):
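	# Train or evaluate a bag of models on the black-box data split: 'finetune'
	# continues training from a seed model, 'test' only reports bag-level accuracy.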
	if FLAGS.dataset not in ['cifar10', 'mnist', 'svhn']:
		print("Invalid dataset specified. Exiting")
		exit()
	bag = Bagging(10, FLAGS.batch_size, FLAGS.nb_epochs)
	custom_X, custom_Y = None, None
	if len(FLAGS.data_x) > 1 and len(FLAGS.data_y) > 1 and FLAGS.mode in ['finetune']:
		custom_X, custom_Y = np.load(FLAGS.data_x), np.load(FLAGS.data_y)
	dataObject = data_load.get_appropriate_data(FLAGS.dataset)(custom_X, custom_Y)
	(blackbox_Xtrain, blackbox_Ytrain), (X_test, Y_test) = dataObject.get_blackbox_data()
	if FLAGS.mode in ['finetune']:
		model = load_model(FLAGS.seed_model)
		(X_val, Y_val) = dataObject.get_validation_data()
		bag.train(blackbox_Xtrain, blackbox_Ytrain, X_val, Y_val, dataObject, model)
		predicted = np.argmax(bag.predict(FLAGS.model_dir, X_test, FLAGS.predict_mode),1)
		true = np.argmax(Y_test,1)
		acc = (100*(predicted==true).sum()) / float(len(Y_test))
		print("Bag level test accuracy: %f\n" % acc)
		model.save(FLAGS.seed_model)
	elif FLAGS.mode == 'test':
		predicted = np.argmax(bag.predict(FLAGS.model_dir, X_test, FLAGS.predict_mode),1)
		Y_test = np.argmax(Y_test, 1)
		acc = (100*(predicted == Y_test).sum()) / float(len(Y_test))
		print("Misclassification accuracy: %f" % (acc))
	else:
		print("Invalid option")
def main(argv=None):
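    # Evaluate a saved Keras model on the dataset's test split, or on custom
    # .npy test data if a test_prefix is supplied.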
    keras.layers.core.K.set_learning_phase(0)
    dataObject = data_load.get_appropriate_data(FLAGS.dataset)()
    if dataObject is None:
        print("Invalid dataset; exiting")
        exit()
    (blackbox_Xtrain,
     blackbox_Ytrain), (X_test, Y_test) = dataObject.get_blackbox_data()
    model = keras.models.load_model(FLAGS.model_path)
    if len(FLAGS.test_prefix):
        X_test = np.load(FLAGS.test_prefix + "_x.npy")
        Y_test = np.load(FLAGS.test_prefix + "_y.npy")
        print("Custom test data found")
    accuracy = model.evaluate(X_test, Y_test, batch_size=FLAGS.batch_size)
    print('\nTest accuracy: ' + str(accuracy[1]))
def main(argv=None):
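	# Black-box counterpart of the example above: perturb the data with one or
	# more attacks and report the model's error on the adversarial examples.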
	dataObject = data_load.get_appropriate_data(FLAGS.dataset)(None, None)
	datagen = dataObject.data_generator()
	attack_X, attack_Y = None, None
	if FLAGS.mode == "harden":
		(attack_X, attack_Y), _ = dataObject.get_blackbox_data()
	elif FLAGS.mode == "attack":
		attack_X, attack_Y = dataObject.get_attack_data()
	else:
		raise Exception("Invalid mode specified!")
	n_classes = attack_Y.shape[1]
	if FLAGS.dataset == "cifar10":
		keras.backend.set_image_dim_ordering('th')
		attack_X = attack_X.transpose((0, 3, 1, 2))
	model = load_model(FLAGS.model)
	if not FLAGS.multiattacks:
		attack, attack_params = helpers.get_appropriate_attack(FLAGS.dataset, dataObject.get_range(), FLAGS.attack_name, KerasModelWrapper(model), common.sess, harden=True, attack_type="black")
		perturbed_X = helpers.performBatchwiseAttack(attack_X, attack, attack_params, FLAGS.batch_size)
	else:
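		# Split the data into disjoint random slices, one per attack; the perturbed
		# slices and their labels are stacked back together below.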
		attacks = FLAGS.attack_name.split(',')
		attacks = attacks[1:]
		attack_params = []
		clever_wrapper = KerasModelWrapper(model)
		for attack in attacks:
			attack_params.append(helpers.get_appropriate_attack(FLAGS.dataset, dataObject.get_range(), attack, clever_wrapper, common.sess, harden=True, attack_type="black"))
		attack_Y_shuffled = []
		perturbed_X = []
		attack_indices = np.array_split(np.random.permutation(len(attack_Y)), len(attacks))
		for i, (at, atp) in enumerate(attack_params):
			adv_data = helpers.performBatchwiseAttack(attack_X[attack_indices[i]], at, atp, FLAGS.batch_size)
			perturbed_X.append(adv_data)
			attack_Y_shuffled.append(attack_Y[attack_indices[i]])
		perturbed_X, attack_Y = np.vstack(perturbed_X), np.vstack(attack_Y_shuffled)
	fooled_rate = 1 - model.evaluate(perturbed_X, attack_Y, batch_size=FLAGS.batch_size)[1]
	print("\nError on adversarial examples: %f" % (fooled_rate))
	if FLAGS.dataset == "cifar10":
		perturbed_X = perturbed_X.transpose((0, 2, 3, 1))
	if FLAGS.save_here:
		np.save(FLAGS.save_here + "_x.npy", perturbed_X)
		np.save(FLAGS.save_here + "_y.npy", attack_Y)
Example #6
def __init__(self):
    dataObject = data_load.get_appropriate_data("cifar10")()
    self.attack_data, self.attack_labels = dataObject.get_attack_data()
Example #7
                    metavar='FLOAT',
                    help='amount of label smoothing to be applied')
args = parser.parse_args()

if __name__ == '__main__':
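    # Label the proxy data by querying the black-box model, then train or finetune
    # a proxy model on those (soft or one-hot) labels.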
    batch_size, epochs = args.batch_size, args.epochs
    assert len(args.save_here) > 0, "Provide a path to save model"
    print("DATASET: {:}".format(args.dataset))
    api_model = load_model(args.blackbox)
    x_data = data_load.get_proxy_data(args.dataset)
    y_data = api_model.predict(x_data, batch_size=1024)
    convert_to_onehot = lambda vector: np_utils.to_categorical(
        np.argmax(vector, axis=1), 10)
    if not args.distill:
        y_data = convert_to_onehot(y_data)
    dataObject = data_load.get_appropriate_data(args.dataset)(None, None)
    _, (x_test, y_test) = dataObject.get_blackbox_data()
    x_train, y_train, x_val, y_val = dataObject.validation_split(
        x_data, y_data, 0.1)
    if args.label_smooth:
        y_train = y_train.clip(args.label_smooth / 9., 1. - args.label_smooth)
    if args.dataset == "cifar10":
        x_train = x_train.transpose((0, 3, 1, 2))
        x_test = x_test.transpose((0, 3, 1, 2))
        x_val = x_val.transpose((0, 3, 1, 2))
    if args.mode == "finetune":
        if args.dataset == "cifar10":
            keras.backend.set_image_dim_ordering('th')
        proxy = load_model(args.save_here)
        K.set_value(proxy.optimizer.lr, args.learning_rate)
    else: