Example 1
def training():
    (X, Y), (X_test, Y_test) = cifar10.load_data()
    Y = cifar10.to_categorical(Y, 10)
    Y_test = cifar10.to_categorical(Y_test, 10)
    data_set = cifar10.read_data_sets(X, Y, X_test, Y_test)
    # mnist = input_data.read_data_sets("tmp/mnist", one_hot=True)
    # batch_x, batch_y = data_set.train.next_batch(96)

    x_placeholder = tf.placeholder("float", [None, 32 * 32 * 3])
    y_placeholder = tf.placeholder("float", [None, 10])

    logits = cifar10.inference(x_placeholder)
    loss = cifar10.loss(logits, y_placeholder)
    train_op = cifar10.train_op(loss=loss, learning_rate=0.001)
    accuracy = cifar10.accuracy(logits, y_placeholder)
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        for step in range(MAX_STEPS):
            # print('step = {:d}'.format(step + 1))
            batch_x, batch_y = data_set.train.next_batch(96)
            # print(batch_x.shape)
            # print(batch_y.shape)
            _, loss_val, acc = sess.run([train_op, loss, accuracy],
                                        feed_dict={
                                            x_placeholder: batch_x,
                                            y_placeholder: batch_y
                                        })
            if (step + 1) % 100 == 0:
                print("step: {:d} loss: {:f} acc: {:f}".format(
                    step + 1, loss_val, acc))
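Note that this snippet relies on a project-local `cifar10` helper module (providing `read_data_sets`, `inference`, `loss`, `train_op`, `accuracy`), not the stock Keras dataset. For comparison, a minimal sketch of the standard `keras.datasets.cifar10` API, flattened to match the placeholder above (standard Keras calls; the preprocessing choices are assumptions):

from keras.datasets import cifar10
from keras.utils import to_categorical

# load_data returns uint8 arrays: x of shape (N, 32, 32, 3), y of shape (N, 1)
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.reshape(-1, 32 * 32 * 3).astype('float32') / 255.0
y_train = to_categorical(y_train, 10)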
Example 2
def train():
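    # NOTE: x, y_, y_conv, keep_prob, accuracy, session, saver and save_path
    # are assumed to be defined at module scope elsewhere in the source file.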
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    cucumber = cifar10.load_data()

    session.run(tf.global_variables_initializer())
    for i in range(20000):
        batch = cucumber.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = accuracy.eval(session=session,
                                           feed_dict={
                                               x: batch[0],
                                               y_: batch[1],
                                               keep_prob: 1.0
                                           })
            print('step %d, training accuracy %g' % (i, train_accuracy))
        train_step.run(session=session,
                       feed_dict={
                           x: batch[0],
                           y_: batch[1],
                           keep_prob: 0.5
                       })

    print('test accuracy %g' % accuracy.eval(session=session,
                                             feed_dict={
                                                 x: cucumber.test.images,
                                                 y_: cucumber.test.labels,
                                                 keep_prob: 1.0
                                             }))

    saver.save(sess=session, save_path=save_path)
Example 3
def load_data(dataset=10, is_tune=False, is_crop_filp=False):

    if dataset == 10:
        (train_data, train_labels), (test_data,
                                     test_labels) = cifar10.load_data()

    if is_tune:
        test_data = train_data[-10000:]
        test_labels = train_labels[-10000:]
        train_data = train_data[:-10000]
        train_labels = train_labels[:-10000]

    #  (N, 1) --> (N,)
    train_labels = np.squeeze(train_labels)
    test_labels = np.squeeze(test_labels)

    # per-image standardization
    test_data = per_image_standardization(test_data)

    # apply random crop/flip augmentation only in the online setting
    if is_crop_filp:
        train_data = padding(train_data)
        train_data = random_crop_and_flip(train_data, padding_size=2)
    train_data = per_image_standardization(train_data)

    print(
        'Loading dataset: [CIFAR%d], is_tune: [%s], is_preprocessed: [%s], is_crop_filp:[%s]'
        % (dataset, is_tune, 'True', str(is_crop_filp)))
    print('Train_data: {}, Test_data: {}'.format(train_data.shape,
                                                 test_data.shape))
    return (train_data, train_labels), (test_data, test_labels)
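`per_image_standardization`, `padding` and `random_crop_and_flip` are project-local helpers. A minimal NumPy sketch of the usual per-image standardization (mirroring `tf.image.per_image_standardization`, which scales each image to zero mean and unit variance with a lower bound on the stddev) might look like:

import numpy as np

def per_image_standardization(images):
    # images: (N, H, W, C); standardize each image independently
    flat = images.astype('float32').reshape(images.shape[0], -1)
    mean = flat.mean(axis=1, keepdims=True)
    # lower-bound the stddev to avoid division by zero on uniform images
    adjusted_std = np.maximum(flat.std(axis=1, keepdims=True),
                              1.0 / np.sqrt(flat.shape[1]))
    return ((flat - mean) / adjusted_std).reshape(images.shape)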
Example 4
    def __init__(self, dataset):
        num_classes = 10
        (train_data, train_labels), (test_data,
                                     test_labels) = cifar10.load_data()

        img_rows, img_cols = 32, 32

        if backend.image_data_format() == 'channels_first':
            train_images = train_data.reshape(train_data.shape[0], 3, img_rows,
                                              img_cols)
            test_images = test_data.reshape(test_data.shape[0], 3, img_rows,
                                            img_cols)
            input_shape = (3, img_rows, img_cols)
        else:
            train_images = train_data.reshape(train_data.shape[0], img_rows,
                                              img_cols, 3)
            test_images = test_data.reshape(test_data.shape[0], img_rows,
                                            img_cols, 3)
            input_shape = (img_rows, img_cols, 3)

        train_images = train_images.astype('float32') / 255
        train_labels = to_categorical(train_labels, num_classes)

        test_images = test_images.astype('float32') / 255
        test_labels = to_categorical(test_labels, num_classes)

        self.input_shape = input_shape
        self.num_classes = num_classes
        self.train_images, self.train_labels = train_images, train_labels
        self.test_images, self.test_labels = test_images, test_labels
Example 5
def load_data(dataset=10, is_tune=False):

    if dataset == 10:
        (train_data, train_labels), (test_data,
                                     test_labels) = cifar10.load_data()
    elif dataset == 100:
        (train_data, train_labels), (test_data,
                                     test_labels) = cifar100.load_data()
    else:
        raise ValueError('dataset must be 10 or 100, got %r' % dataset)

    if is_tune:
        test_data = train_data[:5000]
        test_labels = train_labels[:5000]
        train_data = train_data[5000:]
        train_labels = train_labels[5000:]

    #  (N, 1) --> (N,)
    train_labels = np.squeeze(train_labels)
    test_labels = np.squeeze(test_labels)

    # per-image standardization
    test_data = per_image_standardization(test_data)
    train_data = per_image_standardization(train_data)
    print('Load dataset: [CIFAR%d], is_tune: [%s], is_preprocessed: [%s]' %
          (dataset, is_tune, 'True'))
    print('Train_data: {}, Test_data: {}'.format(train_data.shape,
                                                 test_data.shape))
    return (train_data, train_labels), (test_data, test_labels)
Example 6
def get_dataset(dataset, batch_size, supervised=False, noise_size=(128, )):
    from cifar10 import load_data
    (X, y), (X_test, y_test) = load_data()

    return LabeledArrayDataset(X=X,
                               y=y if supervised else None,
                               X_test=X_test,
                               y_test=y_test,
                               batch_size=batch_size,
                               noise_size=noise_size)
Example 7
def load_graph():
    saver.restore(sess=session, save_path=save_path)
    print('weights loaded!')
    cucumber = cifar10.load_data()
    print('test accuracy %g' % accuracy.eval(session=session,
                                             feed_dict={
                                                 x: cucumber.test.images,
                                                 y_: cucumber.test.labels,
                                                 keep_prob: 1.0
                                             }))
Example 8
def get_dataset(dataset, batch_size, supervised=False, noise_size=(128, )):
    if dataset == 'mnist':
        from keras.datasets import mnist
        (X, y), (X_test, y_test) = mnist.load_data()
        X = X.reshape((X.shape[0], X.shape[1], X.shape[2], 1))
        X_test = X_test.reshape(
            (X_test.shape[0], X_test.shape[1], X_test.shape[2], 1))
    elif dataset == 'cifar10':
        from cifar10 import load_data
        (X, y), (X_test, y_test) = load_data()
    elif dataset == 'cifar100':
        from cifar100 import load_data
        (X, y), (X_test, y_test) = load_data()
    elif dataset == 'fashion-mnist':
        from fashion_mnist import load_data
        (X, y), (X_test, y_test) = load_data()
        X = X.reshape((X.shape[0], X.shape[1], X.shape[2], 1))
        X_test = X_test.reshape(
            (X_test.shape[0], X_test.shape[1], X_test.shape[2], 1))
    elif dataset == 'stl10':
        from stl10 import load_data
        (X, y), (X_test, y_test) = load_data()
        assert not supervised
    elif dataset == 'tiny-imagenet':
        from tiny_imagenet import load_data
        (X, y), (X_test, y_test) = load_data()
    elif dataset == 'imagenet':
        from imagenet import ImageNetdataset
        return ImageNetdataset('../imagenet-resized',
                               '../imagenet-resized-val/val',
                               batch_size=batch_size,
                               noise_size=noise_size,
                               conditional=supervised)
    else:
        raise ValueError('unknown dataset: %r' % dataset)

    return LabeledArrayDataset(X=X,
                               y=y if supervised else None,
                               X_test=X_test,
                               y_test=y_test,
                               batch_size=batch_size,
                               noise_size=noise_size)
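A typical call, assuming `LabeledArrayDataset` wraps the arrays for batched iteration:

dataset = get_dataset('cifar10', batch_size=64, supervised=True)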
Example 9
def main(src_model_name, eps):
    np.random.seed(0)
    tf.set_random_seed(0)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=config))

    if args.dataset == "mnist":
        K.set_image_data_format('channels_last')
        set_mnist_flags()
        x = K.placeholder(
            (None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
        y = K.placeholder((None, FLAGS.NUM_CLASSES))
        _, _, X_test, Y_test = data_mnist()
        # source model for crafting adversarial examples
        src_model = load_model_mnist(src_model_name)

    elif args.dataset == "cifar10":
        set_flags(20)
        K.set_image_data_format('channels_first')
        x = K.placeholder(
            (None, FLAGS.NUM_CHANNELS, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS))
        y = K.placeholder((None, FLAGS.NUM_CLASSES))
        _, _, X_test, Y_test = load_data()
        # source model for crafting adversarial examples
        src_model = load_model(src_model_name)
    logits = src_model(x)

    for sd in [0.4, 0.5, 0.6]:
        one_hot = np.zeros(shape=(len(Y_test), 10))
        for i in range(100):
            logits_np = batch_eval([x, y], [logits], [
                X_test + np.random.normal(scale=sd, size=X_test.shape), Y_test
            ])[0]
            one_hot[np.arange(len(Y_test)), logits_np.argmax(axis=1)] += 1
        robust = np.apply_along_axis(func1d=isRobust,
                                     axis=1,
                                     arr=one_hot,
                                     sd=sd,
                                     epsilon=eps)
        total_robust = np.sum(
            np.logical_and(robust[:, 0] == True,
                           one_hot.argmax(axis=1)
                           == Y_test.argmax(axis=1))) / 100.
        accuracy = np.sum(one_hot.argmax(axis=1) == Y_test.argmax(
            axis=1)) / 100.
        with open('bound_' + src_model_name + '_bound.txt', 'a') as log:
            log.write("Ave bound is {} at sigma = {}\n".format(
                np.mean(robust[:, 1]), sd))
            log.write("Accuracy: {}, Robust accuracy: {}, l={}\n".format(
                accuracy, total_robust, eps))
Example 10
def get_dataset(data_dir, preprocess_fcn=None):
    """ train dataset has shape [50000, 32, 32, 3], test dataset has shape [10000, 32, 32, 3].
  """
    (X_train, Y_train), (X_test, Y_test) = cifar.load_data(data_dir)
    if preprocess_fcn is not None:
        X_train, Y_train = preprocess_fcn(X_train, Y_train)
        X_test, Y_test = preprocess_fcn(X_test, Y_test)

    train = DataSet(X_train, Y_train)
    test = DataSet(X_test, Y_test)
    height, width, channels = X_train.shape[1:]

    return Datasets(train, test, height, width, channels)
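`DataSet` and `Datasets` are project-local types. Under that assumption, `Datasets` could be as simple as a namedtuple matching the fields used above:

import collections

# hypothetical container for the values returned by get_dataset
Datasets = collections.namedtuple(
    'Datasets', ['train', 'test', 'height', 'width', 'channels'])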
Example 11
import numpy
import os

import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.models import model_from_json
from keras.utils import np_utils

import cifar10

batch_size = 32
num_classes = 10
epochs = 50
data_augmentation = False

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train, y_test = [np_utils.to_categorical(x) for x in (y_train, y_test)]
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# load json and create model
json_file = open("model.json", "r")
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.hdf5")
print("Loaded model from disk")
Example 12
import os
from cifar10 import load_data
import PIL.Image as Image

if __name__ == '__main__':
    outDir = './CIFAR-10/'
    train, test, labels = load_data()
    labels = labels['label_names']

    def writeData(outDir, splitName, data, labels):
        # set up the split directory (avoid shadowing the dir() builtin)
        subDir = os.path.join(outDir, splitName)
        if not os.path.exists(subDir):
            os.makedirs(subDir)

        # write the label directories
        counts = [0] * len(labels)
        for im, ii in zip(data[0], data[1]):
            # create the directory
            labelDir = os.path.join(subDir, str(labels[ii]))
            if not os.path.exists(labelDir):
                os.makedirs(labelDir)

            # write the image to the directory
            imPath = os.path.join(labelDir, str(counts[ii]) + '.tif')
            Image.merge('RGB', (Image.fromarray(im[0]),
                                Image.fromarray(im[1]),
                                Image.fromarray(im[2]))).save(imPath)
            counts[ii] += 1

    # setup the train directories
Example 13
from keras.optimizers import SGD
from keras.datasets import cifar10
from keras.utils import np_utils
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-n", "--network", required=True, help="name of the network to build")
ap.add_argument("-m", "--model", required=True, help="path to output model file")
ap.add_argument("-d", "--dropout", type=int, default=-1, help="whether or not dropout should be used")
ap.add_argument("-f", "--activation", type=str, default="tanh", help="activation function to use (LeNet only)")
ap.add_argument("-e", "--epochs", type=int, default=20, help="# of epochs")
ap.add_argument("-b", "--batch-size", type=int, default=32, help="size of mini-batches passed to network")
ap.add_argument("-v", "--verbose", type=int, default=1, help="verbosity level")
args = vars(ap.parse_args())

((trainData, trainLabels), (testData, testLabels)) = cifar10.load_data()
trainData = trainData.astype("float")/255.0
testData = testData.astype("float")/255.0

trainLabels = np_utils.to_categorical(trainLabels, 10)
testLabels = np_utils.to_categorical(testLabels, 10)

kwargs = {"dropout": args["dropout"] > 0, "activation": args["activation"]}

# train the model using SGD
print("[INFO] compiling model...")
model = ConvnetFactory.build(args["network"], 3, 32, 32, 10, **kwargs)
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])

# start the training process
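The example is cut off before the training call. A minimal continuation consistent with the parsed arguments (hypothetical, since the original ends here) might be:

model.fit(trainData, trainLabels,
          validation_data=(testData, testLabels),
          batch_size=args["batch_size"],
          epochs=args["epochs"],
          verbose=args["verbose"])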
Example 14
def main(attack, src_model_name, target_model_names):
    np.random.seed(0)
    tf.set_random_seed(0)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=config))

    if args.dataset == "mnist":
        K.set_image_data_format('channels_last')
        set_mnist_flags()
        x = K.placeholder(
            (None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
        y = K.placeholder((None, FLAGS.NUM_CLASSES))
        _, _, X_test, Y_test = data_mnist()
        # source model for crafting adversarial examples
        src_model = load_model_mnist(src_model_name)
        sd = 0.7

    elif args.dataset == "cifar10":
        set_flags(20)
        K.set_image_data_format('channels_first')
        x = K.placeholder(
            (None, FLAGS.NUM_CHANNELS, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS))
        y = K.placeholder((None, FLAGS.NUM_CLASSES))
        _, _, X_test, Y_test = load_data()
        # source model for crafting adversarial examples
        src_model = load_model(src_model_name)
        sd = 100. / 255.

    # model(s) to target
    target_models = [load_model(name) for name in target_model_names]

    # simply compute test error
    if attack == "test":
        acc = tf_test_acc(src_model, x, X_test, Y_test)
        print('{}: {:.1f}'.format(basename(src_model_name), acc))

        for (name, target_model) in zip(target_model_names, target_models):
            acc = tf_test_acc(target_model, x, X_test, Y_test)
            print('{}: {:.1f}'.format(basename(name), acc))
        return

    eps = args.eps

    # take the random step in the RAND+FGSM
    if attack == "rfgs":
        X_test = np.clip(
            X_test + args.alpha * np.sign(np.random.randn(*X_test.shape)), 0.0,
            1.0)
        eps -= args.alpha

    logits = src_model(x)
    grad = gen_grad(x, logits, y)

    # FGSM and RAND+FGSM one-shot attack
    if attack in ["fgs", "rfgs"]:
        adv_x = symbolic_fgs(x, grad, eps=eps)

    # iterative FGSM
    if attack == "pgd":
        adv_x = iter_fgs(src_model,
                         x,
                         y,
                         steps=args.steps,
                         eps=eps,
                         alpha=eps / 10.0)

    if attack == 'so':
        adv_x = so(src_model,
                   x,
                   y,
                   steps=args.steps,
                   eps=eps,
                   alpha=eps / 10.0,
                   norm=args.norm,
                   sd=sd)

    print('start')
    # compute the adversarial examples and evaluate
    X_adv = batch_eval([x, y], [adv_x], [X_test, Y_test])[0]
    # pdb.set_trace()
    print('-----done----')
    # white-box attack
    acc = tf_test_acc(src_model, x, X_adv, Y_test, sd=sd, num_iter=10)
    with open('attacks.txt', 'a') as log:
        log.write('{}->{}: {:.1f}, size = {:.4f}\n'.format(
            basename(src_model_name), basename(src_model_name), acc, eps))

    # black-box attack
    for (name, target_model) in zip(target_model_names, target_models):
        acc = tf_test_acc(target_model, x, X_adv, Y_test, sd=sd, num_iter=10)
        with open('attacks.txt', 'a') as log:
            log.write('{}->{}: {:.1f}, size = {:.4f}\n'.format(
                basename(src_model_name), basename(name), acc, eps))
Example 15
def main(attack, src_model_name, target_model_names):
    np.random.seed(0)
    tf.set_random_seed(0)

    set_flags(20)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=config))

    x = K.placeholder(
        (None, FLAGS.NUM_CHANNELS, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS))

    y = K.placeholder((None, FLAGS.NUM_CLASSES))

    _, _, X_test, Y_test = load_data()

    # source model for crafting adversarial examples
    src_model = load_model(src_model_name)

    # model(s) to target
    target_models = [load_model(name) for name in target_model_names]

    # simply compute test error
    if attack == "test":
        err = tf_test_error_rate(src_model, x, X_test, Y_test)
        print('{}: {:.1f}'.format(basename(src_model_name), 100 - err))

        for (name, target_model) in zip(target_model_names, target_models):
            err = tf_test_error_rate(target_model, x, X_test, Y_test)
            print('{}: {:.1f}'.format(basename(name), 100 - err))
        return

    eps = args.eps

    # take the random step in the RAND+FGSM
    if attack == "rfgs":
        X_test = np.clip(
            X_test + args.alpha * np.sign(np.random.randn(*X_test.shape)), 0.0,
            1.0)
        eps -= args.alpha

    logits = src_model(x)
    grad = gen_grad(x, logits, y)

    # FGSM and RAND+FGSM one-shot attack
    if attack in ["fgs", "rfgs"]:
        adv_x = symbolic_fgs(x, grad, eps=eps)

    # iterative FGSM
    if attack == "pgd":
        adv_x = iter_fgs(src_model,
                         x,
                         y,
                         steps=args.steps,
                         eps=args.eps,
                         alpha=args.eps / 10.0)

    if attack == 'mim':
        adv_x = momentum_fgs(src_model, x, y, eps=args.eps)

    print('start')
    # compute the adversarial examples and evaluate
    X_adv = batch_eval([x, y], [adv_x], [X_test, Y_test])[0]
    print('-----done----')
    # white-box attack
    err = tf_test_error_rate(src_model, x, X_adv, Y_test)
    print('{}->{}: {:.1f}'.format(basename(src_model_name),
                                  basename(src_model_name), 100 - err))

    # black-box attack
    for (name, target_model) in zip(target_model_names, target_models):
        err = tf_test_error_rate(target_model, x, X_adv, Y_test)
        print('{}->{}: {:.1f}'.format(basename(src_model_name), basename(name),
                                      100 - err))
Example 16
#!/usr/bin/env python

import cv2

from cifar10 import load_data

if __name__ == '__main__':

    (x_train, y_train), (x_test, y_test) = load_data()
    print(x_train.shape)

    ind = 0

    # CIFAR-10 arrays are RGB; swap to BGR for OpenCV's imwrite/imshow
    image = cv2.cvtColor(x_train[ind], cv2.COLOR_RGB2BGR)

    labels = ('Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog',
              'Horse', 'Ship', 'Truck')

    name = labels[y_train[ind][0]]
    print(name)

    cv2.imwrite('cifar_' + str(ind) + '_' + name + '.jpg', image)

    cv2.imshow('test', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()