    plt.figure(3)
    image_diff = np.absolute(activations_orig - activations_adv)
    converted_diff = convert_to_pixel_space(image_diff, max_idx)
    visualize_img(converted_diff)

    return min_idx, max_idx


if __name__ == "__main__":
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')

    _, _, classes, mean_image, values = load_weights(
        "/Users/mihaileric/Documents/CS231N/CS231N-FinalProject/vgg19.pkl")

    model = build_model(input_var)
    lasagne.layers.set_all_param_values(model['prob'], values)

    layer_name = "conv1_2"

    # Original image
    # Change path to images as appropriate
    #orig_filename = "/Users/mihaileric/Documents/CS231N/CS231N-FinalProject/datasets/nipun/orig_high_sea slug, nudibranch_0.94_jigsaw puzzle_0.97.png"
    #adv_filename = "/Users/mihaileric/Documents/CS231N/CS231N-FinalProject/datasets/nipun/high_sea slug, nudibranch_0.94_jigsaw puzzle_0.97.png"

    #_, max_idx = visualize_max_filter_activations(model, layer_name, orig_filename, adv_filename)

    # Expects inputs of (num_sample, channels, H, W) dim
    # Reconstruct features for original image
    #reconstruct_features(model, layer_name, input_var, orig_filename, filter_num=125)
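
# convert_to_pixel_space and visualize_img above are project helpers that
# are not shown. As a rough illustration only, a minimal visualize_img
# might look like this, assuming activations have been mapped back to a
# (C, H, W) float image in pixel space (the same convention as the
# deprocessing code in Example #5):
import numpy as np
import matplotlib.pyplot as plt

def visualize_img(img):
    # CHW -> HWC, clip to the displayable range, and show
    img = np.transpose(img, (1, 2, 0))
    plt.imshow(np.clip(img, 0, 255).astype('uint8'))
    plt.axis('off')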
Example #2
import urllib

import numpy as np
import matplotlib.pyplot as plt

import lasagne
from lasagne.layers import InputLayer, DenseLayer, DropoutLayer
from lasagne.layers.conv import Conv2DLayer as ConvLayer
from lasagne.layers import MaxPool2DLayer as PoolLayer
from lasagne.layers import LocalResponseNormalization2DLayer as NormLayer
from lasagne.utils import floatX
from models import vgg19
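
# prep_image is defined elsewhere in the original project; below is a
# minimal sketch of the usual VGG-style preprocessing, modeled on the
# Lasagne Recipes pretrained-network notebook (the exact steps, the
# Python 2 urllib.urlopen call, and the mean_image broadcasting are
# assumptions):
import io
import skimage.transform

def prep_image(url, mean_image):
    ext = url.split('.')[-1]
    im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)
    # Resize so the short side is 256 pixels
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w * 256 // h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h * 256 // w, 256), preserve_range=True)
    # Central 224x224 crop
    h, w, _ = im.shape
    im = im[h // 2 - 112:h // 2 + 112, w // 2 - 112:w // 2 + 112]
    rawim = np.copy(im).astype('uint8')
    # HWC -> CHW, RGB -> BGR, subtract the mean, add a batch axis
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    im = im[::-1, :, :]
    im = im - mean_image  # assumes mean_image broadcasts to (3, 224, 224)
    return rawim, floatX(im[np.newaxis])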

if __name__ == "__main__":
    classes, mean_image, values = vgg19.load_data()

    net = vgg19.build_model()
    output_layer = net["prob"]
    lasagne.layers.set_all_param_values(output_layer, values)

    image_urls = vgg19.get_images(3)

    count = 0
    for url in image_urls:
        print(url)
        try:
            rawim, im = prep_image(url, mean_image)

            prob = np.array(lasagne.layers.get_output(output_layer, im, deterministic=True).eval())
            top5 = np.argsort(prob[0])[-1:-6:-1]
            fig = plt.figure()
            plt.imshow(rawim.astype('uint8'))
        except IOError:
            # The snippet is truncated above; a minimal handler so the
            # try block is complete -- skip images that fail to download
            print('bad url: ' + url)
Example #3
import os
import pickle
import logging

import numpy as np
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import DenseLayer, NonlinearityLayer
from lasagne.nonlinearities import softmax, linear

# MODELS, BatchGen, batch and the model modules (vgg16, vgg19, googlenet,
# inception_v3) are project-specific and assumed importable from the
# original repository.

def train(model_='VGG_16'):
    batch_size = 16
    nb_samples = 50
    nb_classes = 5
    nb_test_samples = 1000
    nb_epoch = 50
    data_augmentation = False

    if os.name == 'nt':
        train_pkl = r'C:\Users\crobe\Google Drive\DataMiningGroup\Datasets\restaurant_photos_with_labels_train.pkl'
        test_pkl = r'C:\Users\crobe\Google Drive\DataMiningGroup\Datasets\restaurant_photos_with_labels_test.pkl'
        img_path = r'D:\Yelp\restaurant_photos\\'
        CLASSES = pickle.load(open(r'C:\Users\crobe\Google Drive\DataMiningGroup\Code\data\categories.pkl', 'rb'))
        model_path = r'C:\Users\crobe\Google Drive\DataMiningGroup\Datasets\\'
    else:
        train_pkl = r'/home/rcamachobarranco/datasets/restaurant_photos_with_labels_train.pkl'
        test_pkl = r'/home/rcamachobarranco/datasets/restaurant_photos_with_labels_test.pkl'
        img_path = r'/home/rcamachobarranco/datasets/restaurant_photos/'
        CLASSES = pickle.load(open(r'/home/rcamachobarranco/code/data/categories.pkl', 'rb'))
        model_path = r'/home/rcamachobarranco/datasets/'

    # input image dimensions
    if model_ in MODELS[0:3]:
        img_rows, img_cols = 224, 224
    if model_ in MODELS[3]:
        img_rows, img_cols = 299, 299

    # generate model
    if model_ in MODELS[0]:
        net = vgg16.build_model()
        weights = pickle.load(open(model_path + 'vgg16.pkl', 'rb'))
        net_prob = net['prob']
    elif model_ in MODELS[1]:
        net = vgg19.build_model()
        weights = pickle.load(open(model_path + 'vgg19.pkl', 'rb'))
        net_prob = net['prob']
    elif model_ in MODELS[2]:
        net = googlenet.build_model()
        weights = pickle.load(open(model_path + 'googlenet.pkl', 'rb'))
        net_prob = net['prob']
    elif model_ in MODELS[3]:
        net = inception_v3.build_network()
        weights = pickle.load(open(model_path + 'inception_v3.pkl', 'rb'))
        net_prob = net['softmax']

    lasagne.layers.set_all_param_values(net_prob, weights['param values'])
    logging.info('Finished setting up weights')

    if model_ in MODELS[0]:
        output_layer = DenseLayer(net['fc7'], num_units=len(CLASSES), nonlinearity=softmax)
    elif model_ in MODELS[1]:
        output_layer = DenseLayer(net['fc7_dropout'], num_units=len(CLASSES), nonlinearity=softmax)
    elif model_ in MODELS[2]:
        net['classifier'] = DenseLayer(net['pool5/7x7_s1'],
                                       num_units=len(CLASSES),
                                       nonlinearity=linear)
        output_layer = NonlinearityLayer(net['classifier'],
                                         nonlinearity=softmax)
    elif model_ in MODELS[3]:
        output_layer = DenseLayer(net['pool3'], num_units=len(CLASSES), nonlinearity=softmax)

    # Define loss function and metrics, and get an updates dictionary
    X_sym = T.tensor4()
    y_sym = T.ivector()

    prediction = lasagne.layers.get_output(output_layer, X_sym)
    loss = lasagne.objectives.categorical_crossentropy(prediction, y_sym)
    loss = loss.mean()

    acc = T.mean(T.eq(T.argmax(prediction, axis=1), y_sym),
                 dtype=theano.config.floatX)

    params = lasagne.layers.get_all_params(output_layer, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.001, momentum=0.9)

    logging.info('Created model, compiling functions')
    # Compile functions for training, validation and prediction
    train_fn = theano.function([X_sym, y_sym], loss, updates=updates)
    val_fn = theano.function([X_sym, y_sym], [loss, acc])
    pred_fn = theano.function([X_sym], prediction)
    logging.info('Compiled functions, creating batchgen object')

    with BatchGen(batch, seed=1, num_workers=1, input_pkl=train_pkl, img_path=img_path,
                  dtype=np.float32, pixels=img_rows, model=model_, batch_size=batch_size) as train_bg:
        with BatchGen(batch, seed=1, num_workers=1, input_pkl=test_pkl, img_path=img_path,
                      dtype=np.float32, pixels=img_rows, model=model_, batch_size=batch_size) as test_bg:
            logging.info('Created batchgen object')
            for epoch in range(nb_epoch):
                for sample in range(nb_samples):
                    sample = next(train_bg)
                    X_train = sample['x']
                    y_train = sample['y']

                    train_loss = train_fn(X_train, y_train)
                    loss_tot = 0.
                    acc_tot = 0.

                    for chunk in range(nb_test_samples):
                        test_sample = next(test_bg)
                        X_test = test_sample['x']
                        y_test = test_sample['y']
                        test_loss, acc = val_fn(X_test, y_test)
                        loss_tot += test_loss
                        acc_tot += acc

                    loss_tot /= nb_test_samples
                    acc_tot /= nb_test_samples

                    if acc_tot > 0.86:
                        np.savez('/home/rcamachobarranco/datasets/googlenet_{0:.4g}.npz'.format(acc_tot * 100), *lasagne.layers.get_all_param_values(output_layer))
                        #param_values = lasagne.layers.get_all_params(output_layer)
                        #pickle.dump(param_values, open(r'/home/rcamacho/dm/datasets/googlenet_{0:.4g}.pkl'.format(acc_tot * 100), 'wb'),
                        #protocol=pickle.HIGHEST_PROTOCOL)

                    logging.info('Epoch {0} Train_loss {1} Test_loss {2} Test_accuracy {3}'.format(epoch, train_loss, loss_tot, acc_tot * 100))
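
# MODELS is a module-level list that train() indexes into; it is not shown
# in this example. Judging from the branches above, it plausibly looks like
# this (the exact strings are an assumption):
MODELS = ['VGG_16', 'VGG_19', 'GoogLeNet', 'Inception_V3']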
Example #5
def deprocess(img):
    # Name assumed; the original snippet is truncated above this point.
    # Convert a (C, H, W) float image back to a displayable HWC uint8 array.
    img = img.transpose(1, 2, 0)
    return np.clip(img, 0, 255).astype('uint8')

# Loading and preprocessing the images, and setting up the theano symbolic variables
content_img, content_mean = getImage('images/big_photo2.jpg')
style_img, style_mean = getImage('images/big_art2.jpg')

# The content, style, and generated images. Unlike the paper, I am not randomly initializing the generated image.
CON = theano.shared(np.asarray(content_img, dtype=theano.config.floatX))
STY = theano.shared(np.asarray(style_img, dtype=theano.config.floatX))
X = T.vector()
GEN = T.reshape(X, content_img.shape)
# GEN = theano.shared(np.asarray(content_img, dtype=theano.config.floatX))

# Here we build the VGG model, and get the filter responses of each image from the designated layers (not all will be used)
model = build_model()
layer_names = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']

CON_F = {name: lasagne.layers.get_output(model[name], inputs=CON) for name in layer_names}
STY_F = {name: lasagne.layers.get_output(model[name], inputs=STY) for name in layer_names}
GEN_F = {name: lasagne.layers.get_output(model[name], inputs=GEN) for name in layer_names}

# Evaluate those filter responses and save as theano shared variables (so that we don't have to evaluate them over and over)
CON_F = {name: theano.shared(F.eval()) for name, F in CON_F.items()}
STY_F = {name: theano.shared(F.eval()) for name, F in STY_F.items()}

def content_loss(A, B):
    # Returns the sum of squared errors of A and B
    return T.sum(T.sqr(A - B)) / 2.

def gram_matrix(M):
    # The original snippet is truncated here. Standard Gram matrix from
    # Gatys et al., assuming M holds feature maps of shape (N, H, W):
    # flatten each map to a vector and take all pairwise inner products.
    F = T.reshape(M, (M.shape[0], -1))
    return T.dot(F, F.T)
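
# Not part of the original snippet: a sketch of how these Gram matrices
# typically enter the style loss in Gatys et al., using the paper's
# 1 / (4 * N^2 * M^2) normalization (shapes as assumed in gram_matrix):
def style_loss(A, B):
    n = A.shape[0]               # number of feature maps
    m = A.shape[1] * A.shape[2]  # pixels per feature map
    return T.sum(T.sqr(gram_matrix(A) - gram_matrix(B))) / (4. * n ** 2 * m ** 2)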
Example #6
def train(model_='VGG_16'):
    batch_size = 16
    nb_samples = 50
    nb_classes = 5
    nb_test_samples = 1000
    nb_epoch = 50
    data_augmentation = False

    # Initialize paths relative to our folder
    train_pkl = r'./data/restaurant_photos_with_labels_train.pkl'
    test_pkl = r'./data/restaurant_photos_with_labels_test.pkl'
    img_path = r'./data/restaurant_photos/'
    CLASSES = pickle.load(open(r'./data/categories.pkl', 'rb'))
    model_path = r'./data/models/'

    # input image dimensions
    if model_ in MODELS[0:3]:
        img_rows, img_cols = 224, 224
    if model_ in MODELS[3]:
        img_rows, img_cols = 299, 299

    # generate model
    if model_ in MODELS[0]:
        net = vgg16.build_model()
        weights = pickle.load(open(model_path + 'vgg16.pkl', 'rb'))
        net_prob = net['prob']
    elif model_ in MODELS[1]:
        net = vgg19.build_model()
        weights = pickle.load(open(model_path + 'vgg19.pkl', 'rb'))
        net_prob = net['prob']
    elif model_ in MODELS[2]:
        net = googlenet.build_model()
        weights = pickle.load(open(model_path + 'googlenet.pkl', 'rb'))
        net_prob = net['prob']
    elif model_ in MODELS[3]:
        net = inception_v3.build_network()
        weights = pickle.load(open(model_path + 'inception_v3.pkl', 'rb'))
        net_prob = net['softmax']

    lasagne.layers.set_all_param_values(net_prob, weights['param values'])
    logging.info('Finished setting up weights')

    # Where we attach the new classifier depends on the model
    if model_ in MODELS[0]:
        output_layer = DenseLayer(net['fc7'],
                                  num_units=len(CLASSES),
                                  nonlinearity=softmax)
    elif model_ in MODELS[1]:
        output_layer = DenseLayer(net['fc7_dropout'],
                                  num_units=len(CLASSES),
                                  nonlinearity=softmax)
    elif model_ in MODELS[2]:
        net['classifier'] = DenseLayer(net['pool5/7x7_s1'],
                                       num_units=len(CLASSES),
                                       nonlinearity=linear)
        output_layer = NonlinearityLayer(net['classifier'],
                                         nonlinearity=softmax)
    elif model_ in MODELS[3]:
        output_layer = DenseLayer(net['pool3'],
                                  num_units=len(CLASSES),
                                  nonlinearity=softmax)

    # Define loss function and metrics, and get an updates dictionary
    X_sym = T.tensor4()
    y_sym = T.ivector()

    prediction = lasagne.layers.get_output(output_layer, X_sym)
    loss = lasagne.objectives.categorical_crossentropy(prediction, y_sym)
    loss = loss.mean()

    acc = T.mean(T.eq(T.argmax(prediction, axis=1), y_sym),
                 dtype=theano.config.floatX)

    # Define the model parameters as trainable
    params = lasagne.layers.get_all_params(output_layer, trainable=True)
    # Define the parameter updates
    updates = lasagne.updates.nesterov_momentum(loss,
                                                params,
                                                learning_rate=0.001,
                                                momentum=0.9)

    logging.info('Created model, compiling functions')
    # Compile functions for training, validation and prediction
    train_fn = theano.function([X_sym, y_sym], loss, updates=updates)
    val_fn = theano.function([X_sym, y_sym], [loss, acc])
    pred_fn = theano.function([X_sym], prediction)
    logging.info('Compiled functions, creating batchgen object')

    # Train over the dataset using batch generators
    with BatchGen(batch,
                  seed=1,
                  num_workers=1,
                  input_pkl=train_pkl,
                  img_path=img_path,
                  dtype=np.float32,
                  pixels=img_rows,
                  model=model_,
                  batch_size=batch_size) as train_bg:
        with BatchGen(batch,
                      seed=1,
                      num_workers=1,
                      input_pkl=test_pkl,
                      img_path=img_path,
                      dtype=np.float32,
                      pixels=img_rows,
                      model=model_,
                      batch_size=batch_size) as test_bg:
            logging.info('Created batchgen object')
            for epoch in range(nb_epoch):
                for sample in range(nb_samples):
                    sample = next(train_bg)
                    X_train = sample['x']
                    y_train = sample['y']

                    # Train current chunk
                    train_loss = train_fn(X_train, y_train)
                    loss_tot = 0.
                    acc_tot = 0.

                    # Evaluate on the test set in chunks
                    for chunk in range(nb_test_samples):
                        test_sample = next(test_bg)
                        X_test = test_sample['x']
                        y_test = test_sample['y']
                        test_loss, acc = val_fn(X_test, y_test)
                        loss_tot += test_loss
                        acc_tot += acc

                    # Compute the test loss and accuracy
                    loss_tot /= nb_test_samples
                    acc_tot /= nb_test_samples

                    # Store the model if accuracy is above 86%
                    if acc_tot > 0.86:
                        np.savez(
                            r'./data/googlenet_model.npz',
                            *lasagne.layers.get_all_param_values(output_layer))

                    logging.info(
                        'Epoch {0} Train_loss {1} Test_loss {2} Test_accuracy {3}'
                        .format(epoch, train_loss, loss_tot, acc_tot * 100))
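
# A minimal driver for train() (hypothetical; the original module's entry
# point is not shown):
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    train(model_='VGG_16')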