Example 1
    # Sample a few random training images of this class and show them as one
    # column of the grid, with the class name as the column title.
    train_idxs, = np.nonzero(data['y_train'] == class_idx)
    train_idxs = np.random.choice(train_idxs,
                                  size=examples_per_class,
                                  replace=False)
    for j, train_idx in enumerate(train_idxs):
        img = deprocess_image(data['X_train'][train_idx], data['mean_image'])
        plt.subplot(examples_per_class, classes_to_show,
                    1 + i + classes_to_show * j)
        if j == 0:
            plt.title(data['class_names'][class_idx][0])
        plt.imshow(img)
        plt.gca().axis('off')

plt.show()

model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5')

batch_size = 100

# Test the model on training data
mask = np.random.randint(data['X_train'].shape[0], size=batch_size)
X, y = data['X_train'][mask], data['y_train'][mask]
y_pred = model.loss(X).argmax(axis=1)
print('Training accuracy: ', (y_pred == y).mean())

# Test the model on validation data
mask = np.random.randint(data['X_val'].shape[0], size=batch_size)
X, y = data['X_val'][mask], data['y_val'][mask]
y_pred = model.loss(X).argmax(axis=1)
print('Validation accuracy: ', (y_pred == y).mean())
Example 2
from cs231n.classifiers.rnn import CaptioningRNN
from cs231n.coco_utils import load_coco_data, sample_coco_minibatch, decode_captions
from cs231n.image_utils import image_from_url, blur_image, deprocess_image, preprocess_image

#############################################################################


def rel_error(x, y):
    """ returns relative error """
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
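

# Illustrative usage with hypothetical arrays: rel_error is typically used to
# compare an analytic quantity against a numerical reference (e.g. during
# gradient checks); values well below 1e-6 usually indicate a match.
_a = np.array([1.0, 2.0, 3.0])
_b = np.array([1.0, 2.0, 3.0001])
print(rel_error(_a, _b))  # ~1.7e-5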


# TinyImageNet and pretrained model
data = load_tiny_imagenet('cs231n/datasets/tiny-imagenet-100-A',
                          subtract_mean=True)
model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5')


def create_class_visualization(target_y, model, **kwargs):
    """
    Perform optimization over the image to generate class visualizations.

    Inputs:
    - target_y: Integer in the range [0, 100) giving the target class
    - model: A PretrainedCNN that will be used for generation

    Keyword arguments:
    - learning_rate: Floating point number giving the learning rate
    - blur_every: An integer; how often to blur the image as a regularizer
    - l2_reg: Floating point number giving L2 regularization strength on the image;
      this is lambda in the equation above.
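

# --- Illustrative sketch (not the assignment's reference implementation) ----
# A minimal version of the gradient-ascent loop that the docstring above
# describes. It assumes the PretrainedCNN interface used in this assignment
# (model.forward(X, mode='test') -> (scores, cache) and
# model.backward(dout, cache) -> (dX, grads)) plus the blur_image helper
# imported above; the function name and the hyperparameter defaults below are
# illustrative, not prescribed.
def class_visualization_sketch(target_y, model, num_iterations=100,
                               learning_rate=10000, l2_reg=1e-7,
                               blur_every=1, max_jitter=4):
    # Start from random noise in the model's input shape (N, C, H, W).
    X = np.random.randn(1, 3, 64, 64)
    for t in range(num_iterations):
        # Regularize by randomly jittering the image before each update.
        ox, oy = np.random.randint(-max_jitter, max_jitter + 1, 2)
        X = np.roll(np.roll(X, ox, -1), oy, -2)

        # Forward to class scores, then backprop the target class score to get
        # the gradient of that score with respect to the input image.
        scores, cache = model.forward(X, mode='test')
        dscores = np.zeros_like(scores)
        dscores[0, target_y] = 1.0
        dX, _ = model.backward(dscores, cache)

        # Gradient ascent step with L2 regularization on the image (the lambda term).
        dX -= 2 * l2_reg * X
        X += learning_rate * dX

        # Undo the jitter, then periodically blur as a further regularizer.
        X = np.roll(np.roll(X, -ox, -1), -oy, -2)
        if t % blur_every == 0:
            X = blur_image(X)
    return X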
Example 3
import sys

sys.path.append("E:\\PythonProject\\assignment3")
import time, os, json
import numpy as np
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt

from cs231n.classifiers.pretrained_cnn import PretrainedCNN
from cs231n.data_utils import load_tiny_imagenet
from cs231n.image_utils import blur_image, deprocess_image, preprocess_image

data = load_tiny_imagenet(
    'E:/PythonProject/assignment3/cs231n/datasets/tiny-imagenet-100-A',
    subtract_mean=True)
model = PretrainedCNN(
    h5_file='E:/PythonProject/assignment3/cs231n/datasets/pretrained_model.h5')
'''
def create_class_visualization(target_y, model, **kwargs):
    """
    Perform optimization over the image to generate class visualizations.

    Inputs:
    - target_y: Integer in the range [0, 100) giving the target class
    - model: A PretrainedCNN that will be used for generation

    Keyword arguments:
    - learning_rate: Floating point number giving the learning rate
    - blur_every: An integer; how often to blur the image as a regularizer
    - l2_reg: Floating point number giving L2 regularization strength on the image;
    this is lambda in the equation above.
    - max_jitter: How much random jitter to add to the image as regularization
Example 4
    if j == 0:
      plt.title(data['class_names'][class_idx][0])
    plt.imshow(img)
    plt.gca().axis('off')

plt.show()


# # Pretrained model
# We have trained a deep CNN for you on the TinyImageNet-100-A dataset that we will use for image visualization. The model has 9 convolutional layers (with spatial batch normalization) and 1 fully-connected hidden layer (with batch normalization).
# 
# To get the model, run the script `get_pretrained_model.sh` from the `cs231n/datasets` directory. After doing so, run the following to load the model from disk.

# In[ ]:

model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5')


# ## Pretrained model performance
# Run the following to test the performance of the pretrained model on some random training and validation set images. You should see training accuracy around 90% and validation accuracy around 60%; this indicates a bit of overfitting, but it should work for our visualization experiments.

# In[ ]:

batch_size = 100

# Test the model on training data
mask = np.random.randint(data['X_train'].shape[0], size=batch_size)
X, y = data['X_train'][mask], data['y_train'][mask]
y_pred = model.loss(X).argmax(axis=1)
print('Training accuracy: ', (y_pred == y).mean())
Example 5
@author: hjiang

"""
import time, os, json
import numpy as np
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt

from cs231n.classifiers.pretrained_cnn import PretrainedCNN
from cs231n.data_utils import load_tiny_imagenet
from cs231n.image_utils import blur_image, deprocess_image, preprocess_image
import imageio

data = load_tiny_imagenet('cs231n/datasets/tiny-imagenet-100-A',
                          subtract_mean=True)
model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5')
#import numpy as np
#import matplotlib.pyplot as plt
# Load a previously saved visualization, crop its borders, and save the result.
test_file = '2017-06-07T18-09_vis_color.jpg'
img_a = imageio.imread(test_file)         # (H, W, 3) uint8 image
test_a = img_a.transpose(1, 2, 0)         # (W, 3, H)
test_b = np.transpose(test_a, (1, 2, 0))  # (3, H, W)
test_c = test_b[:, 80:740, 60:720]        # crop rows 80:740 and columns 60:720
test_d = test_c.transpose(2, 1, 0)        # (W', H', 3)
test_e = test_d.transpose(1, 0, 2)        # back to (H', W', 3)
plt.imshow(test_e)
imageio.imwrite('sky.jpg', test_e)        # imwrite returns None; no need to keep its result
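
# The transpose/crop chain above is equivalent to a single slice on the
# original (H, W, 3) array; a minimal equivalent sketch using the same crop bounds:
cropped = img_a[80:740, 60:720, :]
assert np.array_equal(cropped, test_e)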


#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#Deepdream