def train(rnn, n_steps, print_every, x_tensor, y_tensor, time_steps, criterion, optimizer):

    # initialize hidden state
    hidden = None

    for batch_i in range(n_steps):

        # outputs from the rnn
        prediction, hidden = rnn(x_tensor, hidden)

        ## Representing Memory ##
        # make a new variable for hidden and detach the hidden state from its history
        # this way, we don't backpropagate through the entire history
        hidden = hidden.detach()

        # calculate the loss
        loss = criterion(prediction, y_tensor)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # display loss and predictions
        if batch_i % print_every == 0:
            print('Loss: ', loss.item())
            plt.figure(figsize=(8, 5))
            plt.plot(time_steps, x_tensor.data.numpy().flatten(), 'r.', label='input')  # input
            plt.plot(time_steps, prediction.data.numpy().flatten(), 'b.', label='prediction')  # predictions
            plt.legend()
            fig = plt.gcf()
            name = 'time_series_pred' + str(batch_i)
            save_fig('RNN/images', fig, name)

    return rnn, prediction
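
# A minimal sketch of the pieces train() expects: an RNN following the
# (output, hidden) = rnn(x, hidden) contract, plus a loss and an optimizer.
# The RNN class below and the hidden_dim / lr values are placeholder
# assumptions, not the original model definition.
import torch
import torch.nn as nn


class RNN(nn.Module):
    def __init__(self, input_size=1, output_size=1, hidden_dim=32, n_layers=1):
        super().__init__()
        self.rnn = nn.RNN(input_size, hidden_dim, n_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_size)

    def forward(self, x, hidden):
        # x: (batch, seq_len, input_size)
        out, hidden = self.rnn(x, hidden)
        out = self.fc(out.reshape(-1, out.size(2)))  # (batch * seq_len, output_size)
        return out, hidden


rnn = RNN()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=0.01)
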
def viz_layer(layer, name, n_filters=4):
    fig = plt.figure(figsize=(20, 20))

    for i in range(n_filters):
        ax = fig.add_subplot(1, n_filters, i + 1, xticks=[], yticks=[])
        # grab layer outputs
        ax.imshow(np.squeeze(layer[0, i].data.numpy()), cmap='gray')
        ax.set_title('Output %s' % str(i + 1))
    fig = plt.gcf()
    save_fig('CNN/images', fig, name)
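

# A self-contained usage sketch for viz_layer; the random Conv2d weights and
# the stand-in image below are assumptions. viz_layer only needs a 4-D
# activation tensor of shape (1, n_filters, H, W).
import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3, bias=False)
img_tensor = torch.rand(1, 1, 64, 64)   # stand-in single-channel image
activated = F.relu(conv(img_tensor))    # shape: (1, 4, 62, 62)
viz_layer(activated, 'random_conv_output')
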
Example #3
x = df_final[:-1]  # all but the last piece of data
y = df_final[1:]  # all but the first piece of data (x shifted forward one step)

x = x.to_numpy()
y = y.to_numpy()

x_reshape = x.reshape(-1,1)
y_reshape = y.reshape(-1,1)

# convert data into Tensors
x_tensor = torch.Tensor(x_reshape).unsqueeze(0)  # unsqueeze adds a batch dimension of size 1
y_tensor = torch.Tensor(y_reshape)

trained_rnn, prediction = train(rnn, iterations, print_every, x_tensor, y_tensor, time_steps, criterion, optimizer)

plt.figure(figsize=(12, 8))
plt.title(str(group) + ' divided by ' + str(divide) + ' lr: ' + str(lr) + ' hidden_dim: ' + str(hidden_dim) +
          ' n_layers: ' + str(n_layers))
plt.plot(time_steps, x_reshape, 'r.', label='input')  # input
plt.plot(time_steps, x_reshape, 'r-')
plt.plot(time_steps, prediction.data.numpy().flatten(), 'b.', label='prediction')
plt.plot(time_steps, prediction.data.numpy().flatten(), 'b-')  # predictions
plt.plot(time_steps, y_reshape, 'g.', label='real')
plt.plot(time_steps, y_reshape, 'g-')
plt.legend()
fig = plt.gcf()
name = 'pred_' + group
save_fig('RNN/images', fig, name)

print('RNN training complete')
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np

from common.save_fig import save_fig

# Read in the image
image = mpimg.imread('images/curved_lane.jpg')
plt.imshow(image)
plt.show()

# Convert to grayscale for filtering
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
plt.imshow(gray, cmap='gray')
fig = plt.gcf()
save_fig('CNN/images', fig, 'curved_lane_gray')

# 3x3 array for edge detection
sobel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])

sobel_x_left = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])

sobel_x_right = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])

blur = np.array([[0.0625, 0.125, 0.0625], [0.125, 0.25, 0.125],
                 [0.0625, 0.125, 0.0625]])

emboss = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])

outline = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
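
# Applying one of the kernels above with OpenCV as a quick check.
# cv2.filter2D cross-correlates the kernel with the image; ddepth=-1 keeps the
# input depth. The output figure name is an assumption, not from the original.
filtered_y = cv2.filter2D(gray, -1, sobel_y)
plt.imshow(filtered_y, cmap='gray')
fig = plt.gcf()
save_fig('CNN/images', fig, 'curved_lane_sobel_y')
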
Example #5
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()

# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20 // 2, idx + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(images[idx]), cmap='gray')
    # print out the correct label for each image
    # .item() gets the value contained in a Tensor
    ax.set_title(str(labels[idx].item()))

fig = plt.gcf()
name = 'example_mnist_images_valid'
save_fig('CNN/images', fig, name)  # output directory assumed to match the other save_fig calls

# view one image in more detail
img = np.squeeze(images[1])

fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(111)
ax.imshow(img)
width, height = img.shape
thresh = img.max() / 2.5
for x in range(width):
    for y in range(height):
        val = round(img[x][y], 2) if img[x][y] != 0 else 0
        ax.annotate(str(val),
                    xy=(y, x),
                    horizontalalignment='center',
                    verticalalignment='center',
                    color='white' if img[x][y] < thresh else 'black')


def viz_layer(layer, name, n_filters=4):
    fig = plt.figure(figsize=(20, 20))

    for i in range(n_filters):
        ax = fig.add_subplot(1, n_filters, i + 1)
        # grab layer outputs
        ax.imshow(np.squeeze(layer[0, i].data.numpy()), cmap='gray')
        ax.set_title('Output %s' % str(i + 1))

    fig = plt.gcf()
    save_fig('CNN/images', fig, name)


# plot original image
plt.imshow(gray_img, cmap='gray')
fig = plt.gcf()
save_fig('CNN/images', fig, 'max_pool_car_gray')

# visualize all filters
fig = plt.figure(figsize=(10, 5))
for i in range(4):
    ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
    ax.imshow(filters[i], cmap='gray')
    ax.set_title('Filter %s' % str(i+1))
    width, height = filters[i].shape
    for x in range(width):
        for y in range(height):
            ax.annotate(str(filters[i][x][y]), xy=(y,x),
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white' if filters[i][x][y]<0 else 'black')
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'images/udacity_sdc.png'

# load color image
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)

# normalize, rescale entries to lie in [0,1]
gray_img = gray_img.astype("float32") / 255

# plot image
plt.imshow(gray_img, cmap='gray')
fig = plt.gcf()
save_fig('CNN/images', fig, 'car_gray')

# define and visualize the filters
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1],
                        [-1, -1, 1, 1]])
print('Filter shape: ', filter_vals.shape)

# Defining four different filters,
# all of which are linear combinations of the `filter_vals` defined above

# define four filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
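
# A sketch of how these filters might be pushed through a small conv + pool
# stack so the feature maps can be viewed with viz_layer. The Net class and
# the figure names passed to viz_layer are assumptions, not the original code.
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self, weight):
        super().__init__()
        k_height, k_width = weight.shape[2:]
        # one grayscale input channel, four output channels, no bias
        self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
        self.conv.weight = nn.Parameter(weight)  # load the hand-made filters
        self.pool = nn.MaxPool2d(2, 2)

    def forward(self, x):
        conv_x = self.conv(x)
        activated_x = F.relu(conv_x)
        pooled_x = self.pool(activated_x)
        return conv_x, activated_x, pooled_x


# weight shape: (out_channels, in_channels, k_height, k_width) = (4, 1, 4, 4)
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
model = Net(weight)

gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
conv_layer, activated_layer, pooled_layer = model(gray_img_tensor)

viz_layer(activated_layer, 'car_conv_activated')
viz_layer(pooled_layer, 'car_max_pooled')
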
Example #8

# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)  # one batch of 20 labels
images = images.numpy()  # convert images to numpy for display; shape (20, 3, 32, 32)

# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
# display 20 images
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20 // 2, idx + 1, xticks=[], yticks=[])
    imshow(images[idx])  # display helper for a (C, H, W) image (not defined in this snippet)
    ax.set_title(classes[labels[idx]])
fig = plt.gcf()
save_fig('CNN/cifar_images', fig, 'images_in_batch')

rgb_img = np.squeeze(images[3])
channels = ['red channel', 'green channel', 'blue channel']

# images in detail
fig = plt.figure(figsize=(36, 36))
for idx in np.arange(rgb_img.shape[0]):
    ax = fig.add_subplot(1, 3, idx + 1)
    img = rgb_img[idx]
    ax.imshow(img, cmap='gray')
    ax.set_title(channels[idx])
    width, height = img.shape
    thresh = img.max() / 2.5
    for x in range(width):
        for y in range(height):