Example #1
"""
  Verify the convolution: each convolved feature should match the
  corresponding activation from the sparse autoencoder
"""

# Check 1000 randomly chosen (feature, image, location) combinations
for i in range(1000):
    feature_num = np.random.randint(hidden_size)
    image_num = np.random.randint(8)
    image_row = np.random.randint(image_dim - patch_dim + 1)
    image_col = np.random.randint(image_dim - patch_dim + 1)

    patch = conv_images[image_row:image_row + patch_dim, image_col:image_col + patch_dim, :, image_num]
    patch = np.concatenate((patch[:, :, 0].flatten(), patch[:, :, 1].flatten(), patch[:, :, 2].flatten()))
    patch = (patch-mean_patch).reshape((-1, 1))
    patch = zca_white.dot(patch) # ZCA whitening

    features = feedforward_autoencoder(opt_theta, hidden_size, visible_size, patch)

    if abs(features[feature_num, 0] - convolved_features[feature_num, image_num, image_row, image_col]) > 1e-9:
        print('Convolved feature does not match activation from autoencoder')
        print('Feature Number    : {}'.format(feature_num))
        print('Image Number      : {}'.format(image_num))
        print('Image Row         : {}'.format(image_row))
        print('Image Column      : {}'.format(image_col))
        print('Convolved feature : {:f}'.format(convolved_features[feature_num, image_num, image_row, image_col]))
        print('Sparse AE feature : {:f}'.format(features[feature_num, 0]))
        # Stop here so the success message below cannot be printed on failure
        raise AssertionError('Convolved feature does not match activation from autoencoder')

print('Congratulations! Your convolution code passed the test.')

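# The whitening quantities used above (mean_patch, zca_white) come from a
# preprocessing step that is not part of this snippet. A minimal sketch of
# how they are typically derived from the raw training patches (an
# assumption about that step; `patches` is (n_pixels, n_patches) and
# epsilon = 0.1 is a common regularizer for image data):
def compute_zca_whitening(patches, epsilon=0.1):
    # Per-pixel mean over all patches
    mean_patch = np.mean(patches, axis=1).reshape((-1, 1))
    centered = patches - mean_patch
    # Covariance of the centered patches
    sigma = centered.dot(centered.T) / patches.shape[1]
    # Eigendecomposition via SVD (sigma is symmetric PSD)
    U, S, _ = np.linalg.svd(sigma)
    # ZCA whitening: rotate into the eigenbasis, rescale, rotate back
    zca_white = U.dot(np.diag(1.0 / np.sqrt(S + epsilon))).dot(U.T)
    return mean_patch, zca_white
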

"""
    W1 = sae1_opt_theta[0:hidden_size_L1*input_size].reshape((hidden_size_L1, input_size))
    image = display_network(W1.T)
    plt.figure()
    plt.imshow(image, cmap=plt.cm.gray)
    plt.show()


"""
STEP 2: Train the second sparse autoencoder

  This trains the second sparse autoencoder on the features produced by the first autoencoder.
  If you've correctly implemented sparse_autoencoder_cost, you don't need
  to change anything here.
"""

sae1_features = feedforward_autoencoder(sae1_opt_theta, hidden_size_L1, input_size, train_data)

#  Randomly initialize the parameters
sae2_theta = initialize_parameters(hidden_size_L2, hidden_size_L1)

#  Instructions: Train the second-layer sparse autoencoder. This layer has
#                a hidden size of "hidden_size_L2" and an input size of "hidden_size_L1".
#                Store the optimal parameters in sae2_opt_theta.
J = lambda theta: sparse_autoencoder_cost(theta, hidden_size_L1, hidden_size_L2,
                                          lambda_, sparsity_param, beta, sae1_features)

options = {'maxiter': maxiter, 'disp': True}

results = scipy.optimize.minimize(J, sae2_theta, method='L-BFGS-B', jac=True, options=options)
sae2_opt_theta = results['x']
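
# A likely next step in the stacked-autoencoder pipeline (an assumption;
# it mirrors how sae1_features was produced above): feed the first-layer
# features through the trained second autoencoder to obtain the input for
# the following stage.
sae2_features = feedforward_autoencoder(sae2_opt_theta, hidden_size_L2,
                                        hidden_size_L1, sae1_features)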
Example #3
# Visualize the learned first-layer weights
W1 = opt_theta[0:hidden_size * input_size].reshape((hidden_size, input_size))
image = display_network(W1.T)
plt.figure()
plt.imsave('stl_weights.png', image, cmap=plt.cm.gray)
#plt.imshow(image, cmap=plt.cm.gray)
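
# display_network is a helper provided by the exercise code. A hypothetical
# equivalent (`tile_weights` is illustrative, not the actual helper) tiles
# each column of its argument, i.e. one hidden unit's weight vector, into a
# grid of square grayscale patches:
def tile_weights(A, pad=1):
    num_pixels, num_units = A.shape
    dim = int(np.sqrt(num_pixels))           # assumes square patches
    grid = int(np.ceil(np.sqrt(num_units)))  # grid side length in patches
    side = grid * (dim + pad) + pad
    out = np.ones((side, side))
    for k in range(num_units):
        r, c = divmod(k, grid)
        patch = A[:, k].reshape((dim, dim))
        patch = patch / (np.abs(patch).max() + 1e-12)  # per-patch contrast
        out[pad + r * (dim + pad):pad + r * (dim + pad) + dim,
            pad + c * (dim + pad):pad + c * (dim + pad) + dim] = patch
    return out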
"""
STEP 3: Extract Features from the Supervised Dataset

  You need to complete the code in feed_forward_autoencoder so that the
  following command will extract features from the data.

"""

train_features = feedforward_autoencoder(opt_theta, hidden_size, input_size,
                                         train_data)
test_features = feedforward_autoencoder(opt_theta, hidden_size, input_size,
                                        test_data)
"""
STEP 4: Train the softmax classifier

  Use softmax_train from the previous exercise to train a multi-class classifier.

  Use lambda = 1e-4 for the weight regularization for softmax

  You need to compute softmax_model using softmax_train on train_features and
  train_labels
"""

lambda_ = 1e-4  # weight decay parameter
options = {'maxiter': maxiter, 'disp': True}
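
# The snippet ends before the training call itself. A hedged sketch of the
# missing step (softmax_train's signature is assumed from the previous
# exercise: input dimension, number of classes, weight decay, data, labels,
# options; num_classes = 5 assumes the five labeled digit classes of the
# self-taught-learning exercise):
num_classes = 5  # assumption, not given in this snippet
softmax_model = softmax_train(hidden_size, num_classes, lambda_,
                              train_features, train_labels, options)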