Example #1
result = scipy.optimize.minimize(J,
                                 sae1_theta,
                                 method='L-BFGS-B',
                                 jac=True,
                                 options=options_)
sae1_opt_theta = result.x

print(result)

##======================================================================
## STEP 3: Train the second sparse autoencoder
#  This trains the second sparse autoencoder on the first autoencoder
#  features.
#  If you've correctly implemented sparseAutoencoderCost.m, you don't need
#  to change anything here.

sae1_features = sparse_autoencoder.sparse_autoencoder(sae1_opt_theta,
                                                      hidden_size_L1,
                                                      input_size, train_images)
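# For reference, sparse_autoencoder.sparse_autoencoder is assumed here to be
# the feedforward pass of the trained encoder, returning the hidden-layer
# activations (a sketch, not the repo's actual code):
#
#   W1 = theta[0:hidden_size * visible_size].reshape(hidden_size, visible_size)
#   b1 = theta[2 * hidden_size * visible_size:
#              2 * hidden_size * visible_size + hidden_size]
#   features = sigmoid(W1.dot(data) + b1.reshape(-1, 1))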

#  Randomly initialize the parameters
sae2_theta = sparse_autoencoder.initialize(hidden_size_L2, hidden_size_L1)
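# initialize is assumed to draw W1 and W2 uniformly from [-r, r] with
# r = sqrt(6) / sqrt(hidden_size + visible_size + 1), set the biases to zero,
# and return everything flattened into one parameter vector (a sketch of the
# interface, not the repo's code).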

J = lambda x: sparse_autoencoder.sparse_autoencoder_cost(
    x, hidden_size_L1, hidden_size_L2, lambda_, sparsity_param, beta,
    sae1_features)
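# The objective being minimized is, schematically (the actual implementation
# lives in sparse_autoencoder_cost): the mean squared reconstruction error,
# plus an L2 weight-decay term scaled by lambda_, plus beta times the sum of
# KL(sparsity_param || rho_hat_j) over hidden units j, where rho_hat_j is the
# mean activation of unit j across the training examples.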

options_ = {'maxiter': 400, 'disp': True}

result = scipy.optimize.minimize(J,
                                 sae2_theta,
                                 method='L-BFGS-B',
                                 jac=True,
                                 options=options_)
sae2_opt_theta = result.x

print(result)

Example #2

# For 1000 random points
for i in range(1000):
    feature_num = np.random.randint(0, hidden_size)
    image_num = np.random.randint(0, 8)
    image_row = np.random.randint(0, image_dim - patch_dim + 1)
    image_col = np.random.randint(0, image_dim - patch_dim + 1)

    patch = conv_images[image_row:image_row + patch_dim,
                        image_col:image_col + patch_dim, :, image_num]

    patch = np.concatenate((patch[:, :, 0].flatten(),
                            patch[:, :, 1].flatten(),
                            patch[:, :, 2].flatten()))
    patch = np.reshape(patch, (patch.size, 1))
    # Preprocess the patch exactly as the training patches were preprocessed:
    # subtract the per-pixel mean, then apply ZCA whitening.
    patch = patch - np.tile(patch_mean, (patch.shape[1], 1)).transpose()
    patch = zca_white.dot(patch)

    features = sparse_autoencoder.sparse_autoencoder(opt_theta, hidden_size,
                                                     visible_size, patch)

    if abs(features[feature_num, 0] -
           convolved_features[feature_num, image_num, image_row,
                              image_col]) > 1e-9:
        print('Convolved feature does not match activation from autoencoder')
        print('Feature Number      :', feature_num)
        print('Image Number        :', image_num)
        print('Image Row           :', image_row)
        print('Image Column        :', image_col)
        print('Convolved feature   :',
              convolved_features[feature_num, image_num, image_row, image_col])
        print('Sparse AE feature   :', features[feature_num, 0])
        sys.exit(
            "Convolved feature does not match activation from autoencoder. Exiting..."
        )
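
# The loop above spot-checks the output of a convolution routine against the
# encoder applied directly to each patch. A minimal sketch of such a routine,
# assuming the shapes and names used in this snippet (the exercise's own
# cnn_convolve may differ):

import numpy as np
from scipy.signal import convolve2d

def cnn_convolve_sketch(patch_dim, num_features, images, W, b,
                        zca_white, patch_mean):
    # images: (image_dim, image_dim, channels, num_images)
    image_dim, _, channels, num_images = images.shape
    conv_dim = image_dim - patch_dim + 1
    convolved = np.zeros((num_features, num_images, conv_dim, conv_dim))

    # Fold the ZCA whitening and mean subtraction into the filters so each
    # feature map reduces to a plain 2-D convolution:
    #   W' = W . zca_white,  b' = b - W' . patch_mean
    WT = W.dot(zca_white)
    b_eff = b - WT.dot(patch_mean)

    for i in range(num_images):
        for f in range(num_features):
            acc = np.zeros((conv_dim, conv_dim))
            for c in range(channels):
                filt = WT[f, c * patch_dim ** 2:(c + 1) * patch_dim ** 2]
                filt = filt.reshape(patch_dim, patch_dim)
                # Flip the kernel so convolve2d computes a correlation,
                # matching the feedforward pass on raw patches.
                acc += convolve2d(images[:, :, c, i],
                                  np.flipud(np.fliplr(filt)), mode='valid')
            convolved[f, i] = 1.0 / (1.0 + np.exp(-(acc + b_eff[f])))
    return convolved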
Example #3
result = scipy.optimize.minimize(J, theta, method='L-BFGS-B', jac=True, options=options_)
opt_theta = result.x

print(result)

# Visualize the weights
W1 = opt_theta[0:hidden_size * input_size].reshape(hidden_size, input_size).transpose()
display_network.display_network(W1)

##======================================================================
## STEP 3: Extract Features from the Supervised Dataset
#
#  You need to complete the code in feedForwardAutoencoder.m so that the
#  following command will extract features from the data.

train_features = sparse_autoencoder.sparse_autoencoder(opt_theta, hidden_size,
                                                       input_size, train_data)

test_features = sparse_autoencoder.sparse_autoencoder(opt_theta, hidden_size,
                                                      input_size, test_data)

##======================================================================
## STEP 4: Train the softmax classifier

lambda_ = 1e-4
options_ = {'maxiter': 400, 'disp': True}

opt_theta, input_size, num_classes = softmax.softmax_train(hidden_size, num_labels,
                                                           lambda_, train_features,
                                                           train_labels, options_)
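
# softmax_train is assumed to fit a multinomial logistic regression on the
# hidden-layer features: it minimizes the regularized cross-entropy
#   J(theta) = -(1/m) * sum_i log P(y_i | x_i) + (lambda_/2) * ||theta||^2,
# with P(y=k|x) proportional to exp(theta_k . x), using the same L-BFGS-B
# options as above (a sketch of the interface, not the repo's code).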

##======================================================================
Example #4
J = lambda x: sparse_autoencoder.sparse_autoencoder_cost(x, input_size, hidden_size_L1,
                                                         lambda_, sparsity_param,
                                                         beta, train_images)
options_ = {'maxiter': 400, 'disp': True}

result = scipy.optimize.minimize(J, sae1_theta, method='L-BFGS-B', jac=True, options=options_)
sae1_opt_theta = result.x

print(result)

##======================================================================
## STEP 3: Train the second sparse autoencoder
#  This trains the second sparse autoencoder on the first autoencoder
#  features.
#  If you've correctly implemented sparseAutoencoderCost.m, you don't need
#  to change anything here.

sae1_features = sparse_autoencoder.sparse_autoencoder(sae1_opt_theta, hidden_size_L1,
                                                      input_size, train_images)

#  Randomly initialize the parameters
sae2_theta = sparse_autoencoder.initialize(hidden_size_L2, hidden_size_L1)

J = lambda x: sparse_autoencoder.sparse_autoencoder_cost(x, hidden_size_L1, hidden_size_L2,
                                                         lambda_, sparsity_param,
                                                         beta, sae1_features)

options_ = {'maxiter': 400, 'disp': True}

result = scipy.optimize.minimize(J, sae2_theta, method='L-BFGS-B', jac=True, options=options_)
sae2_opt_theta = result.x

print(result)
Example #5
# For 1000 random points
for i in range(1000):
    feature_num = np.random.randint(0, hidden_size)
    image_num = np.random.randint(0, 8)
    image_row = np.random.randint(0, image_dim - patch_dim + 1)
    image_col = np.random.randint(0, image_dim - patch_dim + 1)

    patch = conv_images[image_row:image_row + patch_dim, image_col:image_col + patch_dim, :, image_num]

    patch = np.concatenate((patch[:, :, 0].flatten(), patch[:, :, 1].flatten(), patch[:, :, 2].flatten()))
    patch = np.reshape(patch, (patch.size, 1))
    patch = patch - np.tile(patch_mean, (patch.shape[1], 1)).transpose()
    patch = zca_white.dot(patch)

    features = sparse_autoencoder.sparse_autoencoder(opt_theta, hidden_size, visible_size, patch)

    if abs(features[feature_num, 0] - convolved_features[feature_num, image_num, image_row, image_col]) > 1e-9:
        print('Convolved feature does not match activation from autoencoder')
        print('Feature Number      :', feature_num)
        print('Image Number        :', image_num)
        print('Image Row           :', image_row)
        print('Image Column        :', image_col)
        print('Convolved feature   :', convolved_features[feature_num, image_num, image_row, image_col])
        print('Sparse AE feature   :', features[feature_num, 0])
        sys.exit("Convolved feature does not match activation from autoencoder. Exiting...")

print('Congratulations! Your convolution code passed the test.')

## STEP 2c: Implement pooling
#  Implement pooling in the function cnnPool in cnnPool.m
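
#  A minimal mean-pooling sketch for this step, assuming the
#  (num_features, num_images, conv_dim, conv_dim) layout used by the
#  convolution check above (the exercise's own cnn_pool may differ):

import numpy as np

def cnn_pool_sketch(pool_dim, convolved_features):
    num_features, num_images, conv_dim, _ = convolved_features.shape
    res_dim = conv_dim // pool_dim  # number of non-overlapping pooling regions
    pooled = np.zeros((num_features, num_images, res_dim, res_dim))
    for r in range(res_dim):
        for c in range(res_dim):
            region = convolved_features[:, :,
                                        r * pool_dim:(r + 1) * pool_dim,
                                        c * pool_dim:(c + 1) * pool_dim]
            # Average each pool_dim x pool_dim region (the UFLDL exercise
            # uses mean pooling).
            pooled[:, :, r, c] = region.mean(axis=(2, 3))
    return pooled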