Example #1
def sal(theta):
    return autoencoder.sparse_autoencoder_loss(theta, visible_size, hidden_size, lamb,
                                               sparsity_param, beta, patches)
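This wrapper only binds the hyperparameters; it does nothing until an optimizer calls it. A minimal usage sketch, assuming `visible_size`, `hidden_size`, `lamb`, `sparsity_param`, `beta`, `patches`, and `maxfun` are defined as in the other examples, and that `sparse_autoencoder_loss` returns a (loss, gradient) pair as `scipy.optimize.fmin_l_bfgs_b` expects:

import scipy.optimize

# Sketch only: initialize parameters and minimize the wrapper with L-BFGS.
theta = autoencoder.initialize_parameters(hidden_size, visible_size)
opt_theta, loss, d = scipy.optimize.fmin_l_bfgs_b(sal, theta, maxfun=maxfun, iprint=1)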
Example #2
# Train the first sparse autoencoder on the unlabelled STL training
# images.

# Randomly initialize the parameters
sae1_theta = autoencoder.initialize_parameters(hidden_size_l1, input_size)

# Train the first layer sparse autoencoder. This layer has a hidden
# size of `hidden_size_l1`.

# sae1_opt_theta, loss = minFunc( @(p) sparseAutoencoderLoss(p, ...
#     inputSize, hiddenSizeL1, ...
#     lambda, sparsityParam, ...
#     beta, trainData), ...
#     sae1Theta, options);

fn = lambda theta: autoencoder.sparse_autoencoder_loss(
    theta, input_size, hidden_size_l1, lamb, sparsity_param, beta, train_data)
sae1_opt_theta, loss, d = (scipy.optimize.fmin_l_bfgs_b(fn,
                                                        sae1_theta,
                                                        maxfun=maxfun,
                                                        iprint=1))

if DISPLAY:
    W1, W2, b1, b2 = autoencoder.unflatten(sae1_opt_theta, input_size,
                                           hidden_size_l1)
    util.display_network(W1.T)

# === Step 3: Train the second sparse autoencoder ===
#
# Train the second sparse autoencoder on the first autoencoder features.
sae1_features = autoencoder.feedforward_autoencoder(sae1_opt_theta,
                                                    hidden_size_l1, input_size,
                                                    train_data)
Example #3
def sal(theta):
    return autoencoder.sparse_autoencoder_loss(theta, visible_size, hidden_size, lamb,
                                               target_activation, beta, patches)
Example #4
def sal(theta):
    return autoencoder.sparse_autoencoder_loss(theta, visible_size,
                                               hidden_size, lamb,
                                               target_activation, beta,
                                               patches)
Example #5
# Train the first sparse autoencoder on the unlabelled STL training
# images.

# Randomly initialize the parameters
sae1_theta = autoencoder.initialize_parameters(hidden_size_l1, input_size)

# Train the first layer sparse autoencoder. This layer has a hidden
# size of `hidden_size_l1`.

# sae1_opt_theta, loss = minFunc( @(p) sparseAutoencoderLoss(p, ...
#     inputSize, hiddenSizeL1, ...
#     lambda, sparsityParam, ...
#     beta, trainData), ...
#     sae1Theta, options);

fn = lambda theta: autoencoder.sparse_autoencoder_loss(
    theta, input_size, hidden_size_l1, lamb, sparsity_param, beta, train_data
)
sae1_opt_theta, loss, d = scipy.optimize.fmin_l_bfgs_b(fn, sae1_theta, maxfun=maxfun, iprint=1)

if DISPLAY:
    W1, W2, b1, b2 = autoencoder.unflatten(sae1_opt_theta, input_size, hidden_size_l1)
    util.display_network(W1.T)

# === Step 3: Train the second sparse autoencoder ===
#
# Train the second sparse autoencoder on the first autoencoder features.
sae1_features = autoencoder.feedforward_autoencoder(sae1_opt_theta, hidden_size_l1, input_size, train_data)

# Randomly initialize the parameters
sae2_theta = autoencoder.initialize_parameters(hidden_size_l2, hidden_size_l1)
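The excerpt stops after initializing `sae2_theta`; the second-layer training step itself is not shown. A sketch of what it would look like, mirroring the first-layer call but with the layer-1 features as input (the second layer's visible size is `hidden_size_l1`), assuming the same `lamb`, `sparsity_param`, `beta`, and `maxfun` settings:

# Sketch only (not part of the excerpt): train the second sparse autoencoder
# on the first-layer features.
fn2 = lambda theta: autoencoder.sparse_autoencoder_loss(
    theta, hidden_size_l1, hidden_size_l2, lamb, sparsity_param, beta, sae1_features)
sae2_opt_theta, loss, d = scipy.optimize.fmin_l_bfgs_b(fn2, sae2_theta,
                                                       maxfun=maxfun, iprint=1)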
Example #6
test_labels = labels[num_train:]

# Output some statistics
print '# examples in unlabeled set: %d' % unlabeled_data.shape[1]
print '# examples in supervised training set: %d' % train_data.shape[1]
print '# examples in supervised testing set: %d' % test_data.shape[1]

# === Step 2: Train the sparse autoencoder ===
#
# This trains the sparse autoencoder on the unlabeled training images.

# Randomly initialize the parameters
theta = autoencoder.initialize_parameters(hidden_size, input_size)

# The single-parameter function to minimize
fn = lambda theta: autoencoder.sparse_autoencoder_loss(
  theta, input_size, hidden_size, lamb, sparsity_param, beta, unlabeled_data)
# Find `opt_theta` by running the sparse autoencoder on unlabeled
# training images.
opt_theta, loss, d = (
  scipy.optimize.fmin_l_bfgs_b(fn, theta, maxfun=maxfun, iprint=1, m=20))

# Visualize weights
W1, W2, b1, b2 = autoencoder.unflatten(opt_theta, input_size, hidden_size)
util.display_network(W1.T)

# === Step 3: Extract Features from the Supervised Dataset ===
train_features = autoencoder.feedforward_autoencoder(
  opt_theta, hidden_size, input_size, train_data)
test_features = autoencoder.feedforward_autoencoder(
  opt_theta, hidden_size, input_size, test_data)
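The excerpt ends with feature extraction; `train_features` and `test_features` would then feed a supervised classifier. The exercise's own classifier code is not shown here, so purely as an illustration, scikit-learn's logistic regression could be fit on the transposed matrices (examples are stored as columns above, while scikit-learn expects one row per example); `train_labels` is assumed to be the counterpart of `test_labels`:

# Illustration only -- not the excerpt's own classifier.
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression(max_iter=1000)
clf.fit(train_features.T, train_labels)
print 'test accuracy: %f' % clf.score(test_features.T, test_labels)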
Example #7
test_labels = labels[num_train:]

# Output some statistics
print '# examples in unlabeled set: %d' % unlabeled_data.shape[1]
print '# examples in supervised training set: %d' % train_data.shape[1]
print '# examples in supervised testing set: %d' % test_data.shape[1]

# === Step 2: Train the sparse autoencoder ===
#
# This trains the sparse autoencoder on the unlabeled training images.

# Randomly initialize the parameters
theta = autoencoder.initialize_parameters(hidden_size, input_size)

# The single-parameter function to minimize
fn = lambda theta: autoencoder.sparse_autoencoder_loss(
    theta, input_size, hidden_size, lamb, sparsity_param, beta, unlabeled_data)
# Find `opt_theta` by running the sparse autoencoder on unlabeled
# training images.
opt_theta, loss, d = (scipy.optimize.fmin_l_bfgs_b(fn,
                                                   theta,
                                                   maxfun=maxfun,
                                                   iprint=1,
                                                   m=20))

# Visualize weights
W1, W2, b1, b2 = autoencoder.unflatten(opt_theta, input_size, hidden_size)
util.display_network(W1.T)

# === Step 3: Extract Features from the Supervised Dataset ===
train_features = autoencoder.feedforward_autoencoder(opt_theta, hidden_size,
                                                     input_size, train_data)