def train(self, train, hidden_size, debug=True):
    # Record the network layout and the number of training examples.
    self.layout = (hidden_size, train.shape[0])
    self.sample_size = train.shape[1]
    theta = initialize(hidden_size, train.shape[0])
    self.activations[0] = train
    if debug:
        # Verify the analytic gradient against a numerical estimate.
        self.check_gradient(theta)
    else:
        # Minimize the cost with L-BFGS; forward_back returns (cost, gradient).
        options = {'maxiter': 500, 'disp': True}
        J = lambda x: self.forward_back(theta=x, debug=False)
        result = minimize(J, theta, method='L-BFGS-B', jac=True, options=options)
        opt_theta = result.x
        print(opt_theta)
Example #2
#  This loads our training data from the MNIST database files.

train_images = load_MNIST.load_MNIST_images(
    'data/mnist/train-images-idx3-ubyte')
train_labels = load_MNIST.load_MNIST_labels(
    'data/mnist/train-labels-idx1-ubyte')

##======================================================================
## STEP 2: Train the first sparse autoencoder
#  This trains the first sparse autoencoder on the unlabelled STL training
#  images.
#  If you've correctly implemented sparse_autoencoder_cost, you don't need
#  to change anything here.

#  Randomly initialize the parameters
sae1_theta = sparse_autoencoder.initialize(hidden_size_L1, input_size)

J = lambda x: sparse_autoencoder.sparse_autoencoder_cost(
    x, input_size, hidden_size_L1, lambda_, sparsity_param, beta, train_images)
options_ = {'maxiter': 400, 'disp': True}

result = scipy.optimize.minimize(J,
                                 sae1_theta,
                                 method='L-BFGS-B',
                                 jac=True,
                                 options=options_)
sae1_opt_theta = result.x

print(result)

##======================================================================
Example #3
##======================================================================
## STEP 1: Implement sampleIMAGES
#
#  After implementing sampleIMAGES, the display_network command should
#  display a random sample of 200 patches from the dataset

# Loading Sample Images
# patches = sample_images.sample_images()

# Loading 10K images from MNIST database
images = load_MNIST.load_MNIST_images('data/mnist/train-images-idx3-ubyte')
patches = images[:, 0:10000]
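
#  A minimal check of the STEP 1 comment above: display 200 randomly chosen
#  patches. This sketch assumes numpy is imported as np and that
#  display_network.display_network takes a matrix whose columns are patches,
#  as in the weight-visualization call later in this exercise.
sample_cols = np.random.choice(patches.shape[1], 200, replace=False)
display_network.display_network(patches[:, sample_cols])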

#  Obtain random parameters theta
theta = sparse_autoencoder.initialize(hidden_size, visible_size)

##======================================================================
## STEP 2: Implement sparseAutoencoderCost
#
#  You can implement all of the components (squared error cost, weight decay term,
#  sparsity penalty) in the cost function at once, but it may be easier to do
#  it step-by-step and run gradient checking (see STEP 3) after each step.  We
#  suggest implementing the sparseAutoencoderCost function using the following steps:
#
#  (a) Implement forward propagation in your neural network, and implement the
#      squared error term of the cost function.  Implement backpropagation to
#      compute the derivatives.   Then (using lambda=beta=0), run Gradient Checking
#      to verify that the calculations corresponding to the squared error cost
#      term are correct.
#
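#  A hedged sketch of step (a): forward propagation with sigmoid units, the
#  squared-error term of the cost, and backpropagation for its gradient.
#  It assumes theta packs (W1, W2, b1, b2) in that order, matching
#  sparse_autoencoder.initialize; the function name squared_error_cost is
#  illustrative, not this repository's API. Weight decay and the sparsity
#  penalty are omitted so the result matches the lambda = beta = 0 gradient
#  check described above.
import numpy as np

def squared_error_cost(theta, visible_size, hidden_size, data):
    # Unpack theta, assuming the (W1, W2, b1, b2) layout used by initialize.
    hv = hidden_size * visible_size
    W1 = theta[0:hv].reshape(hidden_size, visible_size)
    W2 = theta[hv:2 * hv].reshape(visible_size, hidden_size)
    b1 = theta[2 * hv:2 * hv + hidden_size]
    b2 = theta[2 * hv + hidden_size:]
    m = data.shape[1]
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))

    # Forward propagation.
    a2 = sigmoid(W1.dot(data) + b1[:, np.newaxis])   # hidden activations
    a3 = sigmoid(W2.dot(a2) + b2[:, np.newaxis])     # reconstruction

    # Squared-error term of the cost.
    cost = np.sum((a3 - data) ** 2) / (2.0 * m)

    # Backpropagation for the gradient, packed in the same order as theta.
    delta3 = (a3 - data) * a3 * (1.0 - a3)
    delta2 = W2.T.dot(delta3) * a2 * (1.0 - a2)
    grad = np.concatenate((
        (delta2.dot(data.T) / m).ravel(),
        (delta3.dot(a2.T) / m).ravel(),
        np.sum(delta2, axis=1) / m,
        np.sum(delta3, axis=1) / m))
    return cost, grad

#  Usage with lambda = beta = 0, e.g. on a small batch (illustrative only):
#  cost, grad = squared_error_cost(theta, visible_size, hidden_size, patches[:, :10])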
Example #4
#
#  This loads our training data from the MNIST database files.

train_images = load_MNIST.load_MNIST_images('data/mnist/train-images-idx3-ubyte')
train_labels = load_MNIST.load_MNIST_labels('data/mnist/train-labels-idx1-ubyte')


##======================================================================
## STEP 2: Train the first sparse autoencoder
#  This trains the first sparse autoencoder on the unlabelled STL training
#  images.
#  If you've correctly implemented sparse_autoencoder_cost, you don't need
#  to change anything here.

#  Randomly initialize the parameters
sae1_theta = sparse_autoencoder.initialize(hidden_size_L1, input_size)

J = lambda x: sparse_autoencoder.sparse_autoencoder_cost(x, input_size, hidden_size_L1,
                                                         lambda_, sparsity_param,
                                                         beta, train_images)
options_ = {'maxiter': 400, 'disp': True}

result = scipy.optimize.minimize(J, sae1_theta, method='L-BFGS-B', jac=True, options=options_)
sae1_opt_theta = result.x

print(result)

##======================================================================
## STEP 3: Train the second sparse autoencoder
#  This trains the second sparse autoencoder on the features produced by the
#  first autoencoder.
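
#  A hedged sketch of how this step typically proceeds: compute the first
#  autoencoder's hidden activations on the training images, then train a
#  second autoencoder on those features. The feedforward helper, the
#  hidden_size_L2 variable, and the (W1, W2, b1, b2) unpacking are
#  illustrative assumptions, not code from this file.
import numpy as np

def feedforward(opt_theta, hidden_size, visible_size, data):
    # Hidden-layer activations of a trained autoencoder.
    hv = hidden_size * visible_size
    W1 = opt_theta[0:hv].reshape(hidden_size, visible_size)
    b1 = opt_theta[2 * hv:2 * hv + hidden_size]
    return 1.0 / (1.0 + np.exp(-(W1.dot(data) + b1[:, np.newaxis])))

# Features from the first autoencoder become the input of the second one.
sae1_features = feedforward(sae1_opt_theta, hidden_size_L1, input_size, train_images)

sae2_theta = sparse_autoencoder.initialize(hidden_size_L2, hidden_size_L1)
J2 = lambda x: sparse_autoencoder.sparse_autoencoder_cost(x, hidden_size_L1, hidden_size_L2,
                                                          lambda_, sparsity_param,
                                                          beta, sae1_features)
result = scipy.optimize.minimize(J2, sae2_theta, method='L-BFGS-B', jac=True, options=options_)
sae2_opt_theta = result.x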
Example #5
train_labels = labels[train_index]

test_data = images[:, test_index]
test_labels = labels[test_index]

print('# examples in unlabeled set: {0:d}\n'.format(unlabeled_data.shape[1]))
print('# examples in supervised training set: {0:d}\n'.format(train_data.shape[1]))
print('# examples in supervised testing set: {0:d}\n'.format(test_data.shape[1]))

## ======================================================================
#  STEP 2: Train the sparse autoencoder
#  This trains the sparse autoencoder on the unlabeled training
#  images.

#  Randomly initialize the parameters
theta = sparse_autoencoder.initialize(hidden_size, input_size)

J = lambda x: sparse_autoencoder.sparse_autoencoder_cost(x, input_size, hidden_size,
                                                         lambda_, sparsity_param,
                                                         beta, unlabeled_data)

options_ = {'maxiter': 400, 'disp': True}
result = scipy.optimize.minimize(J, theta, method='L-BFGS-B', jac=True, options=options_)
opt_theta = result.x

print(result)

# Visualize the weights
W1 = opt_theta[0:hidden_size * input_size].reshape(hidden_size, input_size).transpose()
display_network.display_network(W1)