Example #1
# Imports assumed from the project's `library` package (the module names follow
# the later examples and are our guess for this excerpt).
from library import autoencoder
from library import mnist
from library import util

import scipy.optimize


def main(testing=True):
  images = mnist.load_images('../data/train-images-idx3-ubyte')  # 784 x 60000
  labels = mnist.load_labels('../data/train-labels-idx1-ubyte')  # 60000 x 1
  util.display_network(images[:,0:100])  # Show the first 100 images

  visible_size = 28*28
  hidden_size = 196
  sparsity_param = 0.1
  lamb = 3e-3
  beta = 3
  patches = images[:,0:10000]
  theta = autoencoder.initialize_parameters(hidden_size, visible_size)
  def sal(theta):
    return autoencoder.sparse_autoencoder_loss(theta, visible_size, hidden_size, lamb,
                                               sparsity_param, beta, patches)
  x, f, d = scipy.optimize.fmin_l_bfgs_b(sal, theta, maxfun=400, iprint=1, m=20)
  W1, W2, b1, b2 = autoencoder.unflatten(x, visible_size, hidden_size)
  util.display_network(W1.T)
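
The rows of W1 visualised at the end are the learned input weights of the hidden units. A minimal sketch of how the trained encoder would map images to hidden features, assuming the conventional sigmoid units of a sparse autoencoder (the helper below is ours, not part of the repository):

import numpy as np

def encode(W1, b1, images):
    """Hidden-layer activations for a batch of images (visible_size x m)."""
    z = W1.dot(images) + b1.reshape(-1, 1)
    return 1.0 / (1.0 + np.exp(-z))   # sigmoid activations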
Example #2
# Assumed imports and debug flag for this excerpt (not shown in the original).
import numpy as np

from library import mnist

DEBUG = False

# - `hidden_size_l1` - layer 1 hidden size
# - `hidden_size_l2` - layer 2 hidden size
# - `sparsity_param` - desired average activation of the hidden units
#   (\(\rho\) in the lecture notes)
# - `lamb` - weight decay parameter
# - `beta` - weight of the sparsity penalty term (see the sketch below)
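#
# A hedged sketch of how these hyperparameters fit together (following the
# UFLDL lecture notes rather than code from this repository): each sparse
# autoencoder layer is trained to minimise
#
#   J(W, b) = (1/m) * sum_i (1/2) * ||h(x_i) - x_i||^2
#             + (lamb / 2) * sum W^2
#             + beta * sum_j KL(sparsity_param || rho_hat_j)
#
# where rho_hat_j is the mean activation of hidden unit j over the batch and
# KL(p || q) = p*log(p/q) + (1-p)*log((1-p)/(1-q)).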
input_size = 28 * 28
num_classes = 10
hidden_size_l1 = 200
hidden_size_l2 = 200
sparsity_param = 0.1
lamb = 3e-3
beta = 3

# === Step 1: Load data from the MNIST database ===
train_data = mnist.load_images('../data/train-images-idx3-ubyte')
train_labels = mnist.load_labels('../data/train-labels-idx1-ubyte')

# For debugging purposes, reduce the size of the input data in order
# to speed up gradient checking.  Here, we consider only the 64
# most-varying pixels of the images, and only the first 100 images
# (a sketch of such a gradient check follows this example).

if DEBUG:
    input_size = 64
    # only 100 datapoints
    train_data = train_data[:, :100]
    train_labels = train_labels[:100]
    # only top input_size most-varying input elements (pixels)
    indices = train_data.var(1).argsort()[-input_size:]
    train_data = np.asfortranarray(train_data[indices, :])
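
The gradient check that these debug blocks speed up is the usual centred-difference test: perturb one parameter at a time by a small epsilon and compare the numerical slope with the analytic gradient. A minimal sketch, assuming a loss function that returns `(cost, grad)` as in the examples above (the helper name is ours, not part of the repository):

import numpy as np

def check_gradient(loss, theta, eps=1e-4, num_checks=20):
    """Spot-check the analytic gradient of `loss` against centred differences."""
    _, grad = loss(theta)
    for i in np.random.choice(theta.size, num_checks, replace=False):
        e = np.zeros_like(theta)
        e[i] = eps
        numerical = (loss(theta + e)[0] - loss(theta - e)[0]) / (2 * eps)
        print(i, numerical, grad[i], abs(numerical - grad[i]))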
Example #3
#
#  Here we define and initialise some constants which allow the code
#  to be used more generally on any arbitrary input.  We also
#  initialise some parameters used for tuning the model.

# Assumed import for the MNIST loader used below (not shown in this excerpt).
from library import mnist

input_size = 28 * 28
num_classes = 10
lamb = 1e-4

# === Step 1: Load data ===
#
#  In this section, we load the input and output data.  For softmax
#  regression on MNIST pixels, the input data is the images, and the
#  output data is the labels.

images = mnist.load_images('../data/train-images-idx3-ubyte')
labels = mnist.load_labels('../data/train-labels-idx1-ubyte')
input_data = images

# For debugging purposes, reduce the size of the input data in order
# to speed up gradient checking.  Here, we consider only the eight
# most-varying pixels of the images, and only the first 100 images.

DEBUG = False
if DEBUG:
  input_size = 8
  # only 100 datapoints
  input_data = input_data[:, :100]
  labels = labels[:100]
  # only top input_size most-varying input elements (pixels)
  indices = input_data.var(1).argsort()[-input_size:]
  input_data = input_data[indices, :]  # keep only those pixels, as in the previous example
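
For reference, the model this excerpt sets up is plain softmax regression. A minimal sketch of the hypothesis and the lamb-regularised cost, under the usual conventions (theta of shape num_classes x input_size; the function names are ours, not from this repository):

import numpy as np

def softmax_probabilities(theta, x):
    """Column-wise class probabilities for inputs x of shape (input_size, m)."""
    z = theta.dot(x)                 # (num_classes, m)
    z = z - z.max(axis=0)            # subtract the per-column max for stability
    e = np.exp(z)
    return e / e.sum(axis=0)

def softmax_cost(theta, x, y, lamb):
    """Mean negative log-likelihood plus (lamb/2) * ||theta||^2 weight decay."""
    m = x.shape[1]
    p = softmax_probabilities(theta, x)
    log_lik = np.log(p[y.astype(int), np.arange(m)])
    return -log_lik.mean() + (lamb / 2.0) * np.sum(theta ** 2)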
Example #4
from library import mnist
from library import nn
from library import sparsedbn
from library import util

import scipy.optimize


if __name__ == '__main__':

	DISPLAY = False

	inputSize = 28 * 28
	numOfClasses = 10

	trainData = mnist.load_images('../data/train-images-idx3-ubyte')
	trainLabels = mnist.load_labels('../data/train-labels-idx1-ubyte')
	testData = mnist.load_images('../data/t10k-images-idx3-ubyte')
	testLabels = mnist.load_labels('../data/t10k-labels-idx1-ubyte')

	# Build the DBN structure (a stack of RBM layers).
	stack = util.Empty()
	stack.layers = [util.Empty(), util.Empty()]
	stack.layers[0].type = 'i'
	stack.layers[0].size = inputSize

	stack.layers[1].type = 'rbm'
	stack.layers[1].size = 400

	# stack.layers[2].type = 'rbm'
	# stack.layers[2].size = 400
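
Each 'rbm' layer above denotes a restricted Boltzmann machine: a bipartite model over visible units \(v\) and hidden units \(h\) with energy \(E(v, h) = -a^\top v - b^\top h - v^\top W h\) and joint distribution \(P(v, h) \propto \exp(-E(v, h))\); this is background, not code from the repository. The commented-out lines sketch where a second 400-unit RBM layer would go.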
Example #5
# Assumed import for the MNIST loader used below (not shown in this excerpt).
from library import mnist

import scipy.optimize


if __name__ == '__main__':

	inputSize = 28 * 28
	numOfLabels = 5
	hiddenSize = 200
	sparsityParam = 0.1
	lamb = 3e-3
	beta = 3
	maxfun = 400

	print('Loading raw MNIST data...')
	mnist_data = mnist.load_images('../data/train-images-idx3-ubyte')
	mnist_labels = mnist.load_labels('../data/train-labels-idx1-ubyte')

	# Simulate a Labeled and Unlabeled set

	print('Splitting MNIST data...')
	labeledSet = mnist_labels <= 4
	unlabeledSet = mnist_labels >= 5

	unlabeledData = mnist_data[:, unlabeledSet]
	labeledData = mnist_data[:, labeledSet]
	labels = mnist_labels[labeledSet]

	numOfTrain = labels.size // 2

	trainData = labeledData[:, :numOfTrain]
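
In the self-taught-learning setup this excerpt follows, the remaining half of the labelled digits would typically serve as the test set. A hypothetical continuation (our guess, not code from the repository):

	# hypothetical continuation of the split
	trainLabels = labels[:numOfTrain]
	testData = labeledData[:, numOfTrain:]
	testLabels = labels[numOfTrain:]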