Example #1

import load_MNIST
import utils_hw

## STEP 0: Here we provide the relevant parameters values that will
#  allow your sparse autoencoder to get good filters; you do not need to
#  change the parameters below.

input_size = 28 * 28
num_classes = 10
hidden_size_L1 = 10  # Layer 1 Hidden Size
hidden_size_L2 = 10  # Layer 2 Hidden Size
lambda_ = 3e-3  # weight decay parameter

# ======================================================================
# STEP 1: Load data from the MNIST database
#
#  This loads our training data from the MNIST database files.

train_images = load_MNIST.load_MNIST_images('train-images.idx3-ubyte')
train_labels = load_MNIST.load_MNIST_labels('train-labels.idx1-ubyte')
train_images = train_images[:, 0:10]
train_labels = train_labels[0:10]

# ======================================================================
# STEP 2: Train the first sparse autoencoder
#  This trains the first sparse autoencoder on the unlabelled MNIST training
#  images.
#  If you've correctly implemented sparse_autoencoder_cost, you don't need
#  to change anything here.

#  Randomly initialize the parameters
sae1_theta = utils_hw.initialize(hidden_size_L1, input_size)

J = lambda x: utils_hw.sparse_autoencoder_cost(x, input_size, hidden_size_L1,
                                               lambda_, train_images)
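
# The snippet stops after defining the cost function J. Since this example
# keeps only 10 images, a natural next step is a numerical gradient check
# rather than full training. The sketch below is illustrative and assumes
# utils_hw.sparse_autoencoder_cost returns a (cost, grad) pair, as in the
# usual UFLDL-style implementations.
import numpy as np

def numerical_gradient(fun, theta, eps=1e-4):
    """Central-difference approximation of the gradient of fun at theta."""
    grad = np.zeros_like(theta)
    for i in range(theta.size):
        step = np.zeros_like(theta)
        step[i] = eps
        grad[i] = (fun(theta + step)[0] - fun(theta - step)[0]) / (2 * eps)
    return grad

cost, grad = J(sae1_theta)
num_grad = numerical_gradient(J, sae1_theta)
# The relative difference should be tiny (e.g. < 1e-8) if the analytic
# gradient is implemented correctly.
print(np.linalg.norm(num_grad - grad) / np.linalg.norm(num_grad + grad))
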
Example #2

import load_MNIST
import sparse_autoencoder

input_size = 28 * 28
num_classes = 10
hidden_size_L1 = 200  # Layer 1 Hidden Size
hidden_size_L2 = 200  # Layer 2 Hidden Size
sparsity_param = 0.1  # desired average activation of the hidden units.
lambda_ = 3e-3  # weight decay parameter
beta = 3  # weight of sparsity penalty term

##======================================================================
## STEP 1: Load data from the MNIST database
#
#  This loads our training data from the MNIST database files.

train_images = load_MNIST.load_MNIST_images('data/mnist/train-images-idx3-ubyte')
train_labels = load_MNIST.load_MNIST_labels('data/mnist/train-labels-idx1-ubyte')


##======================================================================
## STEP 2: Train the first sparse autoencoder
#  This trains the first sparse autoencoder on the unlabelled MNIST training
#  images.
#  If you've correctly implemented sparse_autoencoder_cost, you don't need
#  to change anything here.

#  Randomly initialize the parameters
sae1_theta = sparse_autoencoder.initialize(hidden_size_L1, input_size)

J = lambda x: sparse_autoencoder.sparse_autoencoder_cost(x, input_size, hidden_size_L1,
                                                         lambda_, sparsity_param,
                                                         beta, train_images)
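
# Once J has been minimized (for example with scipy.optimize.minimize and
# L-BFGS) to obtain sae1_opt_theta, the first layer's hidden activations
# become the training data for the second autoencoder (hidden_size_L2).
# A minimal sketch, assuming the usual UFLDL packing of theta as
# [W1, W2, b1, b2]; the names below are illustrative, not part of the
# original code.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def feedforward_autoencoder(theta, hidden_size, visible_size, data):
    """Return the hidden-layer activations sigmoid(W1 @ data + b1)."""
    W1 = theta[0:hidden_size * visible_size].reshape(hidden_size, visible_size)
    b1_start = 2 * hidden_size * visible_size
    b1 = theta[b1_start:b1_start + hidden_size]
    return sigmoid(W1.dot(data) + b1.reshape(-1, 1))

# sae1_features = feedforward_autoencoder(sae1_opt_theta, hidden_size_L1,
#                                         input_size, train_images)
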
Example #3

sparsity_param = 0.1   # desired average activation of the hidden units
                       #  (denoted by rho in the lecture notes).
lambda_ = 3e-3         # weight decay parameter
beta = 3               # weight of sparsity penalty term

maxiter = 400          # Maximum iterations for training

"""
STEP 1: Load data from the MNIST database

  This loads our training data from the MNIST database files.
"""

# Load MNIST database files
train_data   = load_MNIST_images('data/mnist/train-images-idx3-ubyte')
train_labels = load_MNIST_labels('data/mnist/train-labels-idx1-ubyte')


"""
STEP 2: Train the first sparse autoencoder

  This trains the first sparse autoencoder on the unlabelled MNIST training images.
  If you've correctly implemented sparse_autoencoder_cost, you don't need
  to change anything here.
"""

# Randomly initialize the parameters
sae1_theta = initialize_parameters(hidden_size_L1, input_size)

#  Instructions: Train the first-layer sparse autoencoder; this layer has
#                a hidden size of "hidden_size_L1".
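
# The original snippet ends with the instructions above; a minimal sketch of
# the requested training call follows. It assumes sparse_autoencoder_cost
# takes the usual UFLDL arguments in this order and returns a (cost, grad)
# pair; the names sae1_cost and sae1_opt_theta are illustrative.
import scipy.optimize

sae1_cost = lambda theta: sparse_autoencoder_cost(theta, input_size,
                                                  hidden_size_L1, lambda_,
                                                  sparsity_param, beta,
                                                  train_data)
result = scipy.optimize.minimize(sae1_cost, sae1_theta, method='L-BFGS-B',
                                 jac=True, options={'maxiter': maxiter})
sae1_opt_theta = result.x  # trained first-layer parameters
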
Example #4

import numpy as np
import load_MNIST

num_labels = 5
hidden_size = 196

sparsity_param = 0.1  # desired average activation of the hidden units.
lambda_ = 3e-3  # weight decay parameter
beta = 3  # weight of sparsity penalty term

## ======================================================================
#  STEP 1: Load data from the MNIST database
#
#  This loads our training and test data from the MNIST database files.
#  The data has already been arranged for you, so you will not have to
#  change anything here.

images = load_MNIST.load_MNIST_images('data/mnist/train-images-idx3-ubyte')
labels = load_MNIST.load_MNIST_labels('data/mnist/train-labels-idx1-ubyte')

unlabeled_index = np.argwhere(labels >= 5).flatten()
labeled_index = np.argwhere(labels < 5).flatten()

num_train = round(labeled_index.shape[0] / 2)
train_index = labeled_index[0:num_train]
test_index = labeled_index[num_train:]

unlabeled_data = images[:, unlabeled_index]

train_data = images[:, train_index]
train_labels = labels[train_index]

test_data = images[:, test_index]
test_labels = labels[test_index]
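
# A quick sanity check of the self-taught-learning split defined above:
# digits 5-9 form the unlabeled set used to train the autoencoder, while
# digits 0-4 are split in half into labeled train and test sets.
print('# examples in unlabeled set: {}'.format(unlabeled_data.shape[1]))
print('# examples in supervised training set: {}'.format(train_data.shape[1]))
print('# examples in supervised testing set: {}'.format(test_data.shape[1]))
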
Example #5

import load_MNIST
import utils_hw

## STEP 0: Here we provide the relevant parameters values

input_size = 28 * 28
num_classes = 10
hidden_size_L1 = 40  # Layer 1 Hidden Size
hidden_size_L2 = 40  # Layer 2 Hidden Size
lambda_ = 3e-3  # weight decay parameter

##======================================================================
## STEP 1: Load data from the MNIST database
#
#  This loads our training data from the MNIST database files.

train_images = load_MNIST.load_MNIST_images(
    'C:/Users/leon/Documents/data/mnist/train-images-idx3-ubyte')
train_labels = load_MNIST.load_MNIST_labels(
    'C:/Users/leon/Documents/data/mnist/train-labels-idx1-ubyte')
# Keep only the first 500 training examples
train_images = train_images[:, 0:500]
train_labels = train_labels[0:500]

##======================================================================
## STEP 2: Train the first sparse autoencoder
#  This trains the first sparse autoencoder on the unlabelled MNIST training
#  images.

#  Randomly initialize the parameters
sae1_theta = utils_hw.initialize(hidden_size_L1, input_size)

J = lambda x: utils_hw.sparse_autoencoder_cost(x, input_size, hidden_size_L1,
                                               lambda_, train_images)
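
# A minimal sketch of finishing this example: minimize J with L-BFGS and
# inspect the learned first-layer filters. Assumes the cost function returns
# a (cost, grad) pair and the usual UFLDL packing of theta as [W1, W2, b1, b2];
# the plotting layout below is illustrative.
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt

result = scipy.optimize.minimize(J, sae1_theta, method='L-BFGS-B', jac=True,
                                 options={'maxiter': 400})
W1 = result.x[0:hidden_size_L1 * input_size].reshape(hidden_size_L1, input_size)

# Show the first 25 of the 40 learned filters as 28x28 images.
fig, axes = plt.subplots(5, 5, figsize=(6, 6))
for ax, w in zip(axes.flat, W1):
    ax.imshow(w.reshape(28, 28), cmap='gray')
    ax.axis('off')
plt.show()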