Example #1
Theta1 = sae1OptTheta[0:hiddenSizeL1 * (inputSize + 1)].reshape(
    hiddenSizeL1, inputSize + 1)

displayData(Theta1[:, 1:])

input('\nProgram paused. Press enter to continue.\n')

# -------------------------------------------------------------------------

##======================================================================
## STEP 2: Train the second sparse autoencoder
#  This trains the second sparse autoencoder on the features produced by the
#  first autoencoder.
#  If you've correctly implemented sparseAutoencoderCost.py, you don't need
#  to change anything here.

sae1Features = feedForwardAutoencoder(sae1OptTheta, hiddenSizeL1, inputSize,
                                      trainData)
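
# A minimal sketch of what feedForwardAutoencoder is assumed to compute in this
# codebase: unpack the first-layer parameters from the flat theta (bias stored
# in column 0, matching the Theta1[:, 1:] display above) and return the sigmoid
# activations of the hidden layer. Illustrative only, not the graded code.
import numpy as np

def feedForwardAutoencoderSketch(theta, hiddenSize, visibleSize, data):
    Theta1 = theta[0:hiddenSize * (visibleSize + 1)].reshape(
        hiddenSize, visibleSize + 1)
    b1 = Theta1[:, 0:1]   # bias column
    W1 = Theta1[:, 1:]    # hiddenSize x visibleSize weight matrix
    # data is visibleSize x numExamples, so the result is hiddenSize x numExamples
    return 1.0 / (1.0 + np.exp(-(W1.dot(data) + b1)))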

#  Randomly initialize the parameters
sae2Theta = initializeParameters(hiddenSizeL2, hiddenSizeL1)

## ---------------------- YOUR CODE HERE  ---------------------------------
#  Instructions: Train the second layer sparse autoencoder. This layer has
#                a hidden size of "hiddenSizeL2" and an input size of
#                "hiddenSizeL1".
#
#                You should store the optimal parameters in sae2OptTheta

costFunc = lambda p: sparseAutoencoderCost(
    p, hiddenSizeL1, hiddenSizeL2, _lambda, sparsityParam, beta, sae1Features.T
)
# minimize with analytic gradients (jac=True), as in the other examples here;
# the maxiter value is illustrative.
res = minimize(costFunc, sae2Theta, method='L-BFGS-B', jac=True,
               options={'maxiter': 400})
sae2OptTheta = res.x
Example #2
# For 1000 random points
for i in range(1000):
    featureNum = random.randint(0, hiddenSize - 1)
    imageNum = random.randint(0, 7)
    imageRow = random.randint(0, imageDim - patchDim)
    imageCol = random.randint(0, imageDim - patchDim)

    patch = convImages[imageRow:imageRow + patchDim,
                       imageCol:imageCol + patchDim, :, imageNum]
    patch = np.concatenate((patch[:, :, 0].flatten(), patch[:, :, 1].flatten(),
                            patch[:, :, 2].flatten())).reshape(-1, 1)
    patch = patch - meanPatch
    patch = ZCAWhite.dot(patch)

    features = feedForwardAutoencoder(optTheta.reshape(-1, 1), hiddenSize,
                                      visibleSize, patch)

    if abs(features[featureNum, 0] -
           convolvedFeatures[featureNum, imageNum, imageRow, imageCol]) > 1e-9:
        print('Convolved feature does not match activation from autoencoder')
        print('Feature Number    : %d' % featureNum)
        print('Image Number      : %d' % imageNum)
        print('Image Row         : %d' % imageRow)
        print('Image Column      : %d' % imageCol)
        print('Convolved feature : %0.5f' %
              convolvedFeatures[featureNum, imageNum, imageRow, imageCol])
        print('Sparse AE feature : %0.5f' % features[featureNum, 0])
        exit(0)

print('Congratulations! Your convolution code passed the test.')
input('\nProgram paused. Press enter to continue.\n')
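
# The meanPatch and ZCAWhite matrices used in the check above are assumed to
# come from a standard ZCA-whitening step over the training patches; a minimal
# sketch of that computation (epsilon is a hypothetical regularization term):
import numpy as np

def computeZCASketch(patches, epsilon=0.1):
    # patches: visibleSize x numPatches
    meanPatch = np.mean(patches, axis=1, keepdims=True)
    centered = patches - meanPatch
    sigma = centered.dot(centered.T) / centered.shape[1]
    U, S, _ = np.linalg.svd(sigma)
    ZCAWhite = U.dot(np.diag(1.0 / np.sqrt(S + epsilon))).dot(U.T)
    return meanPatch, ZCAWhite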
Example #3
iterations = 200
model = autoencoder(input_layer_size, hidden_layer_size, beta, rho, lambd)
input_data = unlabeledData.T
theta = scipy.optimize.minimize(
    model.autoencoder_Cost_Grad,
    x0=model.theta,
    args=(input_data,),
    method="L-BFGS-B",
    jac=True,
    options={"maxiter": iterations},
)


# theta.x packs the parameters as a flat vector [W1, W2, b1, b2]; recover W1 and b1.
W1 = theta.x[0 : model.W1_dim].reshape(hidden_layer_size, input_layer_size)
b1 = theta.x[model.W1_dim + model.W2_dim : model.W1_dim + model.W2_dim + model.b1_dim].reshape(hidden_layer_size, 1)

showHiddenIMAGES(W1, patch_size, hidden_patch_size)

############# Extracting features
feedForward_train = feedForwardAutoencoder(W1, b1, trainData.T)
feedForward_test = feedForwardAutoencoder(W1, b1, testData.T)
a2_train = feedForward_train.hidden_layer_activiation()
a2_test = feedForward_test.hidden_layer_activiation()
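
# A minimal sketch of the class-style feedForwardAutoencoder helper assumed
# above: it holds W1/b1 and exposes the sigmoid activations of the hidden layer
# for its input. The method name mirrors the calls above; the body is an
# assumption.
import numpy as np

class FeedForwardAutoencoderSketch(object):
    def __init__(self, W1, b1, data):
        # data: input_layer_size x num_examples
        self.W1, self.b1, self.data = W1, b1, data

    def hidden_layer_activiation(self):
        # hidden_layer_size x num_examples sigmoid activations
        return 1.0 / (1.0 + np.exp(-(self.W1.dot(self.data) + self.b1)))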


############# Training and testing the logistic regression model
iterations = 100
option = {"maxiter": iterations}
theta_labeled = softmaxTrain(hidden_layer_size, numLabels, lambd, (a2_train.T, trainLabels), option)
pred = softmaxPredict(theta_labeled, (a2_test.T, testLabels))
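
# Hedged usage note: assuming softmaxPredict returns one predicted label per
# test example, the accuracy can be reported directly.
import numpy as np
accuracy = 100.0 * np.mean(np.asarray(pred).ravel() == np.asarray(testLabels).ravel())
print('Test accuracy: %0.2f%%' % accuracy)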
Example #4
#  STEP 2: Train the sparse autoencoder
#
#  This trains the sparse autoencoder on the unlabeled training images
#  and saves the trained parameters to disk for later use.
trainAutoencoder.run_training(FLAGS, unlabeled_images)

# ======================================================================
#  STEP 3: Extract Features from the Supervised Dataset
#
#  You need to complete the code in feedForwardAutoencoder.py and use it
#  here to extract features from train and test images.


# ----------------- YOUR CODE HERE ----------------------

train_features = feedForwardAutoencoder(FLAGS, train_images)
test_features = feedForwardAutoencoder(FLAGS, test_images)

# ======================================================================
#  STEP 4: Train the softmax classifier

#  Set parameters for Softmax classifier
FLAGS.visibleSize = train_features.shape[0]
FLAGS.decay = 1e-4

#  The trainSoftmax.py uses softmax.py from the previous assignment to train
#  a multi-class classifier on trainFeatures and trainLabels.

theta = trainSoftmax.run_training(FLAGS, train_features, train_labels)

# ======================================================================
Example #5
#train_set= (train_set[0][1:50],train_set[1][1:50])
#######  Train the first sparse autoencoder
iterations = 200
model_1 = autoencoder(input_layer_size, hidden_layer_size_1, beta, rho, lambd)
theta_1 = scipy.optimize.minimize(model_1.autoencoder_Cost_Grad, x0=model_1.theta,
                                  args=(train_set[0].T,),
                                  method='L-BFGS-B',
                                  jac=True,
                                  options={'maxiter': iterations})

W1_1 = theta_1.x[0:model_1.W1_dim].reshape(hidden_layer_size_1, input_layer_size)
b1_1 = theta_1.x[model_1.W1_dim + model_1.W2_dim:
                 model_1.W1_dim + model_1.W2_dim + model_1.b1_dim].reshape(hidden_layer_size_1, 1)

#######  Train the second sparse autoencoder
iterations = 200
feedForward_train_1 = feedForwardAutoencoder(W1_1, b1_1, train_set[0].T)
a2_train = feedForward_train_1.hidden_layer_activiation()

model_2 = autoencoder(hidden_layer_size_1, hidden_layer_size_2, beta, rho, lambd)
theta_2 = scipy.optimize.minimize(model_2.autoencoder_Cost_Grad, x0=model_2.theta,
                                  args=(a2_train,),
                                  method='L-BFGS-B',
                                  jac=True,
                                  options={'maxiter': iterations})
W1_2 = theta_2.x[0:model_2.W1_dim].reshape(hidden_layer_size_2, hidden_layer_size_1)
b1_2 = theta_2.x[model_2.W1_dim + model_2.W2_dim:
                 model_2.W1_dim + model_2.W2_dim + model_2.b1_dim].reshape(hidden_layer_size_2, 1)

####### Train the softmax classifier on the L2 features
iterations = 200
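
# The example is truncated here; a hedged sketch of the remaining step, reusing
# the feedForwardAutoencoder/softmaxTrain pattern from the earlier example
# (numLabels and the label array train_set[1] are assumptions):
feedForward_train_2 = feedForwardAutoencoder(W1_2, b1_2, a2_train)
a3_train = feedForward_train_2.hidden_layer_activiation()
theta_labeled = softmaxTrain(hidden_layer_size_2, numLabels, lambd,
                             (a3_train.T, train_set[1]), {'maxiter': iterations})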
Example #6
opttheta = res.x

Theta1 = opttheta[0:hiddenSize * (inputSize + 1)].reshape(
    hiddenSize, inputSize + 1)

displayData(Theta1[:, 1:])

input('Program paused. Press enter to continue.')

##======================================================================
## STEP 3: Extract Features from the Supervised Dataset
#
#  You need to complete the code in feedForwardAutoencoder.py so that the
#  following command will extract features from the data.

trainFeatures = feedForwardAutoencoder(opttheta, hiddenSize, inputSize,
                                       trainData)
testFeatures = feedForwardAutoencoder(opttheta, hiddenSize, inputSize,
                                      testData)

##======================================================================
## STEP 4: Train the softmax classifier

inputSize = 200
numClasses = 10

_lambda = 1e-4

theta = 0.005 * np.random.randn(numClasses * inputSize)

costFunc = lambda p: softmaxCost(p, numClasses, inputSize, _lambda,
                                 trainFeatures, trainLabels)
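
# The snippet ends here; a minimal sketch of how training would typically
# continue with the same minimize pattern used above (assumed to be
# scipy.optimize.minimize with costFunc returning both cost and gradient;
# the maxiter value is an assumption):
res = minimize(costFunc, theta, method='L-BFGS-B', jac=True,
               options={'maxiter': 400})
softmaxOptTheta = res.x.reshape(numClasses, inputSize)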
Example #7
print()

# ======================================================================
#  STEP 2: Train the sparse autoencoder
#  This trains the sparse autoencoder on the unlabeled training images,
#  and saves the trained parameters to disk.
saver_path = trainAutoencoder.run_training(FLAGS, unlabeled_images)

# ======================================================================
#  STEP 3: Extract Features from the Supervised Dataset

# ----------------- YOUR CODE HERE ----------------------
#  Use feedForwardAutoencoder to extract features from train and test images.
#  You need to complete the code in feedForwardAutoencoder.py.

train_features = feedForwardAutoencoder(saver_path, train_images)
test_features = feedForwardAutoencoder(saver_path, test_images)

# ======================================================================
#  STEP 4: Train the softmax classifier
#

# Set parameters for softmax classifier
FLAGS.learning_rate = 0.1
FLAGS.visibleSize = train_features.shape[1]
FLAGS.max_steps = 80000
FLAGS.batch_size = 100

# Utilize the built-in DataSet to create datasets for training and testing.
# Set reshape = False and dtype = tf.uint8 to avoid changing of the features
# by DataSet.
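
# A hedged sketch of the step described above; the DataSet import path and
# constructor arguments are assumptions based on the TF 1.x MNIST tutorial code:
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet

train_dataset = DataSet(train_features, train_labels,
                        reshape=False, dtype=tf.uint8)
test_dataset = DataSet(test_features, test_labels,
                       reshape=False, dtype=tf.uint8)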