Example No. 1
def trainClassifier(n_classes,lamda,images,labels,options):
    """
    函数功能:训练softmax训练器
    """
    # 对数据的维度进行重构以适应softmax训练器的接口
    n_train_images = images.shape[1]
    softmax_images = np.transpose(images, axes = [0,2,3,1])
    softmax_images = softmax_images.reshape((int(softmax_images.size / n_train_images), n_train_images))
    input_size = int(softmax_images.size / n_train_images)
    softmax_labels = labels.flatten() - 1 # 确保标注空间的值域为[0,n_classes-1]
    # 训练softmax分类器
    softmax_module = softmax.softmax_train(input_size,n_classes,lamda,softmax_images,softmax_labels,options)
    #(softmax_theta,softmax_input_size,softmax_n_classes) = softmax_module
    return softmax_module
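
# Illustrative sketch (not part of the original example): the reshape performed
# above, assuming the UFLDL pooled-feature layout (n_features, n_images, dim, dim).
import numpy as np

_pooled = np.random.rand(10, 8, 3, 3)                        # (n_features, n_images, dim, dim)
_n_images = _pooled.shape[1]
_flat = np.transpose(_pooled, axes=[0, 2, 3, 1])             # -> (n_features, dim, dim, n_images)
_flat = _flat.reshape((_flat.size // _n_images, _n_images))  # -> (n_features*dim*dim, n_images)
assert _flat.shape == (10 * 3 * 3, 8)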
Example No. 2
pooled_features_train = pooled_features_data['pooled_features_train']
pooled_features_test  = pooled_features_data['pooled_features_test']

# Setup parameters for softmax
softmax_lambda = 1e-4
n_classes = 4

softmax_input_size = int(pooled_features_train.size / n_train_images)

# Reshape the pooled_features to form an input vector for softmax
softmax_X = np.transpose(pooled_features_train, axes=[0, 2, 3, 1])
softmax_X = softmax_X.reshape((softmax_input_size, n_train_images))
softmax_Y = train_labels

options = {'maxiter': 200, 'disp': True}
softmax_model = softmax_train(softmax_input_size,
                              n_classes, softmax_lambda, softmax_X, softmax_Y, options)


"""
  STEP 5: Test classifier
  Now you will test your trained classifier against the test images

"""
softmax_input_size = int(pooled_features_test.size / n_test_images)

softmax_X = np.transpose(pooled_features_test, axes=[0, 2, 3, 1])
softmax_X = softmax_X.reshape((softmax_input_size, n_test_images))
softmax_Y = test_labels

# Make predictions
pred = softmax_predict(softmax_model, softmax_X)
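
# Sketch of how the matching examples below (Nos. 8 and 16) finish this step,
# assuming test_labels is a 1-D label array aligned with pred:
acc = np.mean(softmax_Y == pred)
print("Accuracy (pooled CNN features): {:5.2f}%".format(acc * 100))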
Example No. 3
print result

# ======================================================================
# STEP 4: Train the softmax classifier
#  This trains the softmax classifier on the second autoencoder's features.
#  If you've correctly implemented the softmax cost function, you don't need
#  to change anything here.

sae2_features = utils_hw.sparse_autoencoder(sae2_opt_theta, hidden_size_L2,
                                            hidden_size_L1, sae1_features)

options_ = {'maxiter': 400, 'disp': True}

softmax_theta, softmax_input_size, softmax_num_classes = softmax.softmax_train(hidden_size_L2, num_classes,
                                                                               lambda_, sae2_features,
                                                                               train_labels, options_)

# ======================================================================
# STEP 5: Finetune softmax model

# Implement the stacked_autoencoder_cost to give the combined cost of the whole model
# then run this cell.


# Initialize the stack using the parameters learned
stack = [dict() for i in range(2)]
stack[0]['w'] = sae1_opt_theta[0:hidden_size_L1 * input_size].reshape(hidden_size_L1, input_size)
stack[0]['b'] = sae1_opt_theta[2 * hidden_size_L1 * input_size:2 * hidden_size_L1 * input_size + hidden_size_L1]
stack[1]['w'] = sae2_opt_theta[0:hidden_size_L1 * hidden_size_L2].reshape(hidden_size_L2, hidden_size_L1)
stack[1]['b'] = sae2_opt_theta[2 * hidden_size_L1 * hidden_size_L2:2 * hidden_size_L1 * hidden_size_L2 + hidden_size_L2]
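
# Illustrative sketch (sizes are made up): the slicing above assumes the flat
# autoencoder parameter layout [W1, W2, b1, b2] used throughout these examples.
import numpy as np

_hidden, _visible = 4, 6
_theta = np.arange(2 * _hidden * _visible + _hidden + _visible, dtype=float)
_W1 = _theta[0:_hidden * _visible].reshape(_hidden, _visible)
_b1 = _theta[2 * _hidden * _visible:2 * _hidden * _visible + _hidden]
assert _W1.shape == (_hidden, _visible) and _b1.shape == (_hidden,)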
Example No. 4
"""
STEP 4: Train the softmax classifier

  This trains the softmax classifier on the second autoencoder's features.
  If you've correctly implemented softmax_cost, you don't need
  to change anything here.
"""

sae2_features = feedforward_autoencoder(sae2_opt_theta, hidden_size_L2, hidden_size_L1, sae1_features)

#  Instructions: Train the softmax classifier, the classifier takes in
#                input of dimension "hidden_size_L2" corresponding to the
#                hidden layer size of the 2nd layer.
#
#                You should store the optimal parameters in sae_softmax_opt_theta


options = {'maxiter': maxiter, 'disp': True}
softmax_model = softmax_train(hidden_size_L2, n_classes, lambda_, sae2_features, train_labels, options)
softmax_opt_theta = softmax_model['opt_theta']


"""
STEP 5: Finetune softmax model
"""

# Implement the stacked_ae_cost to give the combined cost of the whole model then run this cell.

# Initialize the stack using the parameters learned

n_stack = 2 # Two layers
stack = [{} for i in range(n_stack)]

stack[0]['w'] = sae1_opt_theta[0:hidden_size_L1*input_size].reshape((hidden_size_L1, input_size))
Example No. 5
W1 = opt_theta[0:hidden_size * input_size].reshape(hidden_size, input_size).transpose()
display_network.display_network(W1)
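
# Rough sketch of the visualization step (matplotlib usage here is an assumption,
# not the repo's display_network): each hidden unit's weight vector is one patch.
import matplotlib.pyplot as plt

_patch_dim = int(np.sqrt(input_size))
_fig, _axes = plt.subplots(5, 5, figsize=(5, 5))
for _ax, _col in zip(_axes.ravel(), W1.T):    # one hidden unit per panel
    _ax.imshow(_col.reshape(_patch_dim, _patch_dim), cmap='gray')
    _ax.axis('off')
plt.show()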

##======================================================================
## STEP 3: Extract Features from the Supervised Dataset
#
#  You need to complete the feed-forward autoencoder code so that the
#  following command will extract features from the data.

train_features = sparse_autoencoder.sparse_autoencoder(opt_theta, hidden_size,
                                                       input_size, train_data)

test_features = sparse_autoencoder.sparse_autoencoder(opt_theta, hidden_size,
                                                      input_size, test_data)
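
# Minimal sketch of what the feature-extraction call above computes, assuming
# the flat [W1, W2, b1, b2] theta layout (not the repo's actual implementation):
# hidden activations = sigmoid(W1.dot(data) + b1), one example per column.
def _feedforward_sketch(theta, hidden_size, visible_size, data):
    W1 = theta[0:hidden_size * visible_size].reshape(hidden_size, visible_size)
    b1 = theta[2 * hidden_size * visible_size:
               2 * hidden_size * visible_size + hidden_size]
    return 1.0 / (1.0 + np.exp(-(W1.dot(data) + b1[:, np.newaxis])))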

##======================================================================
## STEP 4: Train the softmax classifier

lambda_ = 1e-4
options_ = {'maxiter': 400, 'disp': True}

opt_theta, input_size, num_classes = softmax.softmax_train(hidden_size, num_labels,
                                                           lambda_, train_features,
                                                           train_labels, options_)

##======================================================================
## STEP 5: Testing

predictions = softmax.softmax_predict((opt_theta, input_size, num_classes), test_features)
print "Accuracy: {0:.2f}%".format(100 * np.sum(predictions == test_labels, dtype=np.float64) / test_labels.shape[0])
Example No. 6
                                                            m=10,
                                                            factr=10.0,
                                                            pgtol=1e-8,
                                                            iprint=1)

    print 'will calculate l3 features...'
    l3_activations = feedforward_autoencoder(l3_model, hidden_size,
                                             hidden_size, l2_activations)
    assert l3_activations.shape == (hidden_size, l2_activations.shape[1])

    np.save('l3.0to9.model', l3_model)

    print 'will train classifier...'
    # train softmax classifier on autoencoded features
    classifier = softmax.softmax_train(hidden_size, num_classes,
                                       softmax_weight_decay, l3_activations,
                                       train_labels, max_iter)

    np.save('softmax.0to9.model', classifier)

    # use model to predict
    print 'will load test data...'
    test_patches, test_labels = get_data('../data/mnist.pkl.gz',
                                         train=False,
                                         num_samples=num_samples)

    print 'will compute test features...'
    test_l2_activations = feedforward_autoencoder(l2_model, hidden_size,
                                                  visible_size, test_patches)
    assert test_l2_activations.shape == (hidden_size, test_patches.shape[1])
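
    # Sketch of how this pipeline presumably finishes (the names below are
    # assumptions, in the style of the other examples in this listing):
    test_l3_activations = feedforward_autoencoder(l3_model, hidden_size,
                                                  hidden_size, test_l2_activations)
    predictions = softmax.softmax_predict(classifier, test_l3_activations)
    print 'accuracy:', np.mean(predictions == test_labels)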
Example No. 7
    diff = np.linalg.norm(numgrad - grad) / np.linalg.norm(numgrad + grad)
    print("Norm of difference = ", diff)

    # The difference should be small.
    # In our implementation, these values are usually less than 1e-7.

    # When your gradients are correct, congratulations!
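
# Minimal central-difference gradient checker of the kind the comparison above
# assumes (a generic sketch, not this repo's own checking routine):
def _numerical_gradient(J, theta, eps=1e-4):
    grad = np.zeros_like(theta)
    for i in range(theta.size):
        step = np.zeros_like(theta)
        step[i] = eps
        grad[i] = (J(theta + step) - J(theta - step)) / (2 * eps)
    return grad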
"""
STEP 4: Learning parameters

  Once you have verified that your gradients are correct,
  you can start training your softmax regression code using softmax_train.
"""

options = {'maxiter': 100, 'disp': True}
model = softmax_train(input_size, n_classes, lambda_, input_data, labels,
                      options)

# Although we only use 100 iterations here to train a classifier for the
# MNIST data set, in practice, training for more iterations is usually
# beneficial.
"""
STEP 5: Testing

  You should now test your model against the test images.
  To do this, you will first need to write softmax_predict,
  which should return predictions given a softmax model and the input data.
"""
images = load_MNIST_images('data/mnist/t10k-images-idx3-ubyte')
labels = load_MNIST_labels('data/mnist/t10k-labels-idx1-ubyte')
input_data = images
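
# Sketch of the prediction step this leads up to (mirrors Examples 8 and 10
# below; the truncated original may differ):
pred = softmax_predict(model, input_data)
acc = np.mean(labels == pred)
print("Accuracy: {:5.2f}%".format(acc * 100))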
Example No. 8

"""
STEP 4: Train the softmax classifier

  Use softmax_train from the previous exercise to train a multi-class classifier.

  Use lambda = 1e-4 for the weight regularization for softmax

  You need to compute softmax_model using softmax_train on train_features and
  train_labels
"""

lambda_ = 1e-4 # weight decay parameter
options = {'maxiter': maxiter, 'disp': True}
softmax_model = softmax_train(hidden_size, n_labels, lambda_, train_features, train_labels, options)
"""
STEP 5: Testing

  Compute Predictions on the test set (test_features) using softmax_predict
  and softmax_model
"""

# Make predictions
pred = softmax_predict(softmax_model, test_features)

acc = np.mean(test_labels == pred)
print("The Accuracy (with learned features): {:5.2f}% \n".format(acc*100))

"""
 Accuracy is the proportion of correctly classified images
Example No. 9
                                           lambda l: l <= 4, 
                                           train=True, 
                                           num_samples=100000)

    print 'will calculate training features...'
    train_activations = feedforward_autoencoder(feature_model, 
                                                hidden_size, 
                                                visible_size, 
                                                train_patches)
    assert train_activations.shape == (hidden_size, train_patches.shape[1])

    print 'will train classifier...'
    # train softmax classifier on autoencoded features
    trained = softmax.softmax_train(hidden_size, 
                                    num_classes, 
                                    weight_decay, 
                                    train_activations,
                                    train_labels,
                                    max_iter)
    
    np.save('softmax.0to4.model', trained)
 
    # use model to predict
    print 'will load test data...'
    test_patches, test_labels = get_data('../data/mnist.pkl.gz', 
                                         lambda l: l <= 4, 
                                         train=False, 
                                         num_samples=100000)

    print 'will compute test features...'
    test_activations = feedforward_autoencoder(feature_model, 
                                               hidden_size, 
Example No. 10
test_features = feedforward_autoencoder(opt_theta, hidden_size, input_size,
                                        test_data)
"""
STEP 4: Train the softmax classifier

  Use softmax_train from the previous exercise to train a multi-class classifier.

  Use lambda = 1e-4 for the weight regularization for softmax

  You need to compute softmax_model using softmax_train on train_features and
  train_labels
"""

lambda_ = 1e-4  # weight decay parameter
options = {'maxiter': maxiter, 'disp': True}
softmax_model = softmax_train(hidden_size, n_labels, lambda_, train_features,
                              train_labels, options)
"""
STEP 5: Testing

  Compute Predictions on the test set (test_features) using softmax_predict
  and softmax_model
"""

# Make predictions
pred = softmax_predict(softmax_model, test_features)

acc = np.mean(test_labels == pred)
print("The Accuracy (with learned features): {:5.2f}% \n".format(acc * 100))
"""
 Accuracy is the proportion of correctly classified images
 The results for our implementation were:
Example No. 11
    # Use this to visually compare the gradients side by side
    print num_grad, grad

    # Compare numerically computed gradients with the ones obtained from backpropagation
    diff = np.linalg.norm(num_grad - grad) / np.linalg.norm(num_grad + grad)
    print diff
    print "Norm of the difference between numerical and analytical num_grad (should be < 1e-7)\n\n"

##======================================================================
## STEP 4: Learning parameters
#
#  Once you have verified that your gradients are correct,
#  you can start training your softmax regression code using softmax_train.

options_ = {'maxiter': 100, 'disp': True}
opt_theta, input_size, num_classes = softmax.softmax_train(input_size, num_classes,
                                                           lambda_, input_data, labels, options_)

##======================================================================
## STEP 5: Testing
#
#  You should now test your model against the test images.
#  To do this, you will first need to write softmax_predict,
#  which should return predictions given a softmax model and the input data.

test_images = load_MNIST.load_MNIST_images('data/mnist/t10k-images.idx3-ubyte')
test_labels = load_MNIST.load_MNIST_labels('data/mnist/t10k-labels.idx1-ubyte')
predictions = softmax.softmax_predict((opt_theta, input_size, num_classes), test_images)
print "Accuracy: {0:.2f}%".format(100 * np.sum(predictions == test_labels, dtype=np.float64) / test_labels.shape[0])
Example No. 12
X_train, y_train = load_training()
print "Done"

#=============================================================
#   Step 2: 1-Of-K coding scheme

print "One hot encoding..."
T_train = one_hot_encoding(y_train)
del y_train
print "Done"

#=============================================================
#   Step 3: Training softmax
start = time.clock()
print "Training..."
W_optima = softmax_train(X_train, T_train, 2)
print "Done"
print u"Training stage takes :%.4f seconds" % (time.clock() - start)

#=============================================================
#   Step 4: Testing
del X_train, T_train
print "Testing..."
X_test, y_test = load_testing()
Phi_test = add_dummy_variable(X_test)
del X_test

m = np.shape(Phi_test)[1]
k = len(np.unique(y_test))
y_predict = softmax_test(W_optima, Phi_test, m, k)
print "Done"
Example No. 13
# Load pooled features
with open('cnn_pooled_features.pickle', 'rb') as f:  # binary mode for pickle files
    pooled_features_train = pickle.load(f)
    pooled_features_test = pickle.load(f)

# Setup parameters for softmax
softmax_lambda = 1e-4
num_classes = 4

# Reshape the pooled_features to form an input vector for softmax
softmax_images = np.transpose(pooled_features_train, axes=[0, 2, 3, 1])
softmax_images = softmax_images.reshape((softmax_images.size / num_train_images, num_train_images))
softmax_labels = train_labels.flatten() - 1  # Ensure that labels are from 0..n-1 (for n classes)

options_ = {'maxiter': 1000, 'disp': True}
softmax_model = softmax.softmax_train(softmax_images.size / num_train_images, num_classes,
                                      softmax_lambda, softmax_images, softmax_labels, options_)

(softmax_opt_theta, softmax_input_size, softmax_num_classes) = softmax_model


##======================================================================
## STEP 5: Test classifier
#  Now you will test your trained classifier against the test images
softmax_images = np.transpose(pooled_features_test, axes=[0, 2, 3, 1])
softmax_images = softmax_images.reshape((softmax_images.size / num_test_images, num_test_images))
softmax_labels = test_labels.flatten() - 1

predictions = softmax.softmax_predict(softmax_model, softmax_images)
print "Accuracy: {0:.2f}%".format(100 * np.sum(predictions == softmax_labels, dtype=np.float64) / test_labels.shape[0])

# You should expect to get an accuracy of around 80% on the test images.
Example No. 14
    print("Norm of difference = ", diff)

    # The difference should be small.
    # In our implementation, these values are usually less than 1e-7.

    # When your gradients are correct, congratulations!

"""
STEP 4: Learning parameters

  Once you have verified that your gradients are correct,
  you can start training your softmax regression code using softmax_train.
"""

options = {'maxiter': 100, 'disp': True}
model = softmax_train(input_size, n_classes, lambda_, input_data, labels, options)

# Although we only use 100 iterations here to train a classifier for the
# MNIST data set, in practice, training for more iterations is usually
# beneficial.

"""
STEP 5: Testing

  You should now test your model against the test images.
  To do this, you will first need to write softmax_predict,
  which should return predictions given a softmax model and the input data.
"""
images = load_MNIST_images('data/mnist/t10k-images-idx3-ubyte')
labels = load_MNIST_labels('data/mnist/t10k-labels-idx1-ubyte')
input_data = images
Example No. 15
    print 'will calculate l3 features...'
    l3_activations = feedforward_autoencoder(l3_model,
                                             hidden_size,
                                             hidden_size,
                                             l2_activations)
    assert l3_activations.shape == (hidden_size, l2_activations.shape[1])


    np.save('l3.0to9.model', l3_model)

    print 'will train classifier...'
    # train softmax classifier on autoencoded features
    classifier = softmax.softmax_train(hidden_size,
                                       num_classes,
                                       softmax_weight_decay,
                                       l3_activations,
                                       train_labels,
                                       max_iter)
    
    np.save('softmax.0to9.model', classifier)

    # use model to predict
    print 'will load test data...'
    test_patches, test_labels = get_data('../data/mnist.pkl.gz', 
Example No. 16
pooled_features_train = pooled_features_data['pooled_features_train']
pooled_features_test = pooled_features_data['pooled_features_test']

# Setup parameters for softmax
softmax_lambda = 1e-4
n_classes = 4

softmax_input_size = int(pooled_features_train.size / n_train_images)

# Reshape the pooled_features to form an input vector for softmax
softmax_X = np.transpose(pooled_features_train, axes=[0, 2, 3, 1])
softmax_X = softmax_X.reshape((softmax_input_size, n_train_images))
softmax_Y = train_labels

options = {'maxiter': 200, 'disp': True}
softmax_model = softmax_train(softmax_input_size, n_classes, softmax_lambda,
                              softmax_X, softmax_Y, options)
"""
  STEP 5: Test classifier
  Now you will test your trained classifier against the test images

"""
softmax_input_size = int(pooled_features_test.size / n_test_images)

softmax_X = np.transpose(pooled_features_test, axes=[0, 2, 3, 1])
softmax_X = softmax_X.reshape((softmax_input_size, n_test_images))
softmax_Y = test_labels

# Make predictions
pred = softmax_predict(softmax_model, softmax_X)

acc = np.mean(softmax_Y == pred)