Example #1
def finetune_sae(X, y, weights, finetune_iter, alpha, decay):
    """TODO: Docstring for finetune_sae.

    :arg1: TODO
    :returns: TODO

    """
    m = X.shape[0]
    t_x = T.matrix(name='x')
    t_y = T.matrix(name='y')
    pred = sae_extract(t_x, weights[:-1])
    pred = softmax_predict(pred, *weights[-1])  # i.e. softmax_predict(pred, weights[-1][0], weights[-1][1])
    cost = cost4softmax(pred, t_y, m, decay, weights[-1][0])

    unroll = []
    for hp in weights:
        unroll.append(hp[0])
        unroll.append(hp[1])
    grad = T.grad(cost, unroll)

    trainit = init_gd_trainer(inputs=[t_x, t_y], outputs=[pred, cost],
                              name='trainit', params=unroll,
                              grad=grad, alpha=alpha)

    for i in range(finetune_iter):
        pred, err = trainit(X, y)
        if i%100 == 0:
            print('iter: %d, err: %f' % (i, err))

    return [(unroll[2*idx], unroll[2*idx+1]) for idx in range(len(unroll) // 2)]
Example #2
def testClassifier(model,images,labels):
    """
    函数功能:测试softmax分类器的性能
    """
    # 对数据的维度进行重构以适应softmax的测试接口
    n_test_images = images.shape[1]
    softmax_images = np.transpose(images, axes = [0,2,3,1])
    softmax_images = softmax_images.reshape((int(softmax_images.size / n_test_images), n_test_images))
    softmax_labels = labels.flatten() - 1
    predictions = softmax.softmax_predict(model, softmax_images)
    accuracy = 100 * np.sum(predictions == softmax_labels, dtype=np.float64) / labels.shape[0]
    print("Accuracy: {0:.2f}%".format(accuracy))
Example #3
def sae_predict(X, weights):
    """TODO: Docstring for sae_predict.
    :returns: TODO

    """
    inp = T.matrix(name='inp')
    for idx, hp in enumerate(weights):
        if idx == 0:
            res = ae_encode(inp, hp[0], hp[1])
        elif idx != len(weights)-1:
            res = ae_encode(res, hp[0], hp[1])
        else:
            res = softmax_predict(res, hp[0], hp[1])

    f = function(inputs=[inp,], outputs=[res,], name='f')

    return f(X)[0]
Example #4
def stacked_ae_predict(theta, input_size, hidden_size, n_classes, net_config,
                       data):
    """
    theta: optimal theta
    input_size:  the number of input units
    hidden_size: the number of hidden units *at the 2nd layer*
    n_classes:   the number of categories
    net_config:  the network configuration of the stack
    data: our matrix containing the test data as columns.  So, data[:, i] is the i-th test example.

    pred: the prediction array.
    """

    # First extract the part of theta that holds the softmax parameters
    softmax_theta = theta[0:hidden_size * n_classes].reshape(
        (n_classes, hidden_size))

    # Extract out the "stack"
    stack = params2stack(theta[hidden_size * n_classes:], net_config)

    # Number of examples
    m = data.shape[1]

    # Forward pass
    z = [np.zeros(1)]
    a = [data]
    for s in stack:
        z.append(s['w'].dot(a[-1]) + s['b'].reshape((-1, 1)))
        a.append(sigmoid(z[-1]))

    learned_features = a[-1]

    # Softmax model
    model = {}
    model['opt_theta'] = softmax_theta
    model['n_classes'] = n_classes
    model['input_size'] = hidden_size

    # Make predictions
    pred = softmax_predict(model, learned_features)

    return pred
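The forward pass above relies on a sigmoid helper that is not shown in the snippet; a minimal definition of the logistic function it is assumed to be:

import numpy as np

def sigmoid(z):
    """Elementwise logistic function 1 / (1 + exp(-z))."""
    return 1.0 / (1.0 + np.exp(-z))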
Example #5
def stacked_ae_predict(theta, input_size, hidden_size,
                       n_classes, net_config, data):
    """
    theta: optimal theta
    input_size:  the number of input units
    hidden_size: the number of hidden units *at the 2nd layer*
    n_classes:   the number of categories
    net_config:  the network configuration of the stack
    data: our matrix containing the test data as columns.  So, data[:, i] is the i-th test example.

    pred: the prediction array.
    """

    # First extract the part of theta that holds the softmax parameters
    softmax_theta = theta[0:hidden_size*n_classes].reshape((n_classes, hidden_size))

    # Extract out the "stack"
    stack = params2stack(theta[hidden_size*n_classes:], net_config)

    # Number of examples
    m = data.shape[1]

    # Forward pass
    z = [np.zeros(1)]
    a = [data]
    for s in stack:
        z.append(s['w'].dot(a[-1]) + s['b'].reshape((-1, 1)) )
        a.append(sigmoid(z[-1]))

    learned_features = a[-1]

    # Softmax model
    model = {}
    model['opt_theta']  = softmax_theta
    model['n_classes']  = n_classes
    model['input_size'] = hidden_size

    # Make predictions
    pred = softmax_predict(model, learned_features)

    return pred
Example #6
def stacked_autoencoder_predict(theta, input_size, hidden_size, num_classes, net_config, data):
    """
    Takes a trained theta and a test data set,
    and returns the predicted labels for each example
    :param theta: trained weights from the autoencoder
    :param input_size: the number of input units
    :param hidden_size: the number of hidden units at the layer before softmax
    :param num_classes: the number of categories
    :param net_config: network configuration of the stack
    :param data: the matrix containing the test data as columns; data[:, i] is the i-th example
    :return: the prediction array pred, where pred(i) is argmax_c P(y(c) | x(i))
    """

    ## Unroll theta parameter
    # First extract the part of theta that holds the softmax parameters
    softmax_theta = theta[0:hidden_size * num_classes].reshape(num_classes, hidden_size)

    # Extract out the "stack"
    stack = params2stack(theta[hidden_size * num_classes:], net_config)

    m = data.shape[1]

    # Compute predictions
    a = [data]
    z = [np.array(0)]  # Dummy value

    # Sparse Autoencoder Computation
    for s in stack:
        z.append(s['w'].dot(a[-1]) + np.tile(s['b'], (m, 1)).transpose())
        a.append(sigmoid(z[-1]))

    # Softmax
    pred = softmax.softmax_predict((softmax_theta, hidden_size, num_classes), a[-1])

    return pred
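The docstring above defines pred(i) as argmax_c P(y(c) | x(i)); below is a minimal sketch of a softmax_predict that accepts the (theta, input_size, n_classes) tuple used in this example. It is an assumed implementation, not the original softmax module.

import numpy as np

def softmax_predict_sketch(model, data):
    """Assumed interface: model = (theta, input_size, n_classes) and
    data has shape (input_size, n_examples).
    Returns pred where pred[i] = argmax_c P(y = c | x_i)."""
    theta, input_size, n_classes = model
    theta = np.asarray(theta).reshape((n_classes, input_size))
    scores = theta.dot(data)  # (n_classes, n_examples) class scores
    # The softmax normalization is monotone within each column, so the argmax
    # of the raw scores equals the argmax of the class probabilities.
    return np.argmax(scores, axis=0)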
Example #7
W1 = opt_theta[0:hidden_size * input_size].reshape(hidden_size, input_size).transpose()
display_network.display_network(W1)

##======================================================================
## STEP 3: Extract Features from the Supervised Dataset
#
#  You need to complete the feedforward autoencoder code (feedForwardAutoencoder.m
#  in the original MATLAB exercise) so that the following commands will extract
#  features from the data.

train_features = sparse_autoencoder.sparse_autoencoder(opt_theta, hidden_size,
                                                       input_size, train_data)

test_features = sparse_autoencoder.sparse_autoencoder(opt_theta, hidden_size,
                                                      input_size, test_data)
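The two feature-extraction calls above assume a feedforward pass through the trained autoencoder; the sketch below shows what that pass might compute, assuming the common UFLDL parameter layout theta = [W1, W2, b1, b2] with W1 of shape (hidden_size, input_size), which matches the W1 slice used for display above. This is an assumption, not the original sparse_autoencoder module.

import numpy as np

def feedforward_autoencoder_sketch(theta, hidden_size, visible_size, data):
    """Assumed layout: theta = [W1.ravel(), W2.ravel(), b1, b2] and
    data has shape (visible_size, n_examples).
    Returns the hidden activations sigmoid(W1 @ data + b1)."""
    W1 = theta[0:hidden_size * visible_size].reshape(hidden_size, visible_size)
    b1_start = 2 * hidden_size * visible_size
    b1 = theta[b1_start:b1_start + hidden_size]
    return 1.0 / (1.0 + np.exp(-(W1.dot(data) + b1.reshape(-1, 1))))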

##======================================================================
## STEP 4: Train the softmax classifier

lambda_ = 1e-4
options_ = {'maxiter': 400, 'disp': True}

opt_theta, input_size, num_classes = softmax.softmax_train(hidden_size, num_labels,
                                                           lambda_, train_features,
                                                           train_labels, options_)

##======================================================================
## STEP 5: Testing

predictions = softmax.softmax_predict((opt_theta, input_size, num_classes), test_features)
print "Accuracy: {0:.2f}%".format(100 * np.sum(predictions == test_labels, dtype=np.float64) / test_labels.shape[0])
Example #8
# Although we only use 100 iterations here to train a classifier for the
# MNIST data set, in practice, training for more iterations is usually
# beneficial.
"""
STEP 5: Testing

  You should now test your model against the test images.
  To do this, you will first need to write softmax_predict,
  which should return predictions given a softmax model and the input data.
"""
images = load_MNIST_images('data/mnist/t10k-images-idx3-ubyte')
labels = load_MNIST_labels('data/mnist/t10k-labels-idx1-ubyte')
input_data = images

# Make predictions
pred = softmax_predict(model, input_data)

acc = np.mean(labels == pred)
print("Accuracy: {:5.2f}% \n".format(acc * 100))
"""
Accuracy is the proportion of correctly classified images
After 100 iterations, the results for our implementation were:

Accuracy: 92.200%

If your values are too low (accuracy less than 0.91), you should check
your code for errors, and make sure you are training on the
entire data set of 60000 28x28 training images
(unless you modified the loading code, this should be the case)
"""
Example #9
    print('will train classifier...')
    # train softmax classifier on autoencoded features
    classifier = softmax.softmax_train(hidden_size, num_classes,
                                       softmax_weight_decay, l3_activations,
                                       train_labels, max_iter)

    np.save('softmax.0to9.model', classifier)

    # use model to predict
    print('will load test data...')
    test_patches, test_labels = get_data('../data/mnist.pkl.gz',
                                         train=False,
                                         num_samples=num_samples)

    print('will compute test features...')
    test_l2_activations = feedforward_autoencoder(l2_model, hidden_size,
                                                  visible_size, test_patches)
    assert test_l2_activations.shape == (hidden_size, test_patches.shape[1])

    print('will compute test features...')
    test_l3_activations = feedforward_autoencoder(l3_model, hidden_size,
                                                  hidden_size,
                                                  test_l2_activations)
    assert test_l3_activations.shape == (hidden_size, test_patches.shape[1])

    print('will predict labels...')
    predicted_labels = softmax.softmax_predict(classifier, test_l3_activations)
    assert len(predicted_labels) == len(test_labels)
    print('accuracy', 100 * np.mean(predicted_labels == test_labels))
    # 98.6 % accuracy!
Example #10

    # use model to predict
    print('will load test data...')
    test_patches, test_labels = get_data('../data/mnist.pkl.gz', 
                                         train=False, 
                                         num_samples=num_samples)

    print('will compute test features...')
    test_l2_activations = feedforward_autoencoder(l2_model, hidden_size,
                                                  visible_size, test_patches)
    assert test_l2_activations.shape == (hidden_size, test_patches.shape[1])

    print('will compute test features...')
    test_l3_activations = feedforward_autoencoder(l3_model, hidden_size,
                                                  hidden_size,
                                                  test_l2_activations)
    assert test_l3_activations.shape == (hidden_size, test_patches.shape[1])
    

    print('will predict labels...')
    predicted_labels = softmax.softmax_predict(classifier, test_l3_activations)
    assert len(predicted_labels) == len(test_labels)
    print('accuracy', 100 * np.mean(predicted_labels == test_labels))
    # 98.6 % accuracy!


Example #11
  You need to compute softmax_model using softmax_train on train_features and
  train_labels
"""

lambda_ = 1e-4 # weight decay parameter
options = {'maxiter': maxiter, 'disp': True}
softmax_model = softmax_train(hidden_size, n_labels, lambda_, train_features, train_labels, options)
"""
STEP 5: Testing

  Compute Predictions on the test set (test_features) using softmax_predict
  and softmax_model
"""

# Make predictions
pred = softmax_predict(softmax_model, test_features)

acc = np.mean(test_labels == pred)
print("The Accuracy (with learned features): {:5.2f}% \n".format(acc*100))

"""
 Accuracy is the proportion of correctly classified images
 The results for our implementation were:

 Accuracy: 98.3%
"""

# As a comparison, when raw pixels are used (instead of the learned features),
# we obtained a test accuracy of only around 96% (for the same train and test sets).

softmax_model = softmax_train(input_size, n_labels, lambda_, train_data, train_labels, options)
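The snippet stops after training the raw-pixel model; a hedged completion of the comparison, assuming test_data holds the raw test images in the same layout as train_data:

# Assumed completion (not in the original snippet): evaluate the raw-pixel
# model the same way the learned-feature model was evaluated above.
pred = softmax_predict(softmax_model, test_data)
acc = np.mean(test_labels == pred)
print("The Accuracy (with raw pixels): {:5.2f}% \n".format(acc * 100))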
Example #12
  train_labels
"""

lambda_ = 1e-4  # weight decay parameter
options = {'maxiter': maxiter, 'disp': True}
softmax_model = softmax_train(hidden_size, n_labels, lambda_, train_features,
                              train_labels, options)
"""
STEP 5: Testing

  Compute Predictions on the test set (test_features) using softmax_predict
  and softmax_model
"""

# Make predictions
pred = softmax_predict(softmax_model, test_features)

acc = np.mean(test_labels == pred)
print("The Accuracy (with learned features): {:5.2f}% \n".format(acc * 100))
"""
 Accuracy is the proportion of correctly classified images
 The results for our implementation were:

 Accuracy: 98.3%
"""

# As a comparison, when raw pixels are used (instead of the learned features),
# we obtained a test accuracy of only around 96% (for the same train and test sets).

softmax_model = softmax_train(input_size, n_labels, lambda_, train_data,
                              train_labels, options)
Example #13
    pooled_features_train = pickle.load(f)
    pooled_features_test = pickle.load(f)

# Setup parameters for softmax
softmax_lambda = 1e-4
num_classes = 4

# Reshape the pooled_features to form an input vector for softmax
softmax_images = np.transpose(pooled_features_train, axes=[0, 2, 3, 1])
softmax_images = softmax_images.reshape((softmax_images.size // num_train_images, num_train_images))
softmax_labels = train_labels.flatten() - 1  # Ensure that labels are from 0..n-1 (for n classes)

options_ = {'maxiter': 1000, 'disp': True}
softmax_model = softmax.softmax_train(softmax_images.size // num_train_images, num_classes,
                                      softmax_lambda, softmax_images, softmax_labels, options_)

(softmax_opt_theta, softmax_input_size, softmax_num_classes) = softmax_model


##======================================================================
## STEP 5: Test classifier
#  Now you will test your trained classifier against the test images
softmax_images = np.transpose(pooled_features_test, axes=[0, 2, 3, 1])
softmax_images = softmax_images.reshape((softmax_images.size // num_test_images, num_test_images))
softmax_labels = test_labels.flatten() - 1

predictions = softmax.softmax_predict(softmax_model, softmax_images)
print "Accuracy: {0:.2f}%".format(100 * np.sum(predictions == softmax_labels, dtype=np.float64) / test_labels.shape[0])

# You should expect to get an accuracy of around 80% on the test images.
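The transpose-and-reshape step above turns each image's pooled features into a single column; a tiny NumPy demonstration of the shapes involved (the sizes are made up for illustration):

import numpy as np

# pooled features: (n_filters, n_images, dim, dim)
pooled = np.arange(2 * 3 * 2 * 2).reshape(2, 3, 2, 2)
n_images = pooled.shape[1]

cols = np.transpose(pooled, axes=[0, 2, 3, 1])            # (n_filters, dim, dim, n_images)
cols = cols.reshape((pooled.size // n_images, n_images))  # (features, n_images)

print(cols.shape)  # (8, 3): one 8-dimensional feature column per image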
Example #14
# MNIST data set, in practice, training for more iterations is usually
# beneficial.

"""
STEP 5: Testing

  You should now test your model against the test images.
  To do this, you will first need to write softmax_predict,
  which should return predictions given a softmax model and the input data.
"""
images = load_MNIST_images('data/mnist/t10k-images-idx3-ubyte')
labels = load_MNIST_labels('data/mnist/t10k-labels-idx1-ubyte')
input_data = images

# Make predictions
pred = softmax_predict(model, input_data)

acc = np.mean(labels == pred)
print("Accuracy: {:5.2f}% \n".format(acc*100))

"""
Accuracy is the proportion of correctly classified images
After 100 iterations, the results for our implementation were:

Accuracy: 92.200%

If your values are too low (accuracy less than 0.91), you should check
your code for errors, and make sure you are training on the
entire data set of 60000 28x28 training images
(unless you modified the loading code, this should be the case)
"""
Example #15
    trained = softmax.softmax_train(hidden_size,
                                    num_classes,
                                    weight_decay, 
                                    train_activations,
                                    train_labels,
                                    max_iter)
    
    np.save('softmax.0to4.model', trained)
 
    # use model to predict
    print('will load test data...')
    test_patches, test_labels = get_data('../data/mnist.pkl.gz', 
                                         lambda l: l <= 4, 
                                         train=False, 
                                         num_samples=100000)

    print('will compute test features...')
    test_activations = feedforward_autoencoder(feature_model, 
                                               hidden_size, 
                                               visible_size, 
                                               test_patches)
    assert test_activations.shape == (hidden_size, test_patches.shape[1])
    

    print('will predict labels...')
    predicted_labels = softmax.softmax_predict(trained, test_activations)
    assert len(predicted_labels) == len(test_labels)
    print('accuracy', 100 * np.mean(predicted_labels == test_labels))
    # 98.6 % accuracy!


Example #16
softmax_input_size = int(pooled_features_train.size / n_train_images)

# Reshape the pooled_features to form an input vector for softmax
softmax_X = np.transpose(pooled_features_train, axes=[0, 2, 3, 1])
softmax_X = softmax_X.reshape((softmax_input_size, n_train_images))
softmax_Y = train_labels

options = {'maxiter': 200, 'disp': True}
softmax_model = softmax_train(softmax_input_size,
                              n_classes, softmax_lambda, softmax_X, softmax_Y, options)


"""
  STEP 5: Test classifier
  Now you will test your trained classifier against the test images

"""
softmax_input_size = int(pooled_features_test.size / n_test_images)

softmax_X = np.transpose(pooled_features_test, axes=[0, 2, 3, 1])
softmax_X = softmax_X.reshape((softmax_input_size, n_test_images))
softmax_Y = test_labels

# Make predictions
pred = softmax_predict(softmax_model, softmax_X)

acc = np.mean(softmax_Y == pred)
print("Accuracy: {:5.2f}% \n".format(acc*100))

# You should expect to get an accuracy of around 80% on the test images.
Example #17
n_classes = 4

softmax_input_size = int(pooled_features_train.size / n_train_images)

# Reshape the pooled_features to form an input vector for softmax
softmax_X = np.transpose(pooled_features_train, axes=[0, 2, 3, 1])
softmax_X = softmax_X.reshape((softmax_input_size, n_train_images))
softmax_Y = train_labels

options = {'maxiter': 200, 'disp': True}
softmax_model = softmax_train(softmax_input_size, n_classes, softmax_lambda,
                              softmax_X, softmax_Y, options)
"""
  STEP 5: Test classifier
  Now you will test your trained classifier against the test images

"""
softmax_input_size = int(pooled_features_test.size / n_test_images)

softmax_X = np.transpose(pooled_features_test, axes=[0, 2, 3, 1])
softmax_X = softmax_X.reshape((softmax_input_size, n_test_images))
softmax_Y = test_labels

# Make predictions
pred = softmax_predict(softmax_model, softmax_X)

acc = np.mean(softmax_Y == pred)
print("Accuracy: {:5.2f}% \n".format(acc * 100))

# You should expect to get an accuracy of around 80% on the test images.