Code Example #1
def run_softmax_on_MNIST(temp_parameter=1):
    """
    Trains softmax, classifies test data, computes test error, and plots cost function

    Runs softmax_regression on the MNIST training set and computes the test error using
    the test set. It uses the following values for parameters:
    alpha = 0.3
    lambda = 1e-4
    num_iterations = 150

    Saves the final theta to ./theta.pkl.gz

    Returns:
        The tuple (test_error, test_error_mod3): the final test error on the
        10-class digit labels and on the labels taken mod 3
    """
    train_x, train_y, test_x, test_y = get_MNIST_data()
    theta, cost_function_history = softmax_regression(train_x,
                                                      train_y,
                                                      temp_parameter,
                                                      alpha=0.3,
                                                      lambda_factor=1.0e-4,
                                                      k=10,
                                                      num_iterations=150)
    plot_cost_function_over_time(cost_function_history)
    test_error = compute_test_error(test_x, test_y, theta, temp_parameter)
    # Save the model parameters theta obtained from calling softmax_regression to disk.
    write_pickle_data(theta, "./theta.pkl.gz")

    # "Using the Current Model" question (tab 4): evaluate the trained theta
    # on the labels taken mod 3, without retraining.
    (train_y_mod3, test_y_mod3) = update_y(train_y, test_y)
    test_error_mod3 = compute_test_error_mod3(test_x, test_y_mod3, theta,
                                              temp_parameter)

    return (test_error, test_error_mod3)
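
For reference, a minimal usage sketch for Code Example #1 (hypothetical; the temperature values below are illustrative, not prescribed by the example):

# Hypothetical driver: sweep a few temperature values and report both errors.
for temp in (0.5, 1.0, 2.0):
    test_error, test_error_mod3 = run_softmax_on_MNIST(temp_parameter=temp)
    print('temp={}: test_error={}, test_error_mod3={}'.format(
        temp, test_error, test_error_mod3))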
Code Example #2
def run_softmax(model,
                train_x,
                train_y,
                temp_parameter,
                k=10,
                plot_cost=False,
                folder_cache='./softmax'):
    """
    Runs softmax_regression on the training set
    
    It uses the following values for parameters:
    alpha = 0.3
    lambda = 1e-4
    num_iterations = 150

    Returns:
        Final theta
    """
    theta = read_pickle_data("{}/softmax_{}_theta_{}.pkl.gz".format(
        folder_cache, model, temp_parameter))
    if theta is None:
        theta, cost_function_history = softmax_regression(train_x,
                                                          train_y,
                                                          temp_parameter,
                                                          alpha=0.3,
                                                          lambda_factor=1.0e-4,
                                                          k=k,
                                                          num_iterations=150)
        # Save the model parameters theta obtained from calling softmax_regression to disk.
        write_pickle_data(
            theta,
            "{}/softmax_{}_theta_{}.pkl.gz".format(folder_cache, model,
                                                   temp_parameter))
        write_pickle_data(
            cost_function_history,
            "{}/softmax_{}_cost_{}.pkl.gz".format(folder_cache, model,
                                                  temp_parameter))
    if plot_cost:
        cost = read_pickle_data("{}/softmax_{}_cost_{}.pkl.gz".format(
            folder_cache, model, temp_parameter))
        plot_cost_function_over_time(cost)
    return theta
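
Code Example #2 expects read_pickle_data to return None when no cache file exists. These helpers are not shown in the examples; a plausible sketch, assuming gzip-compressed pickle files (an assumption, not the examples' actual implementation):

import gzip
import os
import pickle

def write_pickle_data(data, file_name):
    # Serialize the object to a gzip-compressed pickle file.
    with gzip.open(file_name, 'wb') as f:
        pickle.dump(data, f)

def read_pickle_data(file_name):
    # Return the cached object, or None when the cache file is absent.
    if not os.path.exists(file_name):
        return None
    with gzip.open(file_name, 'rb') as f:
        return pickle.load(f)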
Code Example #3
def run_softmax_on_MNIST_mod3(temp_parameter=1):
    """
    Trains Softmax regression on digit (mod 3) classifications.

    See run_softmax_on_MNIST for more info.
    """

    train_x, train_y, test_x, test_y = get_MNIST_data()

    # map the labels to their values mod 3
    (train_y_mod3, test_y_mod3) = update_y(train_y, test_y)
    theta, cost_function_history = softmax_regression(train_x,
                                                      train_y_mod3,
                                                      temp_parameter,
                                                      alpha=0.3,
                                                      lambda_factor=1.0e-4,
                                                      k=10,  # note: only classes 0-2 occur mod 3
                                                      num_iterations=150)
    plot_cost_function_over_time(cost_function_history)
    test_error = compute_test_error(test_x, test_y_mod3, theta, temp_parameter)

    return test_error
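
update_y is called but not defined in these examples. Given the mod-3 comments, a minimal sketch (an assumption about its behavior):

def update_y(train_y, test_y):
    # Collapse the 10 digit classes into 3 by taking each label mod 3.
    return train_y % 3, test_y % 3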
Code Example #4
    convData = cnn.mergeRow(convData)

    #step5 softmax regression
    inputSize = convData.shape[0]
    valid_featureMap = cnn.convolutionFeatureMap(validating_data, b1,
                                                 convWeight)
    valid_convData = cnn.pooling(valid_featureMap, poolingWeight)
    valid_convData = cnn.mergeRow(valid_convData)
    validator = validation.validator(valid_convData, validating_label,
                                     (numClasses, inputSize))

    #W = softmax.softmax_regression(inputSize,numClasses,0,convData,training_labels,100)
    W = softmax.softmax_regression(inputSize,
                                   numClasses,
                                   0,
                                   convData,
                                   training_label,
                                   400,
                                   validator,
                                   a=1.9)

    #step6 testing
    print('testing')
    featureMap = cnn.convolutionFeatureMap(test_data, b1, convWeight)
    convData = cnn.pooling(featureMap, poolingWeight)
    convData = cnn.mergeRow(convData)

    theta = W.reshape((numClasses, -1))
    benchmark = loadTestResult()
    predict(theta, convData, benchmark)
    print('done')
Code Example #5
# TODO: First fill out the PCA functions in features.py as the below code depends on them.

n_components = 18
pcs = principal_components(train_x)
train_pca = project_onto_PC(train_x, pcs, n_components)
test_pca = project_onto_PC(test_x, pcs, n_components)
# train_pca (and test_pca) is a representation of our training (and test) data
# after projecting each example onto the first 18 principal components.

# TODO: Train your softmax regression model using (train_pca, train_y)
#       and evaluate its accuracy on (test_pca, test_y).
temp_parameter = 1
theta, cost_function_history = softmax_regression(train_pca,
                                                  train_y,
                                                  temp_parameter,
                                                  alpha=0.3,
                                                  lambda_factor=1.0e-4,
                                                  k=10,
                                                  num_iterations=150)
plot_cost_function_over_time(cost_function_history)
pca_test_error = compute_test_error(test_pca, test_y, theta, temp_parameter)

print('PCA test_error=', pca_test_error)

# TODO: Use the plot_PC function in features.py to produce scatterplot
#       of the first 100 MNIST images, as represented in the space spanned by the
#       first 2 principal components found above.
plot_PC(train_x[:100], pcs, train_y[:100])

# TODO: Use the reconstruct_PC function in features.py to show
#       the first and second MNIST images as reconstructed solely from
#       their first 18 principal components, and compare them with the
#       original images.
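
principal_components and project_onto_PC live in features.py and are not shown here. A minimal NumPy sketch, assuming components are returned as columns sorted by decreasing eigenvalue:

import numpy as np

def principal_components(X):
    # Sketch: eigenvectors of the scatter matrix of the centered data,
    # reordered so columns go from largest to smallest eigenvalue.
    centered = X - X.mean(axis=0)
    _, eigvecs = np.linalg.eigh(centered.T @ centered)
    return eigvecs[:, ::-1]

def project_onto_PC(X, pcs, n_components):
    # Project mean-centered data onto the first n_components components.
    # Caveat: for test data, the training-set feature means should be
    # subtracted instead of X's own means.
    centered = X - X.mean(axis=0)
    return centered @ pcs[:, :n_components]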
Code Example #6
    fig = plt.figure(2)

    for index in range(hiddenSize // 10):
        weight = W1[index, :]
        weight = np.reshape(weight, (28, 28))
        ax = fig.add_subplot(5, 4, 1 + index)
        ax.imshow(weight, mpl.cm.gray)

    plt.show()

    #step5 extract features from test & training data
    #TODO move sigmoid to miscellaneous
    labeled_dataset = nnet.normalization(labeled_dataset)
    test_data = nnet.normalization(test_data)
    train_a1 = nnet.sigmoid(W1.dot(labeled_dataset)+b1)
    test_a1 = nnet.sigmoid(W1.dot(test_data)+b1)

    #step 6 softmax regression
    W = softmax.softmax_regression(hiddenSize, numLabels, lmd, train_a1, labeled_labelset, 100)

    #step 7 testing
    theta = W.reshape((numLabels, hiddenSize))
    predict = (theta.dot(test_a1)).argmax(0)
    print(predict)
    print(test_labels.flatten())
    accuracy = (predict == test_labels.flatten())
    print('Accuracy:', accuracy.mean())
    print('done')
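
nnet.sigmoid (and nnet.normalization) are external to this snippet; sigmoid is presumably the standard logistic function. A minimal sketch of that assumption (the normalization scheme is not specified by the example and is not sketched):

import numpy as np

def sigmoid(z):
    # Standard logistic activation, applied elementwise.
    return 1.0 / (1.0 + np.exp(-z))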
    
    
Code Example #7
    #    ax = fig.add_subplot(5,4,1+index)
    #    ax.imshow(weight,mpl.cm.gray)

    #plt.show()


    #step 6 softmax regression
    test_data = nnet.normalization(test_data)

    train_a1 = nnet.sigmoid(W11.dot(training_data)+b11)
    train_a2 = nnet.sigmoid(W21.dot(train_a1)+b21)
    #test_a1 = nnet.sigmoid(W11.dot(training_data)+b11)
    test_a1 = nnet.sigmoid(W11.dot(test_data)+b11)
    test_a2 = nnet.sigmoid(W21.dot(test_a1)+b21)

    W = softmax.softmax_regression(hiddenSizeL2, numClasses, 0, train_a2, training_labels, 800, a=0.7)
    #W = softmax.softmax_regression(hiddenSizeL1,numClasses,lmd,train_a1,training_labels,100)

    #step 7 testing
    theta = W.reshape((numClasses, hiddenSizeL2))
    predict = (theta.dot(test_a2)).argmax(0)
    #theta = W.reshape((numClasses, hiddenSizeL1))
    #predict = (theta.dot(test_a1)).argmax(0)
    print(predict)
    print(test_labels.flatten())
    accuracy = (predict == test_labels.flatten())
    #accuracy = (predict == training_labels.flatten())
    print('Accuracy:', accuracy.mean())
    end = time.time()
    print('time cost: ', end - start)
    print('done')
Code Example #8
    imagesize = 50
    convWeight = convolutionWeight(W1, patchsize, imagesize, step)
    featureMap = convolutionFeatureMap(training_set, b1, convWeight)
    # step4 pooling layer
    poolingSize = 2
    poolingCore = 1.0 / math.pow(poolingSize, 2) * np.ones((1, poolingSize * poolingSize))
    featureSize = int(math.sqrt(featureMap[0].shape[0]))  # feature maps are square
    poolingWeight = convolutionWeight(poolingCore, poolingSize, featureSize, poolingSize)
    poolingWeight = poolingWeight[0]
    convData = pooling(featureMap, poolingWeight)
    convData = mergeRow(convData)

    inputSize = convData.shape[0]

    # step5 softmax regression
    W = softmax.softmax_regression(inputSize, numClasses, 0.003, convData, training_labels, 100)

    # step6 validation & testing
    inputSize = convData.shape[0]
    theta = W.reshape((numClasses, inputSize))

    print "validation"
    predict(theta, convData, training_labels)

    print "testing"
    featureMap = convolutionFeatureMap(testing_set, b1, convWeight)
    convData = pooling(featureMap, poolingWeight)
    convData = mergeRow(convData)
    # convData = mergeRow(featureMap)
    predict(theta, convData, testing_lables)
    print "done"
Code Example #9
    #step4 pooling layer
    poolingSize = 2
    poolingCore = 1.0/math.pow(poolingSize, 2) * np.ones((1, poolingSize*poolingSize))
    featureSize = int(math.sqrt(featureMap[0].shape[0]))  # feature maps are square
    poolingWeight = cnn.convolutionWeight(poolingCore, poolingSize, featureSize, poolingSize)
    poolingWeight = poolingWeight[0]
    convData = cnn.pooling(featureMap, poolingWeight)
    convData = cnn.mergeRow(convData)

    #step5 softmax regression 
    inputSize = convData.shape[0]
    valid_featureMap = cnn.convolutionFeatureMap(validating_data, b1, convWeight)
    valid_convData = cnn.pooling(valid_featureMap, poolingWeight)
    valid_convData = cnn.mergeRow(valid_convData)
    validator = validation.validator(valid_convData, validating_label, (numClasses, inputSize))

    #W = softmax.softmax_regression(inputSize,numClasses,0,convData,training_labels,100)
    W = softmax.softmax_regression(inputSize, numClasses, 0, convData, training_label, 400, validator, a=1.9)

    #step6 testing
    print('testing')
    featureMap = cnn.convolutionFeatureMap(test_data, b1, convWeight)
    convData = cnn.pooling(featureMap, poolingWeight)
    convData = cnn.mergeRow(convData)

    theta = W.reshape((numClasses, -1))
    benchmark = loadTestResult()
    predict(theta, convData, benchmark)
    print('done')
Code Example #10
    #plt.show()

    #step 6 softmax regression
    test_data = nnet.normalization(test_data)

    train_a1 = nnet.sigmoid(W11.dot(training_data) + b11)
    train_a2 = nnet.sigmoid(W21.dot(train_a1) + b21)
    #test_a1 = nnet.sigmoid(W11.dot(training_data)+b11)
    test_a1 = nnet.sigmoid(W11.dot(test_data) + b11)
    test_a2 = nnet.sigmoid(W21.dot(test_a1) + b21)

    W = softmax.softmax_regression(hiddenSizeL2,
                                   numClasses,
                                   0,
                                   train_a2,
                                   training_labels,
                                   800,
                                   a=0.7)
    #W = softmax.softmax_regression(hiddenSizeL1,numClasses,lmd,train_a1,training_labels,100)

    #step 7 testing
    theta = W.reshape((numClasses, hiddenSizeL2))
    predict = (theta.dot(test_a2)).argmax(0)
    #theta = W.reshape((numClasses, hiddenSizeL1))
    #predict = (theta.dot(test_a1)).argmax(0)
    print(predict)
    print(test_labels.flatten())
    accuracy = (predict == test_labels.flatten())
    #accuracy = (predict == training_labels.flatten())
    print('Accuracy:', accuracy.mean())
Code Example #11
    imagesize = 50
    convWeight = convolutionWeight(W1, patchsize, imagesize, step)
    featureMap = convolutionFeatureMap(training_set, b1, convWeight)
    #step4 pooling layer
    poolingSize = 2
    poolingCore = 1.0/math.pow(poolingSize, 2) * np.ones((1, poolingSize*poolingSize))
    featureSize = int(math.sqrt(featureMap[0].shape[0]))  # feature maps are square
    poolingWeight = convolutionWeight(poolingCore, poolingSize, featureSize, poolingSize)
    poolingWeight = poolingWeight[0]
    convData = pooling(featureMap, poolingWeight)
    convData = mergeRow(convData)

    inputSize = convData.shape[0]

    #step5 softmax regression 
    W = softmax.softmax_regression(inputSize, numClasses, 0.003, convData, training_labels, 100)

    #step6 validation & testing
    inputSize = convData.shape[0]
    theta = W.reshape((numClasses, inputSize))

    print('validation')
    predict(theta, convData, training_labels)

    print('testing')
    featureMap = convolutionFeatureMap(testing_set, b1, convWeight)
    convData = pooling(featureMap, poolingWeight)
    convData = mergeRow(convData)
    #convData = mergeRow(featureMap)
    predict(theta, convData, testing_lables)
    print('done')