Example #1
def main():
    # get the subset of the MNIST dataset
    train_data, train_label, test_data, test_label = mnist()

    # initialize learning rate and num_iterations
    learning_rate = 0.1
    num_iterations = 2000

    # initialize w as an array of shape (d, 1) and b as a scalar
    w = np.zeros((train_data.shape[0], 1))
    b = 0

    # learn the weights by using the optimizer function
    parameters, gradients, costs = optimizer(train_data, \
                    train_label, w, b, learning_rate, num_iterations)
    w = parameters["w"]
    b = parameters["b"]

    # compute the accuracy for the training set and testing set
    train_Pred = classify(train_data, w, b)
    test_Pred = classify(test_data, w, b)

    temp = np.equal(train_Pred, train_label)
    temp2 = np.equal(test_Pred, test_label)

    trAcc = (np.sum(temp) / train_label.shape[1]) * 100
    teAcc = (np.sum(temp2) / test_label.shape[1]) * 100
    print("Accuracy for training set is {} %".format(trAcc))
    print("Accuracy for testing set is {} %".format(teAcc))

    # plot the recorded costs against their index and save the figure
    plt.plot(np.arange(1, len(costs) + 1), costs)
    plt.xlabel("Iterations")
    plt.ylabel("Error")
    plt.savefig("Error vs iterations.png")
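All four examples call the same project helpers, mnist, optimizer, and classify, whose definitions are not shown here. The calls above do confirm the signature optimizer(X, Y, w, b, learning_rate, num_iterations) and a return of (parameters, gradients, costs) with parameters["w"] and parameters["b"]. A minimal sketch of what such an optimizer might look like for logistic regression is below; the forward pass, cross-entropy cost, and the choice to record the cost every 10 iterations (which would give 200 cost values for a 2000-iteration run, matching the 200-point x-axis Example #1 originally used) are assumptions, and the original project's internals may differ.

import numpy as np

def sigmoid(z):
    # logistic function
    return 1.0 / (1.0 + np.exp(-z))

def optimizer(X, Y, w, b, learning_rate, num_iterations):
    """Hypothetical gradient-descent trainer for logistic regression.

    X: data of shape (d, m); Y: labels of shape (1, m).
    Returns the learned parameters, the last gradients, and the recorded costs.
    """
    m = X.shape[1]
    costs = []
    for i in range(num_iterations):
        A = sigmoid(np.dot(w.T, X) + b)                             # activations, shape (1, m)
        cost = -np.mean(Y * np.log(A) + (1 - Y) * np.log(1 - A))    # cross-entropy cost
        dw = np.dot(X, (A - Y).T) / m                               # gradient w.r.t. w, shape (d, 1)
        db = np.sum(A - Y) / m                                      # gradient w.r.t. b, scalar
        w = w - learning_rate * dw
        b = b - learning_rate * db
        if i % 10 == 0:                                             # record the cost every 10 iterations
            costs.append(cost)
    parameters = {"w": w, "b": b}
    gradients = {"dw": dw, "db": db}
    return parameters, gradients, costs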
Example #2
def main():
    # get the subset of the MNIST dataset
    train_data, train_label, test_data, test_label = mnist()

    # initialize learning rate and num_iterations
    learning_rate = 0.1
    num_iterations = 2000

    # initialize w as an array of shape (d, 1) and b as a scalar

    w = np.zeros((train_data.shape[0], 1))
    b = 0

    # learning the weights by using optimize function
    parameters, gradients, costs = optimizer(train_data, \
                    train_label, w, b, learning_rate, num_iterations)
    w = parameters["w"]
    b = parameters["b"]
    # compute the accuracy for training set and testing set
    train_Pred = classify(train_data, w, b)
    test_Pred = classify(test_data, w, b)

    trAcc = 100 - np.mean(np.abs(train_Pred - train_label)) * 100
    teAcc = 100 - np.mean(np.abs(test_Pred - test_label)) * 100
    print("Accuracy for training set is {} %".format(trAcc))
    print("Accuracy for testing set is {} %".format(teAcc))
Example #3
def main():
    # get the subset of the MNIST dataset
    train_data, train_label, test_data, test_label = mnist()

    # initialize learning rate and num_iterations
    learning_rate = 0.1
    num_iterations = 2000

    # initialize w with random integers in [0, 5), shape (d, 1), and b as a scalar
    w = np.random.randint(5, size=(train_data.shape[0], 1))
    b = 1

    # learning the weights by using optimize function
    parameters, gradients, costs = optimizer(train_data, \
                    train_label, w, b, learning_rate, num_iterations)
    w = parameters["w"]
    b = parameters["b"]
    # compute the accuracy for training set and testing set
    train_Pred = classify(train_data, w, b)
    test_Pred = classify(test_data, w, b)
    trAcc = np.sum(train_Pred == train_label) / float(len(train_Pred[0])) * 100
    teAcc = np.sum(test_Pred == test_label) / float(len(test_Pred[0])) * 100
    print("Accuracy for training set is {} %".format(trAcc))
    print("Accuracy for testing set is {} %".format(teAcc))

    plt.plot(range(1, len(costs) + 1), costs, label='Cost')
    plt.xlabel('Iterations')
    plt.ylabel('Cost')
    plt.title('Mini Project1:1.3 Logistic Regression-Cost vs Iterations')
    plt.legend()
    plt.show()
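Every example thresholds its predictions with the same classify helper. A minimal sketch of how it might work is below, assuming it applies the learned linear model, passes the result through the sigmoid, and thresholds at 0.5; the original project's implementation may differ.

import numpy as np

def classify(X, w, b):
    # Hypothetical classifier: sigmoid(w^T X + b) thresholded at 0.5.
    # X has shape (d, m); the returned predictions have shape (1, m).
    A = 1.0 / (1.0 + np.exp(-(np.dot(w.T, X) + b)))
    return (A > 0.5).astype(float)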
Example #4
def main():
    # get the subset of the MNIST dataset
    train_data, train_label, test_data, test_label = mnist()

    # initialize learning rate and num_iterations
    learning_rate = 0.1
    num_iterations = 2000

    # initialize w as array (d,1) and b as a scalar
    w = np.zeros((train_data.shape[0], 1))
    b = 0

    # learning the weights by using optimize function
    parameters, gradients, costs = optimizer(train_data, \
                    train_label, w, b, learning_rate, num_iterations)
    w = parameters["w"]
    b = parameters["b"]
    # compute the accuracy for training set and testing set
    train_Pred = classify(train_data, w, b)
    test_Pred = classify(test_data, w, b)

    trAcc = 100 - (np.sum(
        np.abs(train_Pred - train_label)) / train_Pred.shape[1]) * 100
    teAcc = 100 - (np.sum(
        np.abs(test_Pred - test_label)) / test_Pred.shape[1]) * 100
    print("Accuracy for training set is {} %".format(trAcc))
    print("Accuracy for testing set is {} %".format(teAcc))
    plt.plot(costs)
    plt.xlabel('iteration')
    plt.ylabel('error')
    plt.savefig('error_plot.png')
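All four examples rely on mnist() returning data in column-major layout: features along axis 0 and samples along axis 1, i.e. train_data of shape (d, m_train) and train_label of shape (1, m_train), and likewise for the test split. That layout is what makes expressions such as train_data.shape[0] (feature count for w) and train_label.shape[1] or test_Pred.shape[1] (sample counts in the accuracy denominators) correct. A hypothetical shape check along these lines can catch a transposed loader early:

# Hypothetical sanity check for the layout the examples assume.
train_data, train_label, test_data, test_label = mnist()
assert train_data.shape[1] == train_label.shape[1], "samples must be on axis 1"
assert train_label.shape[0] == 1, "labels are expected as a (1, m) row vector"
assert test_data.shape[0] == train_data.shape[0], "train/test feature dims must match"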