Example #1
def main():
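    # Compare an nvb classifier (presumably a naive-Bayes-style model) trained on the raw
    # MNIST pixels against the same classifier trained on PCA projections of several ranks.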
    digits = mnist()
    mynvb = nvb()
    x = center_matrix_SVD(digits.train_Images)
    mynvb.fit(digits.train_Images,digits.train_Labels)
    labels = mynvb.predict(digits.test_Images)
    errors_Full, error_Full_index = class_error_rate(labels,digits.test_Labels)
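    # Repeat the fit/predict/error steps on rank-154, rank-50, and rank-70 PCA projections,
    # projecting the centered test images onto the same principal directions (rows of x.V).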
    mynvb.fit(x.PCA[:,:154],digits.train_Labels)
    newtest = (digits.test_Images -x.centers)@np.transpose(x.V[:154,:])
    labels = mynvb.predict(newtest)
    errors_154, error_Full_index = class_error_rate(labels,digits.test_Labels)
    mynvb.fit(digits.train_Images,digits.train_Labels)
    mynvb.fit(x.PCA[:,:50],digits.train_Labels)
    newtest = (digits.test_Images -x.centers)@np.transpose(x.V[:50,:])
    labels = mynvb.predict(newtest)
    errors_50, error_Full_index = class_error_rate(labels,digits.test_Labels)
    mynvb.fit(digits.train_Images,digits.train_Labels)
    mynvb.fit(x.PCA[:,:70],digits.train_Labels)
    newtest = (digits.test_Images -x.centers)@np.transpose(x.V[:70,:])
    labels = mynvb.predict(newtest)
    errors_70, error_Full_index = class_error_rate(labels,digits.test_Labels)
    print(errors_Full)
    print(errors_154)
    print(errors_50)
    print(errors_70)
    prob3_plots(mynvb,digits,newtest,pc=0)
    prob3_plots(mynvb,digits,newtest,pc=1)
    prob3_plots(mynvb,digits,newtest,pc=2)
    prob3_plots(mynvb,digits,newtest,pc=3)
Example #2
def main():
    digits = mnist() # Creates a class with our mnist images and labels
    if not os.path.exists('Training SVD Data'): # Check if the file exists; create it if it doesn't (requires os to be imported)
        print("im here")
        x = center_matrix_SVD(digits.train_Images) # Creates a class with our svd and associated info
        pickle.dump(x,open('Training SVD Data','wb'))
    else:
        x = pickle.load(open('Training SVD Data','rb'))  # If we already have the file just load it
    if 0:  # change to 1 to rerun the local k-means computation below
        test_Images_Center = np.subtract(digits.test_Images,np.repeat(x.centers,digits.test_Images.shape[0],0))
        tic()
        labels = local_kmeans_class(x.PCA[:,:50],digits.train_Labels,test_Images_Center@np.transpose(x.V[:50,:]),10)
        toc()
        pickle.dump(labels,open('Loc_kmeans_50_lab','wb'))
    loc_full = pickle.load(open('Loc_kmeans_Full_lab','rb'))
    loc_50 = pickle.load(open('Loc_kmeans_50_lab','rb'))
    labels_Full = pickle.load(open('KNN_Full','rb'))
    # Have to transpose these because they came out backwards; should fix if I use this again
    errors_full,ind_full = class_error_rate(np.transpose(loc_full),digits.test_labels)
    errors_50,ind_50 = class_error_rate(np.transpose(loc_50),digits.test_labels)
    errors_near,ind_near = class_error_rate(labels_Full,digits.test_labels)
    plt.figure()
    plt.plot(np.arange(10)+1, errors_full, color='Green', marker='o', markersize=10, label='Full')  # full-dimensional local k-means
    plt.plot(np.arange(10)+1, errors_50, color='Yellow', marker='o', markersize=10, label='82.5%')
    plt.plot(np.arange(10)+1, errors_near, color='Blue', marker='o', markersize=10, label='kNN')
    plt.grid(1) # Turns the grid on
    plt.title('Plot of local KNN Error rates')
    plt.legend(loc='upper right') # Puts a legend on the plot
    plt.show()
Example #3
def main():
    digits = mnist()  # Creates a class with our mnist images and labels
    if not os.path.exists('Training SVD Data'):  # Check if the file exists; create it if it doesn't (requires os to be imported)
        print("im here")  # Just wanted to check if it was going in here
        x = center_matrix_SVD(
            digits.train_Images
        )  # Creates a class with our svd and associated info
        pickle.dump(x, open('Training SVD Data', 'wb'))
    else:
        x = pickle.load(open('Training SVD Data',
                             'rb'))  # If we already have the file just load it
    if 0:  # if this is zero, skip this block
        test_Images_Center = np.subtract(
            digits.test_Images,
            np.repeat(x.centers, digits.test_Images.shape[0], 0))
        tic()
        myLDA = LDA()  # Create a new instance of the LDA class
        new_train = myLDA.fit_transform(
            x.PCA[:, :154], digits.train_Labels)  # It will fit based on x.PCA
        new_test = myLDA.transform(test_Images_Center @ np.transpose(
            x.V[:154, :]))  # get my transformed test dataset
        Knn_labels, nearest = KNN(new_train, digits.train_Labels, new_test,
                                  10)  # Run kNN on the new data
        toc()
        pickle.dump(Knn_labels, open('FDAKNN_Lables', 'wb'))
        pickle.dump(nearest, open('FDAKNN_neastest', 'wb'))
    fda = pickle.load(open('FDAKNN_Lables', 'rb'))
    labels_Full = pickle.load(open('KNN_Full', 'rb'))
    labels_50 = pickle.load(open('KNN_50', 'rb'))
    errors_fda, ind_fda = class_error_rate(fda, digits.test_labels)
    errors_near, ind_near = class_error_rate(labels_Full, digits.test_labels)
    errors_50, ind_50 = class_error_rate(labels_50, digits.test_labels)
    plt.figure()
    plt.plot(np.arange(10) + 1,
             errors_fda,
             color='Green',
             marker='o',
             markersize=10,
             label='fda')  # kNN on the FDA-transformed data
    plt.plot(np.arange(10) + 1,
             errors_near,
             color='Blue',
             marker='o',
             markersize=10,
             label='kNN')
    plt.plot(np.arange(10) + 1,
             errors_50,
             color='Yellow',
             marker='o',
             markersize=10,
             label='kNN 50')
    plt.grid(1)  # Turns the grid on
    plt.title('Plot of Knn with FDA Error rates')
    plt.legend(loc='upper right')  # Puts a legend on the plot
    plt.show()
    print(confusion_matrix(digits.test_labels, labels_Full[5]))
    print(confusion_matrix(digits.test_labels, fda[5]))
    print(confusion_matrix(digits.test_labels, labels_50[5]))
    """
Example #4
def confusion(digits):
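    # Fit LDA on the first 50 PCA components of the training images, project the centered
    # test images onto the same components, and print the resulting confusion matrix.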
    myLDA = LDA()
    x = center_matrix_SVD(digits.train_Images)
    myLDA.fit(x.PCA[:,:50],digits.train_Labels)
    newtest = digits.test_Images -x.centers
    newtest = newtest@np.transpose(x.V[:50,:])
    labels = myLDA.predict(newtest)
    from sklearn.metrics import confusion_matrix
    print(confusion_matrix(digits.test_Labels,labels))
Example #5
def main():
    digits = mnist()  # Creates a class with our mnist images and labels
    if not os.path.exists('Training SVD Data'):  # Check if the file exists; create it if it doesn't (requires os to be imported)
        print("im here")
        x = center_matrix_SVD(
            digits.train_Images
        )  # Creates a class with our svd and associated info
        pickle.dump(x, open('Training SVD Data', 'wb'))
    else:
        x = pickle.load(open('Training SVD Data',
                             'rb'))  # If we already have the file just load it
    if 0:  # change to 1 to rerun the local k-means computation below
        test_Images_Center = np.subtract(
            digits.test_Images,
            np.repeat(x.centers, digits.test_Images.shape[0], 0))
        tic()
        labels = local_kmeans_class(
            x.PCA[:, :50], digits.train_Labels,
            test_Images_Center @ np.transpose(x.V[:50, :]), 10)
        toc()
        pickle.dump(labels, open('Loc_kmeans_50_lab', 'wb'))
    loc_full = pickle.load(open('Loc_kmeans_Full_lab', 'rb'))
    loc_50 = pickle.load(open('Loc_kmeans_50_lab', 'rb'))
    labels_Full = pickle.load(open('KNN_Full', 'rb'))
    # Have to transpose these because they came out backwards; should fix if I use this again
    errors_full, ind_full = class_error_rate(np.transpose(loc_full),
                                             digits.test_labels)
    errors_50, ind_50 = class_error_rate(np.transpose(loc_50),
                                         digits.test_labels)
    errors_near, ind_near = class_error_rate(labels_Full, digits.test_labels)
    plt.figure()
    plt.plot(np.arange(10) + 1,
             errors_full,
             color='Green',
             marker='o',
             markersize=10,
             label='Full')  # full-dimensional local k-means
    plt.plot(np.arange(10) + 1,
             errors_50,
             color='Yellow',
             marker='o',
             markersize=10,
             label='82.5%')
    plt.plot(np.arange(10) + 1,
             errors_near,
             color='Blue',
             marker='o',
             markersize=10,
             label='kNN')
    plt.grid(1)  # Turns the grid on
    plt.title('Plot of local KNN Error rates')
    plt.legend(loc='upper right')  # Puts a legend on the plot
    plt.show()
Example #6
def main():
    digits = mnist() # Creates a class with our mnist images and labels
    if not os.path.exists('Training SVD Data'): # Check if the file exists; create it if it doesn't (requires os to be imported)
        print("im here")
        x = center_matrix_SVD(digits.train_Images) # Creates a class with our svd and associated info
        pickle.dump(x,open('Training SVD Data','wb'))
    else:
        x = pickle.load(open('Training SVD Data','rb'))
    if 0:  # change to 1 if you want to rerun the knn stuff
        do_KNN(x,digits)
    KNN_Plots(x,digits)
Example #7
def main():
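    # Run LDA on PCA projections of several ranks (154, 50, 10, and 60 components), pickle
    # each set of error rates, then build the plots and the spreadsheet summary from the
    # saved results via the prob1_plots and put_into_excel helpers.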
    digits = mnist()
    x = center_matrix_SVD(digits.train_Images)
    errors_154 = doLDA(x,digits,154)
    pickle.dump(errors_154,open('LDA_154.p','wb'))
    errors_50 = doLDA(x,digits,50)
    pickle.dump(errors_50,open('LDA_50.p','wb'))
    errors_10 = doLDA(x,digits,10)
    pickle.dump(errors_10,open('LDA_10.p','wb'))
    errors_60 = doLDA(x,digits,60)
    pickle.dump(errors_60,open('LDA_60.p','wb'))
    prob1_plots(digits)
    put_into_excel(digits)
Example #8
def main(): # Our main function
    digits = mnist() # Creates a class with our mnist images and labels
    if not os.path.exists('Training SVD Data'): # Check if the file exists; create it if it doesn't (requires os to be imported)
        print("im here")
        x = center_matrix_SVD(digits.train_Images) # Creates a class with our svd and associated info
        pickle.dump(x,open('Training SVD Data','wb'))
    else:
        x = pickle.load(open('Training SVD Data','rb'))
    if 0: # change 0 to 1 if you want to run this again
        merror = mfoldX(x.PCA[:,:],digits.train_Labels,6,10) # Run X-validation and return error rates for the full dataset
        pickle.dump(merror,open('MFoldErrors','wb')) # Put our error rates in a file
        merror = mfoldX(x.PCA[:,:154],digits.train_Labels,6,10) # For the 95% dataset
        pickle.dump(merror,open('MFoldErrors154','wb'))
        merror = mfoldX(x.PCA[:,:50],digits.train_Labels,6,10) # for the 82.5% dataset
        pickle.dump(merror,open('MFoldErrors50','wb'))
    MFold_plots(x) # Makes graphs from our data
Example #9
def main():  # Our main function
    digits = mnist()  # Creates a class with our mnist images and labels
    if not os.path.exists("Training SVD Data"):  # Check if the file exists; create it if it doesn't (requires os to be imported)
        print("im here")
        x = center_matrix_SVD(digits.train_Images)  # Creates a class with our svd and associated info
        pickle.dump(x, open("Training SVD Data", "wb"))
    else:
        x = pickle.load(open("Training SVD Data", "rb"))
    if 0:  # change 0 to 1 if you want to run this again
        merror = mfoldX(
            x.PCA[:, :], digits.train_Labels, 6, 10
        )  # Run X-validation and return error rates for the full dataset
        pickle.dump(merror, open("MFoldErrors", "wb"))  # Put our error rates in a file
        merror = mfoldX(x.PCA[:, :154], digits.train_Labels, 6, 10)  # For the 95% dataset
        pickle.dump(merror, open("MFoldErrors154", "wb"))
        merror = mfoldX(x.PCA[:, :50], digits.train_Labels, 6, 10)  # for the 82.5% dataset
        pickle.dump(merror, open("MFoldErrors50", "wb"))
    MFold_plots(x)  # Makes graphs from our data
Example #10
def do_LDA2D_KNN(digits,p,q):
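    # Reduce each 28x28 image to a p x q matrix with iterative 2D-LDA, then classify the
    # flattened projections with (Fisher) LDA + 10-NN under three different distance metrics.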
    l,r = LDA2D.iterative2DLDA(digits.train_Images, digits.train_Labels, p, q, 28, 28)

    new_train = np.zeros((digits.train_Images.shape[0],p*q))
    for i in range(digits.train_Images.shape[0]):
        new_train[i] = (np.transpose(l)@digits.train_Images[i].reshape(28,28)@r).reshape(p*q)
    new_test = np.zeros((digits.test_Images.shape[0],p*q))
    for i in range(digits.test_Images.shape[0]):
        new_test[i] = (np.transpose(l)@digits.test_Images[i].reshape(28,28)@r).reshape(p*q)
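    # Center the projected features, reduce them further with LDA, and run 10-NN with
    # Euclidean, city-block, and cosine distances, pickling each set of predicted labels.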
    myLDA = LDA()
    x = center_matrix_SVD(new_train)
    new_new_train = myLDA.fit_transform(new_train-x.centers,digits.train_Labels)
    new_new_test = myLDA.transform(new_test-x.centers)
    labels, nearest = KNN(new_new_train,digits.train_Labels,new_new_test,10,'euclidean')
    pickle.dump(labels, open('LDA2DFDA'+ str(p) + 'x' + str(q) + '_EU.p','wb'))
    #pickle.dump(nearest, open('NLDA2DFDA'+ str(p) + 'x' + str(q) + '_EU.p','wb'))
    labels, nearest = KNN(new_new_train,digits.train_Labels,new_new_test,10,'cityblock')
    pickle.dump(labels, open('LDA2DFDA'+ str(p) + 'x' + str(q) + '_CB.p','wb'))
    #pickle.dump(nearest, open('NLDA2DFDA'+ str(p) + 'x' + str(q) + '_CB.p','wb'))
    labels, nearest = KNN(new_new_train,digits.train_Labels,new_new_test,10,'cosine')
    pickle.dump(labels, open('LDA2DFDA'+ str(p) + 'x' + str(q) + '_CO.p','wb'))
Example #11
def main():
    digits = mnist() # Creates a class with our mnist images and labels
    if not os.path.exists('Training SVD Data'): # Check if the file exists; create it if it doesn't (requires os to be imported)
        x = center_matrix_SVD(digits.train_Images) # Creates a class with our svd and associated info
        pickle.dump(x,open('Training SVD Data','wb'))
    else:
        x = pickle.load(open('Training SVD Data','rb'))  # If we already have the file just load it
    if 1: # set to 0 to skip recomputing the local k-means labels
        test_Images_Center = np.subtract(digits.test_Images,np.repeat(x.centers,digits.test_Images.shape[0],0))
        tic()
        myLDA = LDA()  # Create a new instance of the LDA class
        new_train = myLDA.fit_transform(x.PCA[:,:154],digits.train_Labels)  # It will fit based on x.PCA
        new_test = myLDA.transform(test_Images_Center@np.transpose(x.V[:154,:])) # get my transformed test dataset
        Knn_labels = local_kmeans_class(new_train,digits.train_Labels,new_test,10) # Run local k-means on the new data
        toc()
        pickle.dump(Knn_labels,open('Loc_kmeans_fda_lab','wb'))

    fda = pickle.load(open('Loc_kmeans_fda_lab','rb'))
    labels_Full = pickle.load(open('KNN_Full','rb'))
    loc_full = pickle.load(open('Loc_kmeans_Full_lab','rb'))
    errors_fda,ind_fda = class_error_rate(np.transpose(fda),digits.test_labels)
    errors_near,ind_near = class_error_rate(labels_Full,digits.test_labels)
    errors_full,ind_full = class_error_rate(np.transpose(loc_full),digits.test_labels)
    labels_50 = pickle.load(open('KNN_50','rb'))
    errors_50,ind_50 = class_error_rate(labels_50,digits.test_labels)
    print(errors_full)
    plt.figure()
    plt.plot(np.arange(10)+1, errors_fda, color='Green', marker='o', markersize=10, label='fda Kmeans')  # local k-means on the FDA-transformed data
    plt.plot(np.arange(10)+1, errors_near, color='Blue', marker='o', markersize=10, label='kNN')
    plt.plot(np.arange(10)+1, errors_full, color='Yellow', marker='o', markersize=10, label='Full Kmeans')
    plt.plot(np.arange(10)+1, errors_50, color='Red', marker='o', markersize=10, label='kNN 50')
    axes = plt.gca()
    axes.set_ylim([0.015,0.12])
    plt.grid(1) # Turns the grid on
    plt.title('Plot of Local Kmeans with FDA Error rates')
    plt.legend(loc='upper right')  # Puts a legend on the plot
    plt.show()
    project_back(x,digits)
Example #12
def main():
    digits = mnist() # Creates a class with our mnist images and labels
    if not os.path.exists('Training SVD Data'): # Check if the file exists; create it if it doesn't (requires os to be imported)
        print("im here")   # Just wanted to check if it was going in here
        x = center_matrix_SVD(digits.train_Images) # Creates a class with our svd and associated info
        pickle.dump(x,open('Training SVD Data','wb'))
    else:
        x = pickle.load(open('Training SVD Data','rb'))  # If we already have the file just load it
    if 0: # if this is zero, skip this block
        test_Images_Center = np.subtract(digits.test_Images,np.repeat(x.centers,digits.test_Images.shape[0],0))
        tic()
        myLDA = LDA()  # Create a new instance of the LDA class
        new_train = myLDA.fit_transform(x.PCA[:,:154],digits.train_Labels)  # It will fit based on x.PCA
        new_test = myLDA.transform(test_Images_Center@np.transpose(x.V[:154,:])) # get my transformed test dataset
        Knn_labels, nearest = KNN(new_train,digits.train_Labels,new_test,10) # Run kNN on the new data
        toc()
        pickle.dump(Knn_labels,open('FDAKNN_Lables','wb'))
        pickle.dump(nearest,open('FDAKNN_neastest','wb'))
    fda = pickle.load(open('FDAKNN_Lables','rb'))
    labels_Full = pickle.load(open('KNN_Full','rb'))
    labels_50 = pickle.load(open('KNN_50','rb'))
    errors_fda,ind_fda = class_error_rate(fda,digits.test_labels)
    errors_near,ind_near = class_error_rate(labels_Full,digits.test_labels)
    errors_50,ind_50 = class_error_rate(labels_50,digits.test_labels)
    plt.figure()
    plt.plot(np.arange(10)+1, errors_fda, color='Green', marker='o', markersize=10, label='fda')  # kNN on the FDA-transformed data
    plt.plot(np.arange(10)+1, errors_near, color='Blue', marker='o', markersize=10, label='kNN')
    plt.plot(np.arange(10)+1, errors_50, color='Yellow', marker='o', markersize=10, label='kNN 50')
    plt.grid(1) # Turns the grid on
    plt.title('Plot of Knn with FDA Error rates')
    plt.legend(loc='upper right')  # Puts a legend on the plot
    plt.show()
    print(confusion_matrix(digits.test_labels,labels_Full[5]))
    print(confusion_matrix(digits.test_labels,fda[5]))
    print(confusion_matrix(digits.test_labels,labels_50[5]))
    """