Example #1
def prob_6():
    # Repeat the magic telescope / housing experiments on the credit approval data
    # using distance weighting (w = 1/dist**2)
    arff = Arff('datasets/credit.arff')
    arff.shuffle()
    test = arff.create_subset_arff(slice(arff.instance_count // 4))
    train = arff.create_subset_arff(slice(arff.instance_count // 4, None))

    train.normalize()
    test.normalize()

    krange = np.arange(1, 16, 2)
    accs = []
    for k in krange:
        knn = KNN(k, weighting=True, vdm=True)
        predictions = knn.knn(train.get_features(), train.get_labels(),
                              test.get_features())
        acc = predictions == np.ravel(test.get_labels().data)
        accuracy = np.mean(acc)
        print("k:", k, "accuracy:", accuracy)
        accs.append(accuracy)

    plt.plot(krange, accs)
    plt.title("K Size Versus Accuracy on Credit Approval (Weighted)")
    plt.xlabel("K")
    plt.ylabel("Accuracy")
    plt.show()
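The `weighting=True` flag above presumably turns on the inverse-distance-squared vote named in the comment (w = 1/dist**2); the `KNN` class itself is not shown in these examples, so the following is only a minimal sketch of that voting rule with illustrative names.

import numpy as np

def weighted_vote(train_X, train_y, query, k=3, eps=1e-8):
    # Illustrative inverse-distance-squared kNN vote for a single query point.
    dists = np.linalg.norm(train_X - query, axis=1)   # Euclidean distance to every training row
    nearest = np.argsort(dists)[:k]                   # indices of the k closest rows
    weights = 1.0 / (dists[nearest] ** 2 + eps)       # w = 1 / dist**2 (eps guards against zero distance)
    totals = {}
    for label, w in zip(train_y[nearest], weights):
        totals[label] = totals.get(label, 0.0) + w    # accumulate the weighted vote per class
    return max(totals, key=totals.get)                # class with the largest total weight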
Example #2
def prob_3(weighted_d=False):
    test_arff = Arff("housing_testing_data.arff")
    train_arff = Arff("housing_training_data.arff")
    test_arff.shuffle()
    train_arff.shuffle()
    test_arff.normalize()
    train_arff.normalize()

    K = [1, 3, 5, 7, 9, 11, 13, 15]
    A = []
    # Feature/label arrays don't depend on k, so build them once
    test_data = np.hstack((test_arff.get_features().data, test_arff.get_labels().data))
    train_data = np.hstack((train_arff.get_features().data, train_arff.get_labels().data))
    for k_hat in K:
        KNNC = KNNClassifier(k_hat, train_data, test_data)
        A.append(KNNC.get_accuracy_regress(weighted_d))
    
    plt.plot(K, A, label="")
    t = "KNN Regression M.S.E Housing"
    if weighted_d:
        t += "(weighted-d)"
    weighted_d
    plt.title(t)
    plt.xlabel("K")
    plt.ylabel("M.S.E")
    # plt.legend()
    plt.show()
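For the regression variant, `get_accuracy_regress(weighted_d)` presumably predicts each test target as a (possibly inverse-distance-squared weighted) mean of the k nearest training targets and reports the MSE; the class is not shown here, so this is only a sketch of that prediction rule.

import numpy as np

def knn_regress(train_X, train_y, query, k=3, weighted=False, eps=1e-8):
    # Illustrative kNN regression prediction: plain or inverse-distance-squared weighted mean.
    dists = np.linalg.norm(train_X - query, axis=1)
    nearest = np.argsort(dists)[:k]
    if not weighted:
        return train_y[nearest].mean()                 # unweighted mean of neighbor targets
    w = 1.0 / (dists[nearest] ** 2 + eps)              # w = 1 / dist**2
    return np.sum(w * train_y[nearest]) / np.sum(w)    # weighted mean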
Example #3
def prob_5():
    arff = Arff('datasets/cars.arff')
    arff.shuffle()

    test = arff.create_subset_arff(slice(arff.instance_count // 10))
    training = arff.create_subset_arff(slice(arff.instance_count // 10, None))

    tf = test.get_features()
    tl = test.get_labels()

    splits = k_fold_cv(arff)

    arff = arff.create_subset_arff(slice(arff.instance_count // 4, None))
    d = DecisionTreeLearner()
    d.train(arff.get_features(), arff.get_labels())

    a = d.tree

    arff = Arff('datasets/voting.arff')
    arff.shuffle()
    arff = arff.create_subset_arff(slice(arff.instance_count // 4, None))
    d = DecisionTreeLearner()
    d.train(arff.get_features(), arff.get_labels())

    b = d.tree

    return a, b
Example #4
def prob5():
    arff = Arff(sys.argv[2])
    imp_atts = [1, 3, 4, 5, 7, 9, 11, 12, 13]
    arff.shuffle()
    n = len(arff.get_labels().data)
    t = int(n * .55)      # first 55% of rows for training
    v = n - int(n * .20)  # last 20% for validation; the middle 25% for testing
    train_set = arff.create_subset_arff(row_idx=slice(0, t, 1),
                                        col_idx=imp_atts)
    test_set = arff.create_subset_arff(row_idx=slice(t, v, 1),
                                       col_idx=imp_atts)
    validation_set = arff.create_subset_arff(row_idx=slice(v, n, 1),
                                             col_idx=imp_atts)

    epochs = []
    momentums = np.linspace(0, 1.5, 20)
    # momentums = [.5, 1]

    for momentum in momentums:
        print(momentum)
        nn = NeuralNetwork(8, [30], 11, LR=.1, momentum=momentum)
        all_acc_va, all_mse_va, all_mse_te, all_mse_tr = nn.train_set(
            train_set, test_set, validation_set, w=5)
        epochs.append(len(all_acc_va))

    plt.plot(momentums, epochs)
    plt.title("Vowel Momentum vs Epoch Convergence")
    plt.xlabel("Momentum")
    plt.ylabel("Epochs til Conv.")
    plt.show()
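The `NeuralNetwork` class swept here is not shown; the momentum constant is presumably used in the standard way, carrying a fraction of the previous weight update into the current one. A minimal sketch of that update rule, with illustrative names:

import numpy as np

def momentum_step(weights, grad, prev_delta, lr=0.1, momentum=0.5):
    # Illustrative gradient step with momentum: delta = -lr * grad + momentum * prev_delta
    delta = -lr * grad + momentum * prev_delta
    return weights + delta, delta   # return delta so the caller can feed it into the next step

Momentum values at or above 1, which the sweep above reaches, stop the accumulated update from decaying and can keep the network from converging at all, which is worth remembering when reading the right-hand end of the plot.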
Example #5
def prob_1():
    arff = Arff('datasets/lenses.arff')
    acc = []
    for _ in range(10):

        arff.shuffle()
        testing = arff.create_subset_arff(slice(arff.instance_count//5))
        training = arff.create_subset_arff(slice(arff.instance_count//5, None))

        d = DecisionTreeLearner()

        features = training.get_features()
        labels = training.get_labels()


        t_feat = testing.get_features()
        t_labels = testing.get_labels()

        d.train(features, labels)

        accuracy = d.get_accuracy(t_feat, t_labels)
        print(accuracy)
        acc.append(accuracy)

    # Average accuracy over the 10 random splits
    print(sum(acc) / len(acc))
Example #6
def prob_4(lr):
    # read in vowels dataset
    arff = Arff('datasets/vowels.arff')

    # Leave out the test/train and person features, which are unnecessary.
    arff = arff.create_subset_arff(col_idx=slice(2, None), label_count=1)

    # Get a 75/25 split
    arff.shuffle()
    training = arff.create_subset_arff(slice(arff.instance_count // 4))
    test = arff.create_subset_arff(slice(arff.instance_count // 4, -1))
    t_features = test.get_features()
    t_labels = test.get_labels()

    # Get a 15% Validation set
    validation = training.create_subset_arff(slice(arff.instance_count // 5))
    training = training.create_subset_arff(
        slice(arff.instance_count // 5, None))
    v_features = validation.get_features()
    v_labels = validation.get_labels()

    domain = 2**np.arange(0, 8)
    training_mse = []
    validation_mse = []
    test_mse = []
    for nodes in domain:
        mse = 0
        vmse = 0
        tmse = 0
        for _ in range(100):
            learner = MultilayerPerceptronLearner(
                [training.features_count, nodes, 11], momentum=0)
            # learner.zero_weights()
            learner.lr = lr
            learner.max_epoch = 500
            learner.train(training.get_features(), training.get_labels(),
                          validation.get_features(), validation.get_labels())

            tmse += learner.get_mse(test.get_features(), test.get_labels())
            mse += learner.get_mse(training.get_features(),
                                   training.get_labels())
            vmse += learner.get_mse(validation.get_features(),
                                    validation.get_labels())

        training_mse.append(mse / 100)
        validation_mse.append(vmse / 100)
        test_mse.append(tmse / 100)

    plt.semilogx(domain, test_mse, basex=2, label="Test Set MSE")
    plt.semilogx(domain, training_mse, basex=2, label="Training Set MSE")
    plt.semilogx(domain, validation_mse, basex=2, label="Validation Set MSE")
    plt.title("MSE vs Number of Hidden Nodes")
    plt.xlabel("Number of Hidden Nodes")
    plt.ylabel("Mean Squared Error")
    plt.legend()
    plt.show()
Example #7
def prob_6_b():
    # read in vowels dataset
    arff = Arff('datasets/vowels.arff')

    # Leave out the test/train and person features, which are unnecessary.
    arff = arff.create_subset_arff(col_idx=slice(2, None), label_count=1)

    # Get a 75/25 split
    arff.shuffle()
    training = arff.create_subset_arff(slice(arff.instance_count // 4))
    test = arff.create_subset_arff(slice(arff.instance_count // 4, -1))
    t_features = test.get_features()
    t_labels = test.get_labels()

    # Get a 15% Validation set
    validation = training.create_subset_arff(slice(arff.instance_count // 5))
    training = training.create_subset_arff(
        slice(arff.instance_count // 5, None))
    v_features = validation.get_features()
    v_labels = validation.get_labels()
    features = training.get_features()
    labels = training.get_labels()

    taccuracy = []
    accuracy = []
    domain = 2**np.arange(1, 7)
    for i in domain:
        tacc = 0
        acc = 0
        for _ in range(3):
            learner = MultilayerPerceptronLearner([training.features_count] +
                                                  [i] * (32 // i) + [11],
                                                  momentum=.85)
            # learner.zero_weights()
            learner.lr = .1
            learner.max_epoch = 500
            learner.train(training.get_features(), training.get_labels(),
                          validation.get_features(), validation.get_labels())
            tacc += learner.get_accuracy(t_features, t_labels)
            acc += learner.get_accuracy(features, labels)
        accuracy.append(acc / 3)
        taccuracy.append(tacc / 3)

    plt.semilogx(domain, accuracy, basex=2, label="Training Set Accuracy")
    plt.semilogx(domain, taccuracy, basex=2, label="Test Set Accuracy")
    plt.title("Node Distribution vs Accuracy")
    plt.xlabel("Number of Nodes per Hidden Layer")
    plt.ylabel("Accuracy")
    plt.legend()
    plt.show()
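The layer list `[training.features_count] + [i] * (32 // i) + [11]` spreads a budget of roughly 32 hidden nodes across `32 // i` layers of `i` nodes each. The loop below just prints the shapes that sweep generates, using 11 as a stand-in for `training.features_count`; note that `i = 64` produces no hidden layer at all.

n_in, n_out = 11, 11   # illustrative input/output sizes for the vowel data
for i in [2, 4, 8, 16, 32, 64]:
    print(i, [n_in] + [i] * (32 // i) + [n_out])
# 2  -> 16 hidden layers of 2 nodes each
# 4  ->  8 hidden layers of 4 nodes each
# 8  ->  4 hidden layers of 8 nodes each
# 16 ->  2 hidden layers of 16 nodes each
# 32 ->  1 hidden layer of 32 nodes
# 64 -> no hidden layer (32 // 64 == 0)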
Example #8
def prob3():
    """ """
    arff = Arff(sys.argv[2])
    imp_atts = [1, 3, 4, 5, 7, 9, 11, 12, 13]
    arff.shuffle()
    n = len(arff.get_labels().data)
    t = int(n * .55)
    v = n - int(n * .20)
    train_set = arff.create_subset_arff(row_idx=slice(0, t, 1),
                                        col_idx=imp_atts)
    test_set = arff.create_subset_arff(row_idx=slice(t, v, 1),
                                       col_idx=imp_atts)
    validation_set = arff.create_subset_arff(row_idx=slice(v, n, 1),
                                             col_idx=imp_atts)

    best_mse_te = []
    best_mse_tr = []
    best_mse_va = []
    epochs = []

    LRS = [.01, .1, .5, .8, 1.5]
    for LR in LRS:
        # print(LR)
        nn = NeuralNetwork(8, [16], 11, LR=LR, momentum=0)
        all_acc_va, all_mse_va, all_mse_te, all_mse_tr = nn.train_set(
            train_set, test_set, validation_set, w=5)
        best_mse_te.append(min(all_mse_te))
        best_mse_tr.append(min(all_mse_tr))
        best_mse_va.append(min(all_mse_va))
        epochs.append(len(all_mse_va))

    plt.plot(LRS, best_mse_te, label="Test MSE")
    plt.plot(LRS, best_mse_tr, label="Training MSE")
    plt.plot(LRS, best_mse_va, label="Validation MSE")
    plt.title("Vowel MSE vs Learning Rate")
    plt.xlabel("Learning Rate")
    plt.ylabel("MSE")
    plt.legend()
    plt.show()

    plt.plot(LRS, epochs)
    plt.title("Vowel Epochs vs Learning Rate")
    plt.xlabel("Learning Rate")
    plt.ylabel("Epochs")
    plt.show()
Example #9
def prob_5(lr, hidden):
    # read in vowels dataset
    arff = Arff('datasets/vowels.arff')

    # Leave out the test/train and person features, which are unnecessary.
    arff = arff.create_subset_arff(col_idx=slice(2, None), label_count=1)

    # Get a 75/25 split
    arff.shuffle()
    training = arff.create_subset_arff(slice(arff.instance_count // 4))
    test = arff.create_subset_arff(slice(arff.instance_count // 4, -1))
    t_features = test.get_features()
    t_labels = test.get_labels()

    # Get a 15% Validation set
    validation = training.create_subset_arff(slice(arff.instance_count // 5))
    training = training.create_subset_arff(
        slice(arff.instance_count // 5, None))
    v_features = validation.get_features()
    v_labels = validation.get_labels()

    epochs = []
    accuracy = []
    domain = np.linspace(0, 1, 20)
    for momentum in domain:
        e = 0
        acc = 0
        for _ in range(10):
            learner = MultilayerPerceptronLearner(
                [training.features_count, hidden, 11], momentum=momentum)
            # learner.zero_weights()
            learner.lr = lr
            learner.max_epoch = 500
            learner.train(training.get_features(), training.get_labels(),
                          validation.get_features(), validation.get_labels())
            acc += learner.get_accuracy(t_features, t_labels)
            e += learner.epochs
        epochs.append(e / 10)
        accuracy.append(acc / 10)

    print(accuracy)
    plt.plot(domain, epochs)
    plt.title("Number of Training Epochs vs Momentum")
    plt.xlabel("Momentum Constant")
    plt.ylabel("Number of Training Epochs")
    plt.show()
Example #10
def prob_5():
    cont_mask = [1, 2, 7, 10, 13, 14, 16]
    cate_mask = [0, 3, 4, 5, 6, 8, 9, 11, 12, 15]

    arff = Arff("credit_approval_data.arff")
    arff.shuffle()
    arff.normalize()

    n = len(arff.get_labels().data)
    t = int(n * .7)
    train_data = arff.create_subset_arff(row_idx=slice(0, t, 1))
    test_data = arff.create_subset_arff(row_idx=slice(t, n, 1))
    test_data = np.hstack((test_data.get_features().data, test_data.get_labels().data))
    train_data = np.hstack((train_data.get_features().data, train_data.get_labels().data))
    #b,30.83,0,u,g,w,v,1.25,t,t,01,f,g,00202,0,+
    # Categorical distance matrix: 1 between distinct values, 0 on the diagonal (identical values)
    dist_matrix = np.ones((16, 16))
    np.fill_diagonal(dist_matrix, 0)
    KNNC = KNNClassifier(8, train_data, test_data)
    print(KNNC.get_accuracy_mixed(cate_mask, cont_mask, dist_matrix))
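The 16x16 matrix of ones with a zero diagonal assigns distance 1 to any pair of distinct categorical values and 0 to identical ones; combined with differences on the normalized continuous columns, this amounts to a HEOM-style heterogeneous distance. `get_accuracy_mixed` is not shown, so the sketch below only illustrates the per-pair distance it presumably computes.

import numpy as np

def mixed_distance(a, b, cate_mask, cont_mask, dist_matrix):
    # Illustrative heterogeneous distance: overlap metric on categorical columns,
    # squared difference on (already normalized) continuous columns.
    d2 = 0.0
    for i in cate_mask:
        d2 += dist_matrix[int(a[i]), int(b[i])] ** 2   # 0 if the categorical values match, 1 otherwise
    for i in cont_mask:
        d2 += (a[i] - b[i]) ** 2                       # continuous columns assumed scaled to [0, 1]
    return np.sqrt(d2)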
Example #11
def prob_3():
    print('cars')
    arff = Arff('datasets/cars.arff')
    arff.shuffle()
    d = DecisionTreeLearner()
    d.train(arff.get_features(), arff.get_labels())

    a = d.tree

    print()
    print('voting')
    arff = Arff('datasets/voting.arff')
    arff.shuffle()
    d = DecisionTreeLearner()
    d.train(arff.get_features(), arff.get_labels())

    b = d.tree

    return a, b
Example #12
def prob_2(weighted_d=False):
    """Compare normalized vs. non-normalized kNN on the magic telescope data, then sweep k."""
    k = 3
    test_arff = Arff("magic_telescope_testing_data.arff")
    train_arff = Arff("magic_telescope_training_data.arff")
    test_arff.shuffle()
    train_arff.shuffle()

    # attributes = test_arff.get_attr_names()
    test_data = np.hstack((test_arff.get_features().data, test_arff.get_labels().data))
    train_data = np.hstack((train_arff.get_features().data, train_arff.get_labels().data))
    KNNC = KNNClassifier(k, train_data, test_data)
    acc = KNNC.get_accuracy(weighted_d)

    test_arff.normalize()
    train_arff.normalize()
    n_test_data = np.hstack((test_arff.get_features().data, test_arff.get_labels().data))
    n_train_data = np.hstack((train_arff.get_features().data, train_arff.get_labels().data))
    n_KNNC = KNNClassifier(k, n_train_data, n_test_data)
    acc_n = n_KNNC.get_accuracy(weighted_d)

    # print(np.array([[acc,acc_n]]))
    print(acc, acc_n)
    # show_table(["Not Normalized"  "Normailzed"], ["Accuracy"], np.array([[acc,acc_n]]), title = "Normalized vs Non-normalized, k=3")

    K = [1, 3, 5, 7, 9, 11, 13, 15]
    A = []
    for k_hat in K:
        # n_test_data = np.hstack((test_arff.get_features().data, test_arff.get_labels().data))
        # n_train_data = np.hstack((train_arff.get_features().data, train_arff.get_labels().data))
        n_KNNC = KNNClassifier(k_hat, n_train_data, n_test_data)
        A.append(n_KNNC.get_accuracy(weighted_d))

    plt.plot(K, A, label="")
    t = "KNN Accuracy Telesc. "
    if weighted_d:
        t += "(weighted-d)"
    plt.title(t)
    plt.xlabel("K")
    plt.ylabel("Accuracy")
    # plt.legend()
    plt.show()
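`Arff.normalize` is not shown in these examples; column-wise min-max scaling to [0, 1] is the usual choice for kNN and is presumably what the normalized/non-normalized comparison above exercises. A sketch under that assumption:

import numpy as np

def min_max_normalize(X):
    # Illustrative column-wise min-max scaling to [0, 1]; NaNs are ignored when finding the range.
    mins = np.nanmin(X, axis=0)
    ranges = np.nanmax(X, axis=0) - mins
    ranges[ranges == 0] = 1.0          # constant columns would otherwise divide by zero
    return (X - mins) / ranges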
Example #13
def prob_6():
    """ """
    k = 3
    test_arff = Arff("magic_telescope_testing_data.arff")
    train_arff = Arff("magic_telescope_training_data.arff")
    test_arff.shuffle()
    train_arff.shuffle()
    test_arff.normalize()
    train_arff.normalize()

    K = [1, 3, 5]
    T = []
    A = []
    T_KSM = []
    A_KSM = []
    for k_hat in K:
        test_data = np.hstack((test_arff.get_features().data, test_arff.get_labels().data))
        train_data = np.hstack((train_arff.get_features().data, train_arff.get_labels().data))
        KNNC = KNNClassifier(k_hat, train_data, test_data)

        t = time.time()
        A.append(KNNC.get_accuracy())
        T.append(time.time() - t)
        KNNC.induce_KSM()

        t = time.time()
        A_KSM.append(KNNC.get_accuracy())
        T_KSM.append(time.time() - t)

    ax = plt.axes(projection='3d')
    ax.plot(K, A, T, label="No-KSM")
    ax.plot(K, A_KSM, T_KSM, label="KSM")

    ax.set_xlabel('K')
    ax.set_ylabel('Accuracy')
    ax.set_zlabel('Time')

    t = "KNN Accuracy w/ IKSM"
    plt.title(t)
    plt.legend()
    plt.show()
Example #14
def prob_2():
    # Get accuracy on cars.arff
    arff = Arff('datasets/cars.arff')
    arff.shuffle()

    acc, tacc = k_fold_cv(arff, 10)
    print('cars:')
    print('acc', acc)
    print('tacc', tacc)
    print('tot', sum(tacc) / len(tacc))
    print()

    # Get accuracy of voting.arff
    arff = Arff('datasets/voting.arff')
    arff.shuffle()
    acc, tacc = k_fold_cv(arff, 10)

    print('voting:')
    print('acc', acc)
    print('tacc', tacc)
    print('tot', sum(tacc) / len(tacc))
    print()
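`k_fold_cv(arff, 10)` is called in several of these examples but not defined in them; it presumably partitions the rows into 10 folds and trains/tests once per fold. A minimal sketch of that index splitting, using plain NumPy:

import numpy as np

def k_fold_indices(n, k=10):
    # Illustrative k-fold split: yields (train_idx, test_idx) index arrays over n shuffled rows.
    idx = np.random.permutation(n)
    folds = np.array_split(idx, k)                        # k nearly equal folds
    for i in range(k):
        test_idx = folds[i]
        train_idx = np.concatenate(folds[:i] + folds[i + 1:])
        yield train_idx, test_idx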
Example #15
def prob_2():
    # Load the iris dataset as an Arff
    iris = Arff('datasets/iris.arff')

    # Get a random 75/25 split
    iris.shuffle()

    validation = iris.create_subset_arff(slice(iris.instance_count // 4))
    v_features = validation.get_features()
    v_labels = validation.get_labels()

    training = iris.create_subset_arff(slice(iris.instance_count // 4, None))
    features = training.get_features()
    labels = training.get_labels()
    # Make the learner with layer shape [4, 8, 3]
    learner = MultilayerPerceptronLearner([4, 8, 3], momentum=0)

    training_mse = []
    validation_mse = []
    classification_accuracy = []

    for epoch in range(learner.max_epoch):
        learner.train_one_epoch(features, labels)
        training_mse.append(learner.get_mse(features, labels))
        validation_mse.append(learner.get_mse(v_features, v_labels))
        classification_accuracy.append(
            learner.get_accuracy(v_features, v_labels))
        if learner.check_for_convergence(v_features, v_labels):
            break

    fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
    ax1.plot(training_mse, label="Training Data MSE")
    ax1.plot(validation_mse, label="Validation Data MSE")
    ax1.legend()
    ax2.plot(classification_accuracy, label="Classification Accuracy")
    plt.legend()
    plt.show()
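`check_for_convergence` belongs to the learner and is not shown; a common implementation keeps the best validation MSE seen so far and stops once it has not improved for a fixed number of epochs. A minimal sketch of that idea, with illustrative names:

class EarlyStopper:
    # Illustrative patience-based stopping criterion on validation MSE.
    def __init__(self, patience=10):
        self.patience = patience
        self.best_mse = float("inf")
        self.epochs_since_best = 0

    def should_stop(self, validation_mse):
        if validation_mse < self.best_mse:
            self.best_mse = validation_mse       # new best: reset the patience counter
            self.epochs_since_best = 0
        else:
            self.epochs_since_best += 1          # no improvement this epoch
        return self.epochs_since_best >= self.patience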
Example #16
def prob2():
    arff = Arff(sys.argv[1])
    arff.shuffle()
    n = len(arff.get_labels().data)
    t = int(n * .55)
    v = n - int(n * .20)
    train_set = arff.create_subset_arff(row_idx=slice(0, t, 1))
    test_set = arff.create_subset_arff(row_idx=slice(t, v, 1))
    validation_set = arff.create_subset_arff(row_idx=slice(v, n, 1))

    nn = NeuralNetwork(4, [9], 3, LR=.1)
    all_acc_va, all_mse_va, all_mse_te, all_mse_tr = nn.train_set(
        train_set, test_set, validation_set)

    d = list(range(len(all_acc_va)))
    plt.plot(d, all_mse_te, label="test MSE")
    plt.plot(d, all_mse_va, label="Val. MSE")
    plt.plot(d, all_acc_va, label="Val. Accuracy")
    plt.title("Iris Dataset")
    plt.xlabel("Epochs")
    plt.ylabel("%")
    plt.legend()
    plt.show()
Example #17
def prob4():
    arff = Arff(sys.argv[2])
    imp_atts = [1, 3, 4, 5, 7, 9, 11, 12, 13]
    arff.shuffle()
    n = len(arff.get_labels().data)
    t = int(n * .55)
    v = n - int(n * .20)
    train_set = arff.create_subset_arff(row_idx=slice(0, t, 1),
                                        col_idx=imp_atts)
    test_set = arff.create_subset_arff(row_idx=slice(t, v, 1),
                                       col_idx=imp_atts)
    validation_set = arff.create_subset_arff(row_idx=slice(v, n, 1),
                                             col_idx=imp_atts)

    best_mse_te = []
    best_mse_tr = []
    best_mse_va = []
    hidden_nodes = [1, 3, 6, 10, 13, 15, 16, 18, 20, 22, 25, 30, 40]

    for nodes in hidden_nodes:
        # print(nodes)
        nn = NeuralNetwork(8, [nodes], 11, LR=.1, momentum=0)
        all_acc_va, all_mse_va, all_mse_te, all_mse_tr = nn.train_set(
            train_set, test_set, validation_set, w=5)

        best_mse_te.append(min(all_mse_te))
        best_mse_tr.append(min(all_mse_tr))
        best_mse_va.append(min(all_mse_va))

    plt.plot(hidden_nodes, best_mse_te, label="Test MSE")
    plt.plot(hidden_nodes, best_mse_tr, label="Training MSE")
    plt.plot(hidden_nodes, best_mse_va, label="Validation MSE")
    plt.title("Vowel MSE vs Hidden Nodes")
    plt.xlabel("Hidden Nodes")
    plt.ylabel("MSE")
    plt.legend()
    plt.show()
Example #18
def prob_3():
    # read in vowels dataset
    arff = Arff('datasets/vowels.arff')

    # Leave out the test/train and person features, which are unnecessary.
    arff = arff.create_subset_arff(col_idx=slice(2, None), label_count=1)

    # Get a 75/25 split
    arff.shuffle()
    training = arff.create_subset_arff(slice(arff.instance_count // 4))
    test = arff.create_subset_arff(slice(arff.instance_count // 4, -1))
    t_features = test.get_features()
    t_labels = test.get_labels()

    # Get a 15% Validation set
    validation = training.create_subset_arff(slice(arff.instance_count // 5))
    training = training.create_subset_arff(
        slice(arff.instance_count // 5, None))
    v_features = validation.get_features()
    v_labels = validation.get_labels()

    training_mse = []
    validation_mse = []
    test_mse = []
    epochs = []
    domain = np.logspace(-3, 0)  # 50 learning rates, log-spaced from 1e-3 to 1
    for lr in domain:
        mse = 0
        vmse = 0
        tmse = 0
        e = 0
        for _ in range(3):
            learner = MultilayerPerceptronLearner(
                [training.features_count, 2 * training.features_count, 11],
                momentum=0)
            learner.zero_weights()
            learner.lr = lr
            learner.max_epoch = 500
            learner.train(training.get_features(), training.get_labels(),
                          validation.get_features(), validation.get_labels())
            e += learner.epochs
            tmse += learner.get_mse(test.get_features(), test.get_labels())
            mse += learner.get_mse(training.get_features(),
                                   training.get_labels())
            vmse += learner.get_mse(validation.get_features(),
                                    validation.get_labels())
        epochs.append(e / 3)
        training_mse.append(mse / 3)
        validation_mse.append(vmse / 3)
        test_mse.append(tmse / 3)

    plt.semilogx(domain, test_mse, label="Test Set MSE")
    plt.semilogx(domain, training_mse, label="Training Set MSE")
    plt.semilogx(domain, validation_mse, label="Validation Set MSE")
    plt.title("MSE vs Learning Rate")
    plt.xlabel("Learning Rate")
    plt.ylabel("Mean Squared Error")
    plt.legend()
    plt.show()

    plt.semilogx(domain, epochs)
    plt.title("Number of Training Epochs vs Learning Rate")
    plt.xlabel("Learning Rate")
    plt.ylabel("Number of Training Epochs")
    plt.show()