# ===== Example #1 =====
def main():
    """Fit a decision-tree classifier on the Iris data and print test accuracy."""
    print("---now running Decision Tree -> Classification---")

    iris = datasets.load_iris()
    features, labels = iris.data, iris.target

    # Hold out 40% of the samples for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.4, seed=3)

    model = classification_tree()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)

    print("Accuracy:", accuracy_score(y_test, predictions))
# ===== Example #2 =====
def main():
    """Fit an XGBoost classifier on the Iris data and print test accuracy."""
    print("--- now running XGBoost ---")

    iris = datasets.load_iris()
    features, labels = iris.data, iris.target

    # Hold out 40% of the samples for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.4, seed=2)

    model = xgboost_func()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)

    print("Accuracy:", accuracy_score(y_test, predictions))
# ===== Example #3 =====
def main():
    """Fit a naive Bayes classifier on the digits data and print test accuracy."""
    print("--- now running Naive Bayes ---")

    digits = datasets.load_digits()
    features = normalize(digits.data)
    labels = digits.target

    # Hold out 40% of the samples for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.4, seed=2)

    model = naive_bayes()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)

    print("Accuracy:", accuracy_score(y_test, predictions))
def Experiment_2(skfold_data):
    """Sweep naive-Bayes smoothing factors over k folds and plot mean accuracy.

    For each smoothing factor (0.0..0.9 in steps of 0.1, then 1..10), every
    fold is used once as the test set; per-fold accuracies are collected and
    their mean and standard deviation are shown as an error-bar plot.
    """
    print("Running experiment 2...")
    # Smoothing factors: fine grid below 1, integer grid from 1 to 10.
    smoothing_values = list(np.arange(0, 1, 0.1)) + list(np.arange(1, 11, 1))

    Accuracy = []
    for factor in smoothing_values:
        factor = round(factor, 2)
        fold_scores = []
        for fold_idx in range(len(skfold_data)):
            t_train, t_test = train_test.kfold_train_test(skfold_data, fold_idx)
            train_x, train_y, test_x, test_y = train_test.train_test_split(
                t_train, t_test)
            fold_scores.append(
                NBC.predictMAP(train_x, train_y, test_x, test_y, factor))
        Accuracy.append(fold_scores)

    # Mean accuracy across folds, one value per smoothing factor.
    avgAccuracy2 = [np.average(scores) for scores in Accuracy]
    print("list of accuracies:")
    print(Accuracy)
    print("list of average accuracies")
    print(avgAccuracy2)

    # Per-factor standard deviation across folds (error-bar heights).
    std = [np.std(scores) for scores in Accuracy]
    print("Standard Deviation: ")
    print(std)

    plt.errorbar(smoothing_values, avgAccuracy2, std)
    plt.xlabel('smoothing factor')
    plt.ylabel('Average Accuracies')
    plt.show()
# ===== Example #5 =====
    def fit(self,
            features,
            target_variable,
            test_sz=0.25,
            seed=2,
            threshold=None):
        """Run several classification algorithms and return a score card.

        Each algorithm is trained on a train/test split of the supplied data;
        its accuracy (and, where a random search was used, its best
        hyper-parameters) is recorded in a score card built by
        ``self.get_score_card``.

        Parameters
        ----------
        features : array-like
            Feature matrix X.
        target_variable : array-like
            Target vector y.
        test_sz : float, default 0.25
            Fraction of the data held out for testing.
        seed : int, default 2
            Kept for backward compatibility; currently unused — the split
            below is performed without it.
        threshold : float or None, default None
            When set, the slower algorithms (XGBoost, Adaboost) are skipped
            as soon as the best accuracy so far reaches this value.

        Returns
        -------
        The score card from ``self.get_score_card``, or ``None`` when the
        data contains only a single target class.
        """

        print("---AutoML Running ---\n\n")
        X = features
        y = target_variable
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=test_sz)

        num_class = len(np.unique(target_variable))

        # A single class means there is nothing to classify.
        if num_class == 1:
            print("Invalid Data Alert : Only one target class")
            return

        if num_class == 2:
            print("-- Begin Binary Classification --")

        if num_class > 2:
            print("-- Begin Multi Class Classification --")
            print("Number of detected classes : ", num_class)

        best_so_far_ = 0
        scoreCard = []  # list of per-algorithm result dicts

        # Linear algorithms first — they are the fastest.

        #############################################################################
        # Linear Discriminant Analysis — needs more than one feature column
        # to project into 2 dimensions.
        if X.shape[1] != 1:
            clf = LinearDiscrimentAnalysis(projection_dim=2)
            clf.fit(X_train, y_train)
            pred = clf.predict(X_test, y_test)
            score = accuracy_score(y_test, pred)
            scoreCard.append({
                'Algorithm': 'Linear Discriminant Analysis',
                'Accuracy Score': score,
                'Params': {
                    'projection_dim': 2
                }
            })
            best_so_far_ = max(best_so_far_, score)
            del clf

        #############################################################################
        # Naive Bayes — no hyper-parameters to tune.

        clf = naive_bayes()
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        score = accuracy_score(y_test, y_pred)
        scoreCard.append({
            'Algorithm': 'Naive Bayes',
            'Accuracy Score': score,
            'Params': '-'
        })
        best_so_far_ = max(best_so_far_, score)
        del clf

        #############################################################################
        # Logistic Regression — random search over lr_dic.

        rs = random_search(LogisticRegression, lr_dic, n_iter=300)
        rs.fit(X_train, y_train, X_test, y_test)
        scoreCard.append({
            'Algorithm': 'Logistic Regression',
            'Accuracy Score': rs.best_score_,
            'Params': rs.best_params_
        })
        best_so_far_ = max(best_so_far_, rs.best_score_)
        del rs

        #############################################################################
        # KNN — best effort: a failure here should not abort the whole run,
        # so the exception is deliberately swallowed (but kept narrow).
        try:
            clf = KNN(X_train, X_test, y_train, y_test)
            score = clf[0]
            param = clf[1]
            best_so_far_ = max(best_so_far_, score)
            scoreCard.append({
                'Algorithm': 'KNN',
                'Accuracy Score': score,
                'Params': param
            })
            del clf
        except Exception:
            pass

        #############################################################################
        # Decision Tree

        clf = classification_tree()
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        score = accuracy_score(y_test, y_pred)

        scoreCard.append({
            'Algorithm': 'Decision Tree',
            'Accuracy Score': score,
            'Params': {
                'min_samples_split': 2
            }
        })
        best_so_far_ = max(best_so_far_, score)

        del clf

        #############################################################################
        # Early exit: the remaining algorithms are the slow ones, so stop
        # here if the caller's accuracy threshold has already been met.

        if (threshold is not None and best_so_far_ >= threshold):
            score_card = self.get_score_card(scoreCard)
            return score_card

        #############################################################################
        # XGBoost — random search over xgb_dic.

        rs = random_search(xgboost_func, xgb_dic, n_iter=1)  # 50)
        rs.fit(X_train, y_train, X_test, y_test)
        scoreCard.append({
            'Algorithm': 'XGBoost',
            'Accuracy Score': rs.best_score_,
            'Params': rs.best_params_
        })
        best_so_far_ = max(best_so_far_, rs.best_score_)
        del rs

        #############################################################################
        # Adaboost — random search over adb_dic.

        rs = random_search(Adaboost, adb_dic, n_iter=7)
        rs.fit(X_train, y_train, X_test, y_test)
        scoreCard.append({
            'Algorithm': 'Adaboost',
            'Accuracy Score': rs.best_score_,
            'Params': rs.best_params_
        })
        best_so_far_ = max(best_so_far_, rs.best_score_)
        del rs

        #############################################################################

        score_card = self.get_score_card(scoreCard)
        return score_card
def Experiment_1(skfold_data):
    """Learning-curve experiment: accuracy vs. training-set size for naive
    Bayes with smoothing factor m=0 and m=1.

    For each fold of `skfold_data` and each subsample fraction (0.1..1.0),
    a prefix of the training fold is used to train and the held-out fold to
    test; mean accuracy and standard deviation across folds are plotted
    against training-set size.

    NOTE(review): assumes `skfold_data` is the k-fold structure consumed by
    `train_test.kfold_train_test` — confirm against the caller.
    """
    print("Running experiment 1...")
    accuracy_m0 = []  # per-fold accuracy lists for smoothing factor m=0
    accuracy_m1 = []  # per-fold accuracy lists for smoothing factor m=1

    #smoothing factor
    m = [0, 1]
    #for each smoothing factor
    for sm in m:
        for i in range(len(skfold_data)):
            #this loop considers ith fold for test dataset
            #get train(900) and test(100)
            t_train, t_test = train_test.kfold_train_test(skfold_data, i)

            #generate subsample factors
            sampling_factor = np.arange(0.1, 1.1, 0.1)
            check_acc = []
            size_of_train = []  # also read after the loops, for the x-axis of the plot
            for n in sampling_factor:
                #loop for subsamples
                n = round(n, 2)
                #n = 0.2
                sample_size_for_train = int(len(t_train) * n)
                size_of_train.append(sample_size_for_train)
                #randomly select datapoints
                #sample_train = random.sample(t_train,sample_size_for_train)
                # take the leading prefix of the fold as the subsample
                sample_train = t_train[0:sample_size_for_train]
                train_x, train_y, test_x, test_y = train_test.train_test_split(
                    sample_train, t_test)
                accu = NBC.predictMAP(train_x, train_y, test_x, test_y, sm)
                #append all the accuracies of subsamples of kth fold
                check_acc.append(accu)
            if (sm == 0):
                accuracy_m0.append(check_acc)
            if (sm == 1):
                accuracy_m1.append(check_acc)
    # average accuracy per subsample size, across folds, for each m
    avgAccu0, avgAccu1 = calAccuracy(accuracy_m0, accuracy_m1)
    print("Average accuracies when m=0: ")
    print(avgAccu0)
    print("Average accuracies when m=1: ")
    print(avgAccu1)

    #calculate standard deviation
    sd_0 = []
    sd_1 = []
    for i in range(len(accuracy_m0)):
        x1 = np.std(accuracy_m0[i])
        sd_0.append(x1)
        x2 = np.std(accuracy_m1[i])
        sd_1.append(x2)

    print("standard deviation for m=0: ", sd_0)
    print("standard deviation for m=1: ", sd_1)
    """Refered some online material to know about how to plot error bar graphs"""
    """https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.errorbar.html"""
    """https://pythonforundergradengineers.com/python-matplotlib-error-bars.html"""
    # `size_of_train` holds the sizes from the LAST fold processed; every
    # fold produces the same sizes, so it is reused here as the x-axis.
    plt.errorbar(size_of_train, avgAccu0, sd_0, label='m=0')
    plt.errorbar(size_of_train, avgAccu1, sd_1, label='m=1')
    plt.legend(loc='lower right')
    plt.xlabel('train set size')
    plt.ylabel('Average Accuracies')
    plt.show()