Example #1
        X_train = X_train_origin[features]
        X_test = X_test_origin[features]

        result = Results()

        # get test results for each classifier
        for clf, name in zip(clfs, clfNames):
            clf.fit(X_train, Y_train)
            preds = clf.predict(X_test)

            result.accuracy = metrics.accuracy_score(Y_test, preds)
            result.precision = metrics.precision_score(Y_test, preds)
            result.recall = metrics.recall_score(Y_test, preds)
            result.k_cohen = metrics.cohen_kappa_score(Y_test, preds)
            result.f1_measure = metrics.f1_score(Y_test, preds)
            # log loss is computed from predicted probabilities, not hard labels
            result.log_loss = metrics.log_loss(Y_test, clf.predict_proba(X_test))
            # write results to file
            printResults(result, name, len(features))

        featureSize -= 5

    # plot test and train results
    dirPath = "Classification/Test/"
    plotter = Plotter(clfNames, dirPath)
    metricNames = ["Accuracy", "Precision", "Recall", "K_cohen", "F1_measure", "Log-loss"]
    for i, name in enumerate(metricNames):
        plotter.plotMetric(dirPath + name + ".png", i + 1)
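
The snippet assumes several helpers defined elsewhere in the project (Results, printResults, the clfs/clfNames lists, and the train/test splits). A minimal sketch of what the first two might look like, purely as an assumption inferred from the calls above rather than the original project's code:

    # Hypothetical stand-ins, assumed from usage; not the original helpers.
    from dataclasses import dataclass, fields

    @dataclass
    class Results:
        accuracy: float = 0.0
        precision: float = 0.0
        recall: float = 0.0
        k_cohen: float = 0.0
        f1_measure: float = 0.0
        log_loss: float = 0.0

    def printResults(result, clfName, featureCount):
        # append one line per classifier / feature-count combination
        with open("results.txt", "a") as f:
            values = " ".join(f"{fld.name}={getattr(result, fld.name):.3f}"
                              for fld in fields(result))
            f.write(f"{clfName} ({featureCount} features): {values}\n")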
Example #2
        # evaluate on the held-out test set
        X_train = X_train_origin[features]
        X_test = X_test_origin[features]

        result = Results()
        # evaluate each classifier on the test split
        for clf, name in zip(clfs, clfNames):
            clf.fit(X_train, Y_train)
            preds = clf.predict(X_test)

            result.accuracy = metrics.accuracy_score(Y_test, preds)
            # macro averaging: compute each metric per class, then take the
            # unweighted mean (multiclass targets need an explicit average)
            result.precision = metrics.precision_score(Y_test, preds, average="macro")
            result.recall = metrics.recall_score(Y_test, preds, average="macro")
            result.k_cohen = metrics.cohen_kappa_score(Y_test, preds)
            result.f1_measure = metrics.f1_score(Y_test, preds, average="macro")
            result.log_loss = metrics.log_loss(Y_test, clf.predict_proba(X_test))
            printResults(result, name, len(features))

        featureSize -= 5

    dirPath = "MultiClassification/Test/"
    plotter = Plotter(clfNames, dirPath)
    metricNames = ["Accuracy", "Precision", "Recall", "K_cohen", "F1_measure", "Log-loss"]
    for i, name in enumerate(metricNames):
        plotter.plotMetric(dirPath + name + ".png", i + 1)

    dirPath = "MultiClassification/Train/"
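
Unlike Example #1, this multiclass variant passes average="macro" to the precision, recall, and F1 calls. A short self-contained illustration of what that does (the toy labels below are made up for demonstration):

    from sklearn import metrics

    y_true = [0, 1, 2, 2, 1, 0]
    y_pred = [0, 2, 2, 2, 1, 0]

    # per-class F1 scores, then their unweighted mean
    print(metrics.f1_score(y_true, y_pred, average=None))     # approx. [1.0, 0.667, 0.8]
    print(metrics.f1_score(y_true, y_pred, average="macro"))  # mean of the above

    # With the default average="binary", f1_score raises a ValueError
    # on multiclass targets such as these.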
Example #3
        X_train = X_train_origin[features]
        X_test = X_test_origin[features]

        result = Results()

        # get test results for each classifier
        for clf, name in zip(clfs, clfNames):
            clf.fit(X_train, Y_train)
            preds = clf.predict(X_test)

            result.accuracy = metrics.accuracy_score(Y_test, preds)
            result.precision = metrics.precision_score(Y_test, preds)
            result.recall = metrics.recall_score(Y_test, preds)
            result.k_cohen = metrics.cohen_kappa_score(Y_test, preds)
            result.f1_measure = metrics.f1_score(Y_test, preds)
            result.log_loss = metrics.log_loss(Y_test, clf.predict_proba(X_test))
            # write results to file
            printResults(result, name, len(features))

        featureSize -= 5

    # plot test and train results
    dirPath = "Classification/Test/"
    plotter = Plotter(clfNames, dirPath)
    metricNames = [
        "Accuracy", "Precision", "Recall", "K_cohen", "F1_measure", "Log-loss"
    ]
    for i, name in enumerate(metricNames):
        plotter.plotMetric(dirPath + name + ".png", i + 1)
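
Both binary examples (#1 and #3) hand an output directory to a Plotter and call plotMetric(path, metricIndex) once per metric. The class is not shown in any of the snippets; a possible minimal implementation, with the constructor signature and score storage assumed purely for illustration:

    import matplotlib.pyplot as plt

    class Plotter:
        # Hypothetical sketch; the real class is defined elsewhere in the project.
        def __init__(self, clfNames, dirPath, scores=None):
            # scores: {clfName: list of metric rows, one row per feature-set run}
            self.clfNames = clfNames
            self.dirPath = dirPath
            self.scores = scores or {}

        def plotMetric(self, outPath, metricIndex):
            # one line per classifier, tracking a single metric across runs
            for name in self.clfNames:
                rows = self.scores.get(name, [])
                plt.plot([row[metricIndex - 1] for row in rows], label=name)
            plt.legend()
            plt.savefig(outPath)
            plt.close()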
Example #4
        result = Results()
        for clf, name in zip(clfs, clfNames):
            clf.fit(X_train, Y_train)
            preds = clf.predict(X_test)

            result.accuracy = metrics.accuracy_score(Y_test, preds)
            result.precision = metrics.precision_score(Y_test, preds, average="macro")
            result.recall = metrics.recall_score(Y_test, preds, average="macro")
            result.k_cohen = metrics.cohen_kappa_score(Y_test, preds)
            result.f1_measure = metrics.f1_score(Y_test, preds, average="macro")
            result.log_loss = metrics.log_loss(Y_test, clf.predict_proba(X_test))
            printResults(result, name, len(features))

        featureSize -= 5

    dirPath = "MultiClassification/Test/"
    plotter = Plotter(clfNames, dirPath)
    metricNames = [
        "Accuracy", "Precision", "Recall", "K_cohen", "F1_measure", "Log-loss"
    ]
    for i, name in enumerate(metricNames):
        plotter.plotMetric(dirPath + name + ".png", i + 1)
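
All four examples score log loss on clf.predict_proba(X_test) rather than on the hard predictions. A small self-contained check of that call (the toy data and classifier below are made up):

    from sklearn import metrics
    from sklearn.linear_model import LogisticRegression

    X = [[0.0], [0.2], [0.8], [1.0]]
    y = [0, 0, 1, 1]

    clf = LogisticRegression().fit(X, y)

    # log_loss is computed from the predicted class probabilities,
    # which is why the examples pass predict_proba output, not predict output
    print(metrics.log_loss(y, clf.predict_proba(X)))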