def fit_and_predict(clf, C, count, fold, training_samples, training_labels,
                    validation_samples, validation_labels, use_balanced_set,
                    use_feature_selection):
    """Train *clf* on the training split, evaluate it on the validation
    split, and persist the resulting linear-SVM metrics to a result file.

    ``C``, ``count`` and ``fold`` are passed through unchanged so the
    result writer can label the run; ``use_balanced_set`` and
    ``use_feature_selection`` likewise only tag the output.
    """
    clf.fit(training_samples, training_labels)
    predictions = clf.predict(validation_samples)
    # calculate_precision_recall returns the five scalar metrics in this
    # fixed order (project helper, defined elsewhere in the package).
    metrics = calculate_precision_recall(validation_labels, predictions)
    accuracy, precision, recall, specificity, f_score = metrics
    create_result_txt_for_svm_linear(count, C, fold, accuracy, precision,
                                     recall, specificity, f_score,
                                     use_balanced_set, use_feature_selection)
def test_SVM_Linear(tempC, count, use_balanced_set, use_feature_selection):
    clf = svm.LinearSVC(C=tempC)
    training_samples, training_labels, test_samples, test_labels = samples_and_labels(
        count, 0, use_balanced_set, use_feature_selection)
    clf.fit(training_samples, training_labels)
    result = clf.predict(test_samples)
    accuracy, precision, recall, specificity, f_score = calculate_precision_recall(
        test_labels, result)
    create_result_txt_for_roc_and_pr_plots('svm_linear', clf, test_samples,
                                           test_labels, use_feature_selection)
    print 'Accuracy = ' + str(accuracy) + '\nPrecision = ' + str(
        precision) + '\nRecall = ' + str(recall) + '\nSpecificity = ' + str(
            specificity) + '\nF1 Score = ' + str(f_score) + '\n'
# Beispiel #3 / 0 -- stray sample-separator text left over from code
# extraction; commented out so the module parses.
def test_Random_Forest(num_tree, max_depth, count, use_balanced_set,
                       use_feature_selection):
    clf = RandomForestClassifier(n_estimators=num_tree, max_depth=max_depth)
    training_samples, training_labels, test_samples, test_labels = samples_and_labels(
        count, 0, use_balanced_set, use_feature_selection)
    clf.fit(training_samples, training_labels)
    result = clf.predict(test_samples)
    accuracy, precision, recall, specificity, f_score = calculate_precision_recall(
        test_labels, result)
    create_result_txt_for_roc_and_pr_plots('random_forest', clf, test_samples,
                                           test_labels, use_feature_selection)
    print 'Accuracy = ' + str(accuracy) + '\nPrecision = ' + str(
        precision) + '\nRecall = ' + str(recall) + '\nSpecificity = ' + str(
            specificity) + '\nF1 Score = ' + str(f_score) + '\n'
def test_Multinomial_Naive_bayes(alpha, count, use_balanced_set,
                                 use_feature_selection):
    clf = MultinomialNB(alpha=alpha)
    training_samples, training_labels, test_samples, test_labels = samples_and_labels(
        count, 0, use_balanced_set, use_feature_selection, True)
    clf.fit(training_samples, training_labels)
    result = clf.predict(test_samples)
    accuracy, precision, recall, specificity, f_score = calculate_precision_recall(
        test_labels, result)
    if not use_feature_selection:
        create_result_txt_for_roc_and_pr_plots('multinomial_naive_bayes', clf,
                                               test_samples, test_labels,
                                               use_feature_selection)
    print 'Accuracy = ' + str(accuracy) + '\nPrecision = ' + str(
        precision) + '\nRecall = ' + str(recall) + '\nSpecificity = ' + str(
            specificity) + '\nF1 Score = ' + str(f_score) + '\n'