# Drop the first sys.path entry (presumably prepended earlier for local imports)
sys.path.pop(0)

# Pull the metrics from the model scripts
# KNN -----------------------------------------------
metrics_knn = produce_model_metrics(fit_knn, test_set, test_class_set, 'knn')
# Unpack each value from the metrics dictionary
predictions_knn = metrics_knn['predictions']
accuracy_knn = metrics_knn['accuracy']
fpr = metrics_knn['fpr']
tpr = metrics_knn['tpr']
auc_knn = metrics_knn['auc']

test_error_rate_knn = 1 - accuracy_knn

# Confusion Matrix
cross_tab_knn = create_conf_mat(test_class_set, predictions_knn)
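
# For reference, a minimal sketch of what produce_model_metrics is assumed
# to compute (illustrative only; the real implementation lives in the model
# scripts, and the function name below is hypothetical):
from sklearn.metrics import roc_curve, auc as calc_auc

def produce_model_metrics_sketch(fit, test_set, test_class_set):
    """Predict on the test set, score it, and trace the ROC curve."""
    predictions = fit.predict(test_set)
    accuracy = fit.score(test_set, test_class_set)
    # Probability of the positive class drives the ROC curve
    probas = fit.predict_proba(test_set)[:, 1]
    fpr, tpr, _ = roc_curve(test_class_set, probas)
    return {'predictions': predictions, 'accuracy': accuracy,
            'fpr': fpr, 'tpr': tpr, 'auc': calc_auc(fpr, tpr)}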

# RF ------------------------------------------------
metrics_rf = produce_model_metrics(fit_rf, test_set, test_class_set, 'rf')
# Unpack each value from the metrics dictionary
predictions_rf = metrics_rf['predictions']
accuracy_rf = metrics_rf['accuracy']
fpr2 = metrics_rf['fpr']
tpr2 = metrics_rf['tpr']
auc_rf = metrics_rf['auc']

test_error_rate_rf = 1 - accuracy_rf

cross_tab_rf = create_conf_mat(test_class_set, predictions_rf)
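
# With the ROC traces in hand, the two curves computed so far can be
# compared on a single plot (an illustrative sketch, assuming matplotlib
# is available):
import matplotlib.pyplot as plt

plt.figure()
plt.plot(fpr, tpr, label='KNN (AUC = {0: .3f})'.format(auc_knn))
plt.plot(fpr2, tpr2, label='RF (AUC = {0: .3f})'.format(auc_rf))
plt.plot([0, 1], [0, 1], 'k--', label='Chance')  # diagonal baseline
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
plt.show()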

# NN ----------------------------------------
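# Mirroring the KNN and RF blocks above (a sketch: assumes a fitted fit_nn
# estimator exists and that 'nn' is an accepted estimator key):
metrics_nn = produce_model_metrics(fit_nn, test_set, test_class_set, 'nn')
# Unpack each value from the metrics dictionary
predictions_nn = metrics_nn['predictions']
accuracy_nn = metrics_nn['accuracy']
fpr3 = metrics_nn['fpr']
tpr3 = metrics_nn['tpr']
auc_nn = metrics_nn['auc']

test_error_rate_nn = 1 - accuracy_nn

# Confusion Matrix
cross_tab_nn = create_conf_mat(test_class_set, predictions_nn)
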
# Example #2 ------------------------------------------------
    # cv_rf = GridSearchCV(fit_rf, cv=10,
    #                      param_grid=param_dist,
    #                      n_jobs=3)
    # cv_rf.fit(training_set, class_set)
    # print('Best Parameters using grid search: \n',
    #       cv_rf.best_params_)
    # end = time.time()
    # print('Time taken in grid search: {0: .2f}'\
    #       .format(end - start))
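
    # The commented block above references param_dist and start, which are
    # defined elsewhere; a self-contained version might look like this (the
    # parameter grid below is illustrative, not the author's):
    import time
    from sklearn.model_selection import GridSearchCV

    param_dist = {'max_depth': [2, 3, 4],
                  'bootstrap': [True, False]}

    start = time.time()
    cv_rf = GridSearchCV(fit_rf, cv=10, param_grid=param_dist, n_jobs=3)
    cv_rf.fit(training_set, class_set)
    print('Best Parameters using grid search: \n', cv_rf.best_params_)
    end = time.time()
    print('Time taken in grid search: {0: .2f}'.format(end - start))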

    # Test Set Calculations -------------------------------------
    # Test error rate
    test_error_rate_rf = 1 - accuracy_rf

    # Confusion Matrix
    test_crosstb = hf.create_conf_mat(test_class_set, predictions_rf)
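
    # A sketch of what hf.create_conf_mat is assumed to build (illustrative;
    # the real helper lives in the hf module):
    import pandas as pd
    conf_mat_sketch = pd.crosstab(test_class_set, predictions_rf,
                                  rownames=['Actual'],
                                  colnames=['Predicted'])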

    # Print Variable Importance
    hf.variable_importance(importances_rf, indices_rf)
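
    # hf.variable_importance presumably ranks features by their importance
    # scores; an illustrative sketch of that report:
    for rank, idx in enumerate(indices_rf, start=1):
        print('{0}. feature {1} ({2: .4f})'.format(rank, idx,
                                                   importances_rf[idx]))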

    # Cross validation
    print('Cross Validation:')
    hf.cross_val_metrics(fit_rf, training_set, class_set, print_results=True)
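
    # Under the hood, hf.cross_val_metrics is assumed to do something like
    # the following (illustrative only):
    from sklearn.model_selection import cross_val_score
    scores = cross_val_score(fit_rf, training_set, class_set, cv=10)
    print("Accuracy: {0: 0.3f} (+/- {1: 0.3f})"
          .format(scores.mean(), scores.std() * 2))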

    print('Confusion Matrix:')
    print(test_crosstb, '\n')

    print("Here is our mean accuracy on the test set:\n {0: 0.3f}"\
        .format(accuracy_rf))

    print("The test error rate for our model is:\n {0: .3f}"\