FN: {false_negatives(y_test, preds_test, threshold)}
""")

print("Expected results")
print(f"There are {sum(df['category'] == 'TP')} TP")
print(f"There are {sum(df['category'] == 'TN')} TN")
print(f"There are {sum(df['category'] == 'FP')} FP")
print(f"There are {sum(df['category'] == 'FN')} FN")


# Run the next cell to see a summary of evaluation metrics for the model's predictions for each class.

# In[8]:


util.get_performance_metrics(y, pred, class_labels)


# Right now the table only contains TP, TN, FP, and FN. Throughout this assignment we'll fill in the remaining metrics to learn more about our model's performance.

# <a name='3-2'></a>
# ### 3.2 Accuracy

# 
# Let's use a threshold of 0.5 as the probability cutoff for our predictions in all classes, and calculate our model's accuracy as we normally would in a machine learning problem. 
# 
# $$accuracy = \frac{\text{true positives} + \text{true negatives}}{\text{true positives} + \text{true negatives} + \text{false positives} + \text{false negatives}}$$
# 
# Use this formula to compute accuracy below:
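
# For orientation, here is a minimal sketch of a function implementing the
# formula above (a hypothetical `accuracy_sketch`, not the graded helper; it
# assumes 0/1 labels and probability predictions binarized at a threshold):

import numpy as np

def accuracy_sketch(y, pred, th=0.5):
    y_hat = (pred >= th).astype(int)      # binarize probabilities at the cutoff
    tp = np.sum((y_hat == 1) & (y == 1))  # true positives
    tn = np.sum((y_hat == 0) & (y == 0))  # true negatives
    fp = np.sum((y_hat == 1) & (y == 0))  # false positives
    fn = np.sum((y_hat == 0) & (y == 1))  # false negatives
    return (tp + tn) / (tp + tn + fp + fn)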

# preview the test results dataframe
test_results.head()

# ground truth
y = test_results[class_labels].values
# predicted labels
pred = test_results[pred_labels].values


# view the ground-truth and predicted-probability columns side by side
test_results[np.concatenate([class_labels, pred_labels])].head()

df = util.get_performance_metrics(y, pred, class_labels, 
                                  acc=util.get_accuracy, 
                                  prevalence=util.get_prevalence, 
                                  sens=util.get_sensitivity, 
                                  spec=util.get_specificity, 
                                  ppv=util.get_ppv, 
                                  npv=util.get_npv, 
                                  auc=util.roc_auc_score,
                                  f1=f1_score, 
                                  thresholds=thresholds)
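
# The per-class metrics passed in above all reduce to ratios of the four
# confusion-matrix counts. A minimal sketch of those ratios (hypothetical
# `*_sketch` helpers, not `util`'s implementations; same 0/1-label and
# threshold assumptions as before):

import numpy as np

def _counts_sketch(y, pred, th=0.5):
    # confusion-matrix counts for a single class
    y_hat = (pred >= th)
    tp = np.sum(y_hat & (y == 1))
    tn = np.sum(~y_hat & (y == 0))
    fp = np.sum(y_hat & (y == 0))
    fn = np.sum(~y_hat & (y == 1))
    return tp, tn, fp, fn

def sensitivity_sketch(y, pred, th=0.5):
    tp, _, _, fn = _counts_sketch(y, pred, th)
    return tp / (tp + fn)  # recall: fraction of actual positives caught

def specificity_sketch(y, pred, th=0.5):
    _, tn, fp, _ = _counts_sketch(y, pred, th)
    return tn / (tn + fp)  # fraction of actual negatives correctly rejected

def ppv_sketch(y, pred, th=0.5):
    tp, _, fp, _ = _counts_sketch(y, pred, th)
    return tp / (tp + fp)  # precision: fraction of positive calls that are right

def npv_sketch(y, pred, th=0.5):
    _, tn, _, fn = _counts_sketch(y, pred, th)
    return tn / (tn + fn)  # fraction of negative calls that are right

# In the multi-label setup above these apply per class column,
# e.g. sensitivity_sketch(y[:, 0], pred[:, 0]).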




# ROC curves for each class (the helper's default curve)
util.get_curve(y, pred, class_labels)


# Precision-Recall curves are a useful measure of prediction success when the classes are very imbalanced.
util.get_curve(y, pred, class_labels, curve='prc')
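
# `util.get_curve`'s implementation is not shown here; equivalent ROC and
# precision-recall plots for a single class can be produced directly with
# scikit-learn. A minimal sketch (an assumption about the plots, not a copy
# of the util helper):

import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve, roc_curve

def plot_curves_sketch(y_true, y_prob, label):
    fig, (ax_roc, ax_prc) = plt.subplots(1, 2, figsize=(10, 4))
    fpr, tpr, _ = roc_curve(y_true, y_prob)                # ROC: TPR vs. FPR
    ax_roc.plot(fpr, tpr, label=label)
    ax_roc.plot([0, 1], [0, 1], "k--")                     # chance diagonal
    ax_roc.set(xlabel="False positive rate", ylabel="True positive rate")
    prec, rec, _ = precision_recall_curve(y_true, y_prob)  # precision vs. recall
    ax_prc.plot(rec, prec, label=label)
    ax_prc.set(xlabel="Recall", ylabel="Precision")
    for ax in (ax_roc, ax_prc):
        ax.legend()
    plt.show()

# e.g. plot_curves_sketch(y[:, 0], pred[:, 0], class_labels[0])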

util.plot_calibration_curve(y, pred, class_labels)
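
# A calibration curve compares predicted probabilities with observed outcome
# frequencies; a well-calibrated model tracks the diagonal. If
# `util.plot_calibration_curve` is unavailable, a similar single-class plot
# can be sketched with scikit-learn (an assumption, not the util helper):

import matplotlib.pyplot as plt
from sklearn.calibration import calibration_curve

def plot_calibration_sketch(y_true, y_prob, label, n_bins=10):
    # fraction of positives vs. mean predicted probability per bin
    frac_pos, mean_pred = calibration_curve(y_true, y_prob, n_bins=n_bins)
    plt.plot(mean_pred, frac_pos, marker="o", label=label)
    plt.plot([0, 1], [0, 1], "k--", label="perfectly calibrated")
    plt.xlabel("Mean predicted probability")
    plt.ylabel("Fraction of positives")
    plt.legend()
    plt.show()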