import math

from sklearn.metrics import (
    average_precision_score,
    brier_score_loss,
    log_loss,
    r2_score,
    roc_auc_score,
)


def get_scores(y_true, y_pred):
    # Probability-quality metrics: the Brier score is returned as its square
    # root (an RMSE-style value), alongside log loss, ROC AUC, average
    # precision, and R^2.
    brier_score = brier_score_loss(y_true, y_pred)
    log_score = log_loss(y_true, y_pred)
    roc_score = roc_auc_score(y_true, y_pred)
    pr_score = average_precision_score(y_true, y_pred)
    r2score = r2_score(y_true, y_pred)
    return math.sqrt(brier_score), log_score, roc_score, pr_score, r2score
def get_scores(shots):
    # Variant of the scorer above that accepts shot objects exposing a binary
    # outcome (`result`) and a predicted probability (`pred`).
    y_true = [shot.result for shot in shots]
    y_pred = [shot.pred for shot in shots]
    brier_score = brier_score_loss(y_true, y_pred)
    log_score = log_loss(y_true, y_pred)
    roc_score = roc_auc_score(y_true, y_pred)
    pr_score = average_precision_score(y_true, y_pred)
    r2score = r2_score(y_true, y_pred)
    return math.sqrt(brier_score), log_score, roc_score, pr_score, r2score
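# Minimal usage sketch for the shot-based get_scores(). The Shot namedtuple
# and the sample values below are hypothetical stand-ins, not from the
# original source; any object with `result` and `pred` attributes works.
from collections import namedtuple

Shot = namedtuple('Shot', ['result', 'pred'])

example_shots = [Shot(1, 0.8), Shot(0, 0.2), Shot(1, 0.6), Shot(0, 0.4)]
rmse_brier, log_score, roc_score, pr_score, r2score = get_scores(example_shots)
print(rmse_brier, log_score, roc_score, pr_score, r2score)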
from sklearn.metrics import (
    accuracy_score,
    cohen_kappa_score,
    f1_score,
    jaccard_score,
    matthews_corrcoef,
    precision_score,
    recall_score,
)


def get_classification_metrics(ground_truth_labels, predicted_labels):
    # Label-based classification metrics collected into a single dict;
    # multi-class scores use weighted averaging. Note that brier_score_loss
    # expects probabilities for the positive class, so passing hard labels
    # only makes sense for binary problems.
    classification_metric_dict = {}
    classification_metric_dict['accuracy'] = accuracy_score(
        ground_truth_labels, predicted_labels)
    classification_metric_dict['precision'] = precision_score(
        ground_truth_labels, predicted_labels, average='weighted')
    classification_metric_dict['recall'] = recall_score(
        ground_truth_labels, predicted_labels, average='weighted')
    classification_metric_dict['f1_score'] = f1_score(
        ground_truth_labels, predicted_labels, average='weighted')
    classification_metric_dict['brier_score_loss'] = brier_score_loss(
        ground_truth_labels, predicted_labels)
    classification_metric_dict['matthews_corr_coef'] = matthews_corrcoef(
        ground_truth_labels, predicted_labels)
    classification_metric_dict['jaccard_score'] = jaccard_score(
        ground_truth_labels, predicted_labels, average='weighted')
    classification_metric_dict['cohen_kappa_score'] = cohen_kappa_score(
        ground_truth_labels, predicted_labels)
    return classification_metric_dict
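# Minimal usage sketch for get_classification_metrics(); the binary label
# arrays below are made-up placeholders, not data from the original source.
ground_truth = [0, 1, 1, 0, 1, 0]
predicted = [0, 1, 0, 0, 1, 1]
for name, value in get_classification_metrics(ground_truth, predicted).items():
    print(f'{name}: {value:.3f}')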