# Variants of calculate_overall_stats for binary, category, and sequence
# output features, in both the return-a-dict and the mutate-in-place API
# styles, shown side by side.
#
# Imports: scikit-learn metrics plus Ludwig helpers. The exact Ludwig
# module paths below are assumptions about where these names live.
import numpy as np
from sklearn.metrics import (
    average_precision_score,
    precision_recall_curve,
    roc_auc_score,
    roc_curve,
)

from ludwig.constants import LAST_PREDICTIONS, PREDICTIONS, PROBABILITIES
from ludwig.utils.metrics_utils import ConfusionMatrix


def calculate_overall_stats(predictions, targets, train_set_metadata):
    """Binary feature: confusion matrix, ROC curve, and precision-recall
    statistics computed from hard predictions and predicted probabilities."""
    overall_stats = {}

    confusion_matrix = ConfusionMatrix(
        targets,
        predictions[PREDICTIONS],
        labels=['False', 'True']
    )
    overall_stats['confusion_matrix'] = confusion_matrix.cm.tolist()
    overall_stats['overall_stats'] = confusion_matrix.stats()
    overall_stats['per_class_stats'] = confusion_matrix.per_class_stats()

    # ROC curve and area under it, from the positive-class probabilities.
    fpr, tpr, thresholds = roc_curve(targets, predictions[PROBABILITIES])
    overall_stats['roc_curve'] = {
        'false_positive_rate': fpr.tolist(),
        'true_positive_rate': tpr.tolist()
    }
    overall_stats['roc_auc_macro'] = roc_auc_score(
        targets, predictions[PROBABILITIES], average='macro')
    overall_stats['roc_auc_micro'] = roc_auc_score(
        targets, predictions[PROBABILITIES], average='micro')

    # Precision-recall curve and average precision under three averaging
    # schemes.
    ps, rs, thresholds = precision_recall_curve(
        targets, predictions[PROBABILITIES])
    overall_stats['precision_recall_curve'] = {
        'precisions': ps.tolist(),
        'recalls': rs.tolist()
    }
    overall_stats['average_precision_macro'] = average_precision_score(
        targets, predictions[PROBABILITIES], average='macro')
    overall_stats['average_precision_micro'] = average_precision_score(
        targets, predictions[PROBABILITIES], average='micro')
    overall_stats['average_precision_samples'] = average_precision_score(
        targets, predictions[PROBABILITIES], average='samples')

    return overall_stats
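# A minimal usage sketch for the binary variant above, with synthetic,
# purely illustrative values. It assumes integer-encoded binary targets,
# the Ludwig constants imported at the top of this file, and that this
# variant ignores train_set_metadata, so None is passed.
_targets = np.array([1, 0, 1, 1, 0])
_predictions = {
    PREDICTIONS: np.array([1, 0, 0, 1, 0]),
    PROBABILITIES: np.array([0.9, 0.2, 0.4, 0.8, 0.1]),
}
_stats = calculate_overall_stats(_predictions, _targets, None)
print(_stats['roc_auc_macro'])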
def calculate_overall_stats(predictions, targets, train_set_metadata): overall_stats = {} confusion_matrix = ConfusionMatrix(targets, predictions[PREDICTIONS], labels=["False", "True"]) overall_stats["confusion_matrix"] = confusion_matrix.cm.tolist() overall_stats["overall_stats"] = confusion_matrix.stats() overall_stats["per_class_stats"] = confusion_matrix.per_class_stats() fpr, tpr, thresholds = roc_curve(targets, predictions[PROBABILITIES]) overall_stats["roc_curve"] = { "false_positive_rate": fpr.tolist(), "true_positive_rate": tpr.tolist(), } overall_stats["roc_auc_macro"] = roc_auc_score( targets, predictions[PROBABILITIES], average="macro") overall_stats["roc_auc_micro"] = roc_auc_score( targets, predictions[PROBABILITIES], average="micro") ps, rs, thresholds = precision_recall_curve(targets, predictions[PROBABILITIES]) overall_stats["precision_recall_curve"] = { "precisions": ps.tolist(), "recalls": rs.tolist(), } overall_stats["average_precision_macro"] = average_precision_score( targets, predictions[PROBABILITIES], average="macro") overall_stats["average_precision_micro"] = average_precision_score( targets, predictions[PROBABILITIES], average="micro") overall_stats["average_precision_samples"] = average_precision_score( targets, predictions[PROBABILITIES], average="samples") return overall_stats
def calculate_overall_stats(test_stats, output_feature, dataset,
                            train_set_metadata):
    """Binary feature, in-place variant: writes the same statistics into
    test_stats[feature_name] instead of returning a new dict."""
    feature_name = output_feature['name']
    stats = test_stats[feature_name]

    confusion_matrix = ConfusionMatrix(
        dataset.get(feature_name),
        stats[PREDICTIONS],
        labels=['False', 'True']
    )
    stats['confusion_matrix'] = confusion_matrix.cm.tolist()
    stats['overall_stats'] = confusion_matrix.stats()
    stats['per_class_stats'] = confusion_matrix.per_class_stats()

    fpr, tpr, thresholds = roc_curve(
        dataset.get(feature_name), stats[PROBABILITIES])
    stats['roc_curve'] = {
        'false_positive_rate': fpr.tolist(),
        'true_positive_rate': tpr.tolist()
    }
    stats['roc_auc_macro'] = roc_auc_score(
        dataset.get(feature_name), stats[PROBABILITIES], average='macro')
    stats['roc_auc_micro'] = roc_auc_score(
        dataset.get(feature_name), stats[PROBABILITIES], average='micro')

    ps, rs, thresholds = precision_recall_curve(
        dataset.get(feature_name), stats[PROBABILITIES])
    stats['precision_recall_curve'] = {
        'precisions': ps.tolist(),
        'recalls': rs.tolist()
    }
    stats['average_precision_macro'] = average_precision_score(
        dataset.get(feature_name), stats[PROBABILITIES], average='macro')
    stats['average_precision_micro'] = average_precision_score(
        dataset.get(feature_name), stats[PROBABILITIES], average='micro')
    stats['average_precision_samples'] = average_precision_score(
        dataset.get(feature_name), stats[PROBABILITIES], average='samples')
def calculate_overall_stats(predictions, targets, train_set_metadata):
    """Category feature: confusion matrix with class names taken from the
    training metadata's idx2str mapping."""
    overall_stats = {}
    confusion_matrix = ConfusionMatrix(
        targets,
        predictions[PREDICTIONS],
        labels=train_set_metadata['idx2str']
    )
    overall_stats['confusion_matrix'] = confusion_matrix.cm.tolist()
    overall_stats['overall_stats'] = confusion_matrix.stats()
    overall_stats['per_class_stats'] = confusion_matrix.per_class_stats()
    return overall_stats
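# Hedged usage sketch for the category variant above: train_set_metadata
# is assumed to be a dict carrying 'idx2str', the list mapping integer
# class indices to class names, which is what the function reads.
# Synthetic values are illustrative only.
_metadata = {'idx2str': ['red', 'green', 'blue']}
_targets = np.array([0, 2, 1, 1, 0])
_predictions = {PREDICTIONS: np.array([0, 2, 2, 1, 0])}
_stats = calculate_overall_stats(_predictions, _targets, _metadata)
print(_stats['per_class_stats'])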
def calculate_overall_stats(test_stats, output_feature, dataset,
                            train_set_metadata):
    """Category feature, in-place variant: writes the confusion matrix
    statistics into test_stats[feature_name]."""
    feature_name = output_feature['name']
    stats = test_stats[feature_name]
    confusion_matrix = ConfusionMatrix(
        dataset.get(feature_name),
        stats[PREDICTIONS],
        labels=train_set_metadata[feature_name]['idx2str']
    )
    stats['confusion_matrix'] = confusion_matrix.cm.tolist()
    stats['overall_stats'] = confusion_matrix.stats()
    stats['per_class_stats'] = confusion_matrix.per_class_stats()
def calculate_overall_stats(predictions, targets, train_set_metadata):
    """Sequence feature: confusion matrix statistics computed on the last
    non-padding element of each target sequence (padding is assumed to be
    encoded as 0)."""
    overall_stats = {}
    sequences = targets
    # Pick the last non-zero element of each row: the cumulative sum of
    # the non-zero mask stops growing after the final non-padding token,
    # and argmax returns the first index where that maximum is reached.
    last_elem_sequence = sequences[
        np.arange(sequences.shape[0]),
        (sequences != 0).cumsum(1).argmax(1)
    ]
    confusion_matrix = ConfusionMatrix(
        last_elem_sequence,
        predictions[LAST_PREDICTIONS],
        labels=train_set_metadata['idx2str']
    )
    overall_stats['confusion_matrix'] = confusion_matrix.cm.tolist()
    overall_stats['overall_stats'] = confusion_matrix.stats()
    overall_stats['per_class_stats'] = confusion_matrix.per_class_stats()
    return overall_stats
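# Why (sequences != 0).cumsum(1).argmax(1) finds the last non-padding
# position: the cumulative sum of the non-zero mask stops growing after
# the final non-zero entry, and argmax returns the first index where the
# row maximum occurs, i.e. exactly that entry. Pure-numpy demonstration
# with made-up sequences (caveat: an all-zero row yields index 0):
_sequences = np.array([
    [3, 5, 2, 0, 0],   # last real token at index 2
    [7, 0, 0, 0, 0],   # last real token at index 0
    [1, 4, 6, 2, 9],   # no padding; last token at index 4
])
_last_idx = (_sequences != 0).cumsum(1).argmax(1)
print(_last_idx)                                              # [2 0 4]
print(_sequences[np.arange(_sequences.shape[0]), _last_idx])  # [2 7 9]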
def calculate_overall_stats(test_stats, output_feature, dataset,
                            train_set_metadata):
    """Sequence feature, in-place variant: same last-element extraction as
    above, writing results into test_stats[feature_name]."""
    feature_name = output_feature['name']
    sequences = dataset.get(feature_name)
    last_elem_sequence = sequences[
        np.arange(sequences.shape[0]),
        (sequences != 0).cumsum(1).argmax(1)
    ]
    stats = test_stats[feature_name]
    confusion_matrix = ConfusionMatrix(
        last_elem_sequence,
        stats[LAST_PREDICTIONS],
        labels=train_set_metadata[feature_name]['idx2str']
    )
    stats['confusion_matrix'] = confusion_matrix.cm.tolist()
    stats['overall_stats'] = confusion_matrix.stats()
    stats['per_class_stats'] = confusion_matrix.per_class_stats()