Example #1
0
def compute_save_auc(data_set_name, image_pred_method, res_path, image_labels,
                     image_predictions, class_name):
    '''
    Computes the single-class AUC, saves the score to a CSV file, and renders
    the ROC curve plus a raw and a normalized confusion matrix.
    :param data_set_name: data set identifier, embedded in output file names
    :param image_pred_method: prediction-method tag, embedded in the CSV name
    :param res_path: output directory passed to the save/plot helpers
    :param image_labels: ground-truth binary labels per image
    :param image_predictions: per-image scores, thresholded at 0.5 below
    :param class_name: name of the evaluated class (CSV row label)
    :return: None
    '''
    auc_scores, fpr, tpr, roc_auc = compute_auc_1class(image_labels,
                                                       image_predictions)
    csv_name = 'auc_prob_' + data_set_name + '_' + image_pred_method + '.csv'
    save_evaluation_results([class_name], auc_scores, csv_name, res_path)

    plot_roc_curve(fpr, tpr, roc_auc, data_set_name, res_path)

    binarized_predictions = np.array(image_predictions > 0.5, dtype=np.float32)
    conf_matrix = confusion_matrix(image_labels, binarized_predictions)

    # Plot the matrix twice: raw counts, then row-normalized ('norm' suffix).
    for suffix, normalize in (('', False), ('norm', True)):
        plot_confusion_matrix(conf_matrix, [0, 1],
                              res_path,
                              data_set_name + suffix,
                              normalize=normalize,
                              title=None)
Example #2
0
def compute_save_inst_auc_results(data_set_name, res_path, inst_auc):
    """
    Averages instance-level AUC scores over axis 0, prints the result, and
    writes it to 'inst_auc_<data_set_name>.csv' via save_evaluation_results.
    """
    avg_auc = np.mean(inst_auc, axis=0)
    print("Instance AUC")
    print(avg_auc)
    csv_name = "inst_auc_" + data_set_name + '.csv'
    save_evaluation_results(["inst_auc"], avg_auc, csv_name, res_path,
                            add_col=None, add_value=None)
Example #3
0
def compute_save_dice_results(data_set_name, res_path, has_bbox, dice_scores):
    """
    Averages Dice scores over axis 0 — entries equal to -1 are masked out as
    "no score" — prints the mean, and writes it to 'dice_<data_set_name>.csv'.

    NOTE(review): `has_bbox` is accepted but never used in this body; kept
    for signature compatibility with callers.
    """
    masked_scores = np.ma.masked_array(dice_scores,
                                       mask=np.equal(dice_scores, -1))
    avg_dice = np.mean(masked_scores, axis=0)
    print("DICE")
    print(avg_dice)
    save_evaluation_results(["dice"], avg_dice,
                            "dice_" + data_set_name + '.csv', res_path,
                            add_col=None, add_value=None)
Example #4
0
def compute_save_accuracy_results(data_set_name, res_path, has_bbox,
                                  acc_localization):
    """
    Computes per-class localization accuracy as
    (#accurate segmentations) / (#samples with a bounding box), prints it,
    and writes it to 'accuracy_<data_set_name>.csv'.
    """
    print("accuracy bbox present vs accurate")
    accurate_total = np.sum(acc_localization, axis=0)
    bbox_total = np.sum(has_bbox, axis=0)

    # Classes with zero bounding boxes divide by zero; suppress the warnings
    # and let NumPy emit inf/nan for those entries.
    with np.errstate(divide='ignore', invalid='ignore'):
        accuracy_per_class = accurate_total / bbox_total
    print("ACCURACY RESULTS FROM BBOX")
    print(accuracy_per_class)
    save_evaluation_results(["accuracy"], accuracy_per_class,
                            "accuracy_" + data_set_name + '.csv', res_path,
                            add_col=None, add_value=None)
Example #5
0
def compute_save_dice_results(eval_df, data_set_name, res_path, dice_scores):
    """
    Averages Dice scores over axis 0 — entries equal to -1 mark missing
    scores and are masked out — prints the mean, and records it in the
    shared 'evaluation_performance_<data_set_name>.csv' file.

    :param eval_df: accumulator dataframe passed through to
        save_evaluation_results
    :param data_set_name: data set identifier, embedded in the output file name
    :param res_path: directory where the CSV is written
    :param dice_scores: per-sample Dice scores; -1 means "no score"
    :return: the result of save_evaluation_results (presumably the updated
        eval_df — confirm against that helper)
    """
    masked_scores = np.ma.masked_array(dice_scores,
                                       mask=np.equal(dice_scores, -1))
    mean_dice = np.mean(masked_scores, axis=0)
    print("DICE")
    print(mean_dice)
    return save_evaluation_results(eval_df, ["dice"], mean_dice,
                                   "evaluation_performance_" + data_set_name
                                   + '.csv',
                                   res_path,
                                   add_col=None, add_value=None)