Example #1
import numpy as np
import pandas as pd

# check_file and evaluate_image are helpers assumed to be defined in the
# surrounding module; they are not shown in this listing.


def evaluate(predictions,
             ground_df,
             root_dir,
             show_plot=True,
             iou_threshold=0.4,
             savedir=None):
    """Image annotated crown evaluation routine
    submission can be submitted as a .shp, existing pandas dataframe or .csv path

    Args:
        predictions: a pandas DataFrame; root_dir is needed to resolve the relative paths of the files named in df.name
        ground_df: a pandas DataFrame; root_dir is needed to resolve the relative paths of the files named in df.name
        root_dir: location of the files listed in the dataframe's 'name' column.
        show_plot: whether to show boxes as they are plotted
    Returns:
        results: a dataframe of matched bounding boxes
        box_recall: proportion of true positives of box position, regardless of class
        box_precision: proportion of predictions that are true positive, regardless of class
        class_recall: a pandas dataframe of class level recall and precision with class sizes
    """

    check_file(ground_df)
    check_file(predictions)

    # Run evaluation on all plots
    results = []
    box_recalls = []
    box_precisions = []
    for image_path, group in predictions.groupby("image_path"):

        # clean indices
        plot_ground_truth = ground_df[ground_df["image_path"] ==
                                      image_path].reset_index(drop=True)
        group = group.reset_index(drop=True)
        result = evaluate_image(predictions=group,
                                ground_df=plot_ground_truth,
                                show_plot=show_plot,
                                root_dir=root_dir,
                                savedir=savedir)
        result["image_path"] = image_path
        result["match"] = result.IoU > iou_threshold
        true_positive = sum(result["match"])
        recall = true_positive / result.shape[0]
        precision = true_positive / group.shape[0]

        box_recalls.append(recall)
        box_precisions.append(precision)
        results.append(result)

    if len(results) == 0:
        print("No predictions made, setting precision and recall to 0")
        # Return early: with no matches there are no per-class metrics to compute.
        return {
            "results": pd.DataFrame(),
            "box_precision": 0,
            "box_recall": 0,
            "class_recall": pd.DataFrame()
        }

    results = pd.concat(results)
    box_precision = np.mean(box_precisions)
    box_recall = np.mean(box_recalls)

    # Per-class recall and precision
    class_recall_dict = {}
    class_precision_dict = {}
    class_size = {}

    for name, group in results.groupby("true_label"):
        class_recall_dict[name] = sum(
            group.true_label == group.predicted_label) / ground_df.shape[0]
        class_precision_dict[name] = sum(
            group.true_label == group.predicted_label) / predictions.shape[0]
        class_size[name] = group.shape[0]

    class_recall = pd.DataFrame({
        "label": class_recall_dict.keys(),
        "recall": pd.Series(class_recall_dict),
        "precision": pd.Series(class_precision_dict),
        "size": pd.Series(class_size)
    }).reset_index(drop=True)

    return {
        "results": results,
        "box_precision": box_precision,
        "box_recall": box_recall,
        "class_recall": class_recall
    }
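
A minimal usage sketch for the routine above. The CSV file names, the image directory, and the assumption that both dataframes carry box columns such as image_path, xmin, ymin, xmax, ymax and label are illustrative only and not part of the example itself:

import pandas as pd

# Hypothetical inputs: one row per predicted / annotated bounding box.
predictions = pd.read_csv("predictions.csv")
ground_df = pd.read_csv("ground_truth.csv")

metrics = evaluate(predictions=predictions,
                   ground_df=ground_df,
                   root_dir="/path/to/images",
                   show_plot=False,
                   iou_threshold=0.4)

print("box recall:", metrics["box_recall"])
print("box precision:", metrics["box_precision"])
print(metrics["class_recall"])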
Example #2
import numpy as np
import pandas as pd

# As in Example #1, check_file and evaluate_image are helpers from the
# surrounding module and are not shown here.


def evaluate(predictions,
             ground_df,
             root_dir,
             iou_threshold=0.4,
             savedir=None):
    """Image annotated crown evaluation routine
    submission can be submitted as a .shp, existing pandas dataframe or .csv path

    Args:
        predictions: a pandas DataFrame; root_dir is needed to resolve the relative paths of the files named in df.name. The labels in ground truth and predictions must match: if one is numeric, the other must be numeric.
        ground_df: a pandas DataFrame; root_dir is needed to resolve the relative paths of the files named in df.name
        root_dir: location of the files listed in the dataframe's 'name' column.
    Returns:
        results: a dataframe of matched bounding boxes
        box_recall: proportion of true positives of box position, regardless of class
        box_precision: proportion of predictions that are true positive, regardless of class
        class_recall: a pandas dataframe of class level recall and precision with class sizes
    """

    check_file(ground_df)
    check_file(predictions)

    # Run evaluation on all plots
    results = []
    box_recalls = []
    box_precisions = []
    for image_path, group in ground_df.groupby("image_path"):
        # clean indices
        image_predictions = predictions[predictions["image_path"] ==
                                        image_path].reset_index(drop=True)

        # If empty, add to the list without computing IoU
        if image_predictions.empty:
            result = pd.DataFrame({
                "truth_id": group.index.values,
                "prediction_id": None,
                "IoU": 0,
                "predicted_label": None,
                "score": None,
                "match": None,
                "true_label": group.label
            })
            result["image_path"] = image_path
            # An empty prediction set has a recall of 0 and an undefined precision.
            box_recalls.append(0)
            results.append(result)
            continue
        else:
            group = group.reset_index(drop=True)
            result = evaluate_image(predictions=image_predictions,
                                    ground_df=group,
                                    root_dir=root_dir,
                                    savedir=savedir)

        result["image_path"] = image_path
        result["match"] = result.IoU > iou_threshold
        true_positive = sum(result["match"])
        recall = true_positive / result.shape[0]
        precision = true_positive / image_predictions.shape[0]

        box_recalls.append(recall)
        box_precisions.append(precision)
        results.append(result)

    results = pd.concat(results)
    box_precision = np.mean(box_precisions)
    box_recall = np.mean(box_recalls)

    # Per-class recall and precision
    class_recall_dict = {}
    class_precision_dict = {}
    class_size = {}

    box_results = results[results.predicted_label.notna()]
    if box_results.empty:
        print("No predictions made")
        box_recall = 0
        box_precision = 0
        class_recall = pd.DataFrame()
        return {
            "results": results,
            "box_precision": box_precision,
            "box_recall": box_recall,
            "class_recall": class_recall
        }

    for name, group in box_results.groupby("true_label"):
        class_recall_dict[name] = sum(
            group.true_label == group.predicted_label) / group.shape[0]
        number_of_predictions = box_results[box_results.predicted_label ==
                                            name].shape[0]
        if number_of_predictions == 0:
            class_precision_dict[name] = 0
        else:
            class_precision_dict[name] = sum(
                group.true_label ==
                group.predicted_label) / number_of_predictions
        class_size[name] = group.shape[0]

    class_recall = pd.DataFrame({
        "label": class_recall_dict.keys(),
        "recall": pd.Series(class_recall_dict),
        "precision": pd.Series(class_precision_dict),
        "size": pd.Series(class_size)
    }).reset_index(drop=True)

    return {
        "results": results,
        "box_precision": box_precision,
        "box_recall": box_recall,
        "class_recall": class_recall
    }
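
To make the per-class bookkeeping at the end of Example #2 concrete, the sketch below reproduces the same recall/precision computation on a small hand-made results table; the labels and matches are invented purely for illustration:

import pandas as pd

# Toy matched results: one row per ground-truth box with the label of the
# prediction it was matched to (the shape produced by the loop in Example #2).
box_results = pd.DataFrame({
    "true_label": ["Tree", "Tree", "Tree", "Snag", "Snag"],
    "predicted_label": ["Tree", "Tree", "Snag", "Snag", "Tree"]
})

class_recall_dict = {}
class_precision_dict = {}
class_size = {}

for name, group in box_results.groupby("true_label"):
    correct = sum(group.true_label == group.predicted_label)
    # Recall: correctly labelled boxes over all ground truth of this class.
    class_recall_dict[name] = correct / group.shape[0]
    # Precision: correctly labelled boxes over all predictions of this class.
    number_of_predictions = box_results[box_results.predicted_label == name].shape[0]
    class_precision_dict[name] = correct / number_of_predictions if number_of_predictions else 0
    class_size[name] = group.shape[0]

class_recall = pd.DataFrame({
    "label": list(class_recall_dict.keys()),
    "recall": pd.Series(class_recall_dict),
    "precision": pd.Series(class_precision_dict),
    "size": pd.Series(class_size)
}).reset_index(drop=True)

print(class_recall)
# Expected: Snag gets recall 0.5 / precision 0.5, Tree gets recall 2/3 / precision 2/3.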