# Shared imports the snippets on this page rely on; the `m` argument is a
# pytest fixture providing a deepforest model, defined elsewhere in the test suite.
import os
import pandas as pd
from deepforest import main, evaluate, get_data


def test_evaluate_multi(m):
    csv_file = get_data("testfile_multi.csv")
    m = main.deepforest(num_classes=2, label_dict={"Alive": 0, "Dead": 1})
    ground_truth = pd.read_csv(csv_file)

    results = evaluate.evaluate(predictions=ground_truth,
                                ground_df=ground_truth,
                                show_plot=True,
                                root_dir=os.path.dirname(csv_file),
                                savedir=None)

    assert results["results"].shape[0] == ground_truth.shape[0]
    assert results["class_recall"].shape == (2, 4)
    assert all(results['class_recall'].recall == pd.Series([1, 1]))


#def test_evaluate_benchmark(m):
    #csv_file = "/Users/benweinstein/Documents/NeonTreeEvaluation/evaluation/RGB/benchmark_annotations.csv"
    #predictions = m.predict_file(csv_file=csv_file, root_dir=os.path.dirname(csv_file))
    #ground_truth = pd.read_csv(csv_file)

    #results = evaluate.evaluate(predictions=predictions, ground_df=ground_truth, show_plot=False, root_dir=os.path.dirname(csv_file), savedir=None)

    #assert results["results"].shape[0] == ground_truth.shape[0]
    #assert results["class_recall"].shape == (2,4)
Example #2
def run(m):
    csv_file = get_data("OSBS_029.csv")
    predictions = m.predict_file(csv_file=csv_file,
                                 root_dir=os.path.dirname(csv_file))
    predictions.label = "Tree"
    ground_truth = pd.read_csv(csv_file)
    results = evaluate.evaluate(predictions=predictions,
                                ground_df=ground_truth,
                                root_dir=os.path.dirname(csv_file),
                                savedir=None)
    return results


def test_evaluate(m):
    csv_file = get_data("OSBS_029.csv")
    predictions = m.predict_file(csv_file=csv_file, root_dir=os.path.dirname(csv_file))
    ground_truth = pd.read_csv(csv_file)
    
    results = evaluate.evaluate(predictions=predictions, ground_df=ground_truth, show_plot=True, root_dir=os.path.dirname(csv_file), savedir=None)

    assert results["results"].shape[0] == ground_truth.shape[0]
    assert results["box_recall"] > 0.5
    assert results["class_recall"].shape == (1,4)
    assert results["results"].true_label.unique() == "Tree"
Example #4
    def evaluate(self, csv_file, root_dir, iou_threshold=None, savedir=None):
        """Compute intersection-over-union and precision/recall for a given iou_threshold

        Args:
            csv_file: location of a csv file with columns "name","xmin","ymin","xmax","ymax","label", each box in a row
            root_dir: location of files in the dataframe 'name' column.
            iou_threshold: float [0,1], minimum intersection-over-union overlap between annotation and prediction for a match to be scored as a true positive
            savedir: optional path dir to save evaluation images
        Returns:
            results: dict with the matched results dataframe ("results"), box-level recall ("box_recall"), and per-class recall/precision ("class_recall") for the given threshold
        """
        # Load on GPU if available
        if torch.cuda.is_available():
            self.model = self.model.to("cuda")

        self.model.eval()
        self.model.score_thresh = self.config["score_thresh"]

        predictions = predict.predict_file(
            model=self.model,
            csv_file=csv_file,
            root_dir=root_dir,
            savedir=savedir,
            device=self.current_device,
            iou_threshold=self.config["nms_thresh"])

        ground_df = pd.read_csv(csv_file)
        ground_df["label"] = ground_df.label.apply(
            lambda x: self.label_dict[x])

        # Remove empty samples (boxes with xmin == xmax == 0) from the ground truth
        ground_df = ground_df[~((ground_df.xmin == 0) & (ground_df.xmax == 0))]

        # if no arg for iou_threshold, set as config
        if iou_threshold is None:
            iou_threshold = self.config["validation"]["iou_threshold"]

        results = evaluate_iou.evaluate(predictions=predictions,
                                        ground_df=ground_df,
                                        root_dir=root_dir,
                                        iou_threshold=iou_threshold,
                                        savedir=savedir)

        # Map numeric classes back to string labels; skipped when there are no predictions
        if not results["results"].empty:
            results["results"]["predicted_label"] = results["results"][
                "predicted_label"].apply(lambda x: self.numeric_to_label_dict[
                    x] if not pd.isnull(x) else x)
            results["results"]["true_label"] = results["results"][
                "true_label"].apply(lambda x: self.numeric_to_label_dict[x])
            results["predictions"] = predictions

        return results
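
A minimal usage sketch for the evaluate() method above, assuming the package layout used throughout these snippets (main, get_data) and a model with released weights; the use_release() call is an assumed helper and the exact weight-loading step may differ between versions.

import os

from deepforest import main, get_data

m = main.deepforest()
m.use_release()  # assumed helper that loads the prebuilt tree-detection weights

csv_file = get_data("OSBS_029.csv")
results = m.evaluate(csv_file=csv_file,
                     root_dir=os.path.dirname(csv_file),
                     iou_threshold=0.4,  # falls back to config["validation"]["iou_threshold"] when omitted
                     savedir=None)       # pass a directory to also save evaluation images

# Keys exercised by the assertions on this page
print(results["box_recall"])
print(results["class_recall"])
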
Example #5
def test_evaluate_save_images(m, tmpdir):
    csv_file = get_data("testfile_multi.csv")
    m = main.deepforest(num_classes=2, label_dict={"Alive": 0, "Dead": 1})
    ground_truth = pd.read_csv(csv_file)
    ground_truth["label"] = ground_truth.label.astype("category").cat.codes
    
    # Manipulate the data to create some false positives
    predictions = ground_truth.copy()
    predictions["score"] = 1
    predictions.loc[[36, 35, 34], "label"] = 0
    results = evaluate.evaluate(predictions=predictions, ground_df=ground_truth, root_dir=os.path.dirname(csv_file), savedir=tmpdir)
    assert all([os.path.exists("{}/{}".format(tmpdir,x)) for x in ground_truth.image_path])
Example #6
def test_evaluate_multi(m):
    csv_file = get_data("testfile_multi.csv")
    m = main.deepforest(num_classes=2, label_dict={"Alive": 0, "Dead": 1})
    ground_truth = pd.read_csv(csv_file)
    ground_truth["label"] = ground_truth.label.astype("category").cat.codes
    
    # Manipulate the data to create some false positives
    predictions = ground_truth.copy()
    predictions["score"] = 1
    predictions.loc[[36, 35, 34], "label"] = 0
    results = evaluate.evaluate(predictions=predictions, ground_df=ground_truth, root_dir=os.path.dirname(csv_file))
        
    assert results["results"].shape[0] == ground_truth.shape[0]
    assert results["class_recall"].shape == (2,4)
Example #7
def test_evaluate(m):
    csv_file = get_data("OSBS_029.csv")
    predictions = m.predict_file(csv_file=csv_file, root_dir=os.path.dirname(csv_file))
    predictions.label = "Tree"
    ground_truth = pd.read_csv(csv_file)
    predictions = predictions.loc[range(10)]
    results = evaluate.evaluate(predictions=predictions, ground_df=ground_truth, root_dir=os.path.dirname(csv_file))

    assert results["results"].shape[0] == ground_truth.shape[0]
    assert results["box_recall"] > 0.1
    assert results["class_recall"].shape == (1,4)
    assert results["class_recall"].recall.values == 1
    assert results["class_recall"].precision.values == 1
    assert "score" in results["results"].columns
    assert results["results"].true_label.unique() == "Tree"
Example #8
    def evaluate(self,
                 csv_file,
                 root_dir,
                 iou_threshold=None,
                 show_plot=False,
                 savedir=None):
        """Compute intersection-over-union and precision/recall for a given iou_threshold

        Args:
            csv_file: location of a csv file with columns "name","xmin","ymin","xmax","ymax","label", each box in a row
            root_dir: location of files in the dataframe 'name' column.
            iou_threshold: float [0,1], minimum intersection-over-union overlap between annotation and prediction for a match to be scored as a true positive
            show_plot: open a blocking matplotlib window to show plot and annotations, useful for debugging.
            savedir: optional path dir to save evaluation images
        Returns:
            results: dict with the matched results dataframe ("results"), box-level recall ("box_recall"), and per-class recall/precision ("class_recall") for the given threshold
        """
        self.model.eval()

        if not self.device.type == "cpu":
            self.model = self.model.to(self.device)

        predictions = predict.predict_file(
            model=self.model,
            csv_file=csv_file,
            root_dir=root_dir,
            savedir=savedir,
            device=self.device,
            iou_threshold=self.config["nms_thresh"])

        predictions["label"] = predictions.label.apply(
            lambda x: self.numeric_to_label_dict[x])
        ground_df = pd.read_csv(csv_file)

        # if no arg for iou_threshold, set as config
        if iou_threshold is None:
            iou_threshold = self.config["validation"]["iou_threshold"]

        results = evaluate_iou.evaluate(predictions=predictions,
                                        ground_df=ground_df,
                                        root_dir=root_dir,
                                        iou_threshold=iou_threshold,
                                        show_plot=show_plot)

        return results
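
A similar sketch for this earlier variant, which exposes show_plot for interactive debugging; same assumptions as the previous example (an assumed use_release() weight loader), with the returned keys taken from the surrounding tests.

import os

from deepforest import main, get_data

m = main.deepforest()
m.use_release()  # assumed helper that loads the prebuilt tree-detection weights

csv_file = get_data("OSBS_029.csv")
results = m.evaluate(csv_file=csv_file,
                     root_dir=os.path.dirname(csv_file),
                     show_plot=False)

per_box = results["results"]         # one row per ground-truth box with its matched prediction
per_class = results["class_recall"]  # per-class recall/precision table
print(results["box_recall"], per_class.recall.values)
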
def test_evaluate_multi(m):
    csv_file = get_data("testfile_multi.csv")
    predictions = m.predict_file(csv_file=csv_file, root_dir=os.path.dirname(csv_file))
    ground_truth = pd.read_csv(csv_file)
    
    results = evaluate.evaluate(predictions=predictions, ground_df=ground_truth, show_plot=True, root_dir=os.path.dirname(csv_file), savedir=None)

    assert results["results"].shape[0] == ground_truth.shape[0]
    assert results["class_recall"].shape == (2,4)

#def test_evaluate_benchmark(m):
    #csv_file = "/Users/benweinstein/Documents/NeonTreeEvaluation/evaluation/RGB/benchmark_annotations.csv"
    #predictions = m.predict_file(csv_file=csv_file, root_dir=os.path.dirname(csv_file))
    #ground_truth = pd.read_csv(csv_file)
    
    #results = evaluate.evaluate(predictions=predictions, ground_df=ground_truth, show_plot=False, root_dir=os.path.dirname(csv_file), savedir=None)     
        
    #assert results["results"].shape[0] == ground_truth.shape[0]
    #assert results["class_recall"].shape == (2,4)