Example #1
def test_compute_IoU(download_release):
    m = main.deepforest()
    m.use_release(check_release=False)
    csv_file = get_data("OSBS_029.csv")
    predictions = m.predict_file(csv_file=csv_file,
                                 root_dir=os.path.dirname(csv_file))
    ground_truth = pd.read_csv(csv_file)

    predictions['geometry'] = predictions.apply(
        lambda x: shapely.geometry.box(x.xmin, x.ymin, x.xmax, x.ymax), axis=1)
    predictions = gpd.GeoDataFrame(predictions, geometry='geometry')

    ground_truth['geometry'] = ground_truth.apply(
        lambda x: shapely.geometry.box(x.xmin, x.ymin, x.xmax, x.ymax), axis=1)
    ground_truth = gpd.GeoDataFrame(ground_truth, geometry='geometry')

    ground_truth["label"] = 0
    predictions["label"] = 0
    visualize.plot_prediction_dataframe(df=predictions,
                                        ground_truth=ground_truth,
                                        root_dir=os.path.dirname(csv_file))

    result = IoU.compute_IoU(ground_truth, predictions)
    assert result.shape[0] == ground_truth.shape[0]
    assert sum(result.IoU) > 10
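
# Sanity check for the IoU assertion above: box IoU can be computed by hand
# with shapely alone. A minimal sketch with toy coordinates, not the test data:
import shapely.geometry

a = shapely.geometry.box(0, 0, 10, 10)
b = shapely.geometry.box(1, 1, 11, 11)
print(a.intersection(b).area / a.union(b).area)  # 81 / 119 ≈ 0.681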
Example #2
def test_evaluate_multi(m):
    csv_file = get_data("testfile_multi.csv")
    m = main.deepforest(num_classes=2, label_dict={"Alive": 0, "Dead": 1})
    ground_truth = pd.read_csv(csv_file)

    results = evaluate.evaluate(predictions=ground_truth,
                                ground_df=ground_truth,
                                show_plot=True,
                                root_dir=os.path.dirname(csv_file),
                                savedir=None)

    assert results["results"].shape[0] == ground_truth.shape[0]
    assert results["class_recall"].shape == (2, 4)
    assert all(results['class_recall'].recall == pd.Series([1, 1]))


#def test_evaluate_benchmark(m):
#    csv_file = "/Users/benweinstein/Documents/NeonTreeEvaluation/evaluation/RGB/benchmark_annotations.csv"
#    predictions = m.predict_file(csv_file=csv_file, root_dir=os.path.dirname(csv_file))
#    ground_truth = pd.read_csv(csv_file)

#    results = evaluate.evaluate(predictions=predictions, ground_df=ground_truth, show_plot=False, root_dir=os.path.dirname(csv_file), savedir=None)

#    assert results["results"].shape[0] == ground_truth.shape[0]
#    assert results["class_recall"].shape == (2, 4)
Example #3
def test_reload_multi_class(two_class_m, tmpdir):
    two_class_m.config["train"]["fast_dev_run"] = True
    two_class_m.create_trainer()
    two_class_m.trainer.fit(two_class_m)
    two_class_m.save_model("{}/checkpoint.pl".format(tmpdir))
    loaded = main.deepforest(num_classes=2, label_dict={"Alive": 0, "Dead": 1})
    old_model = torch.load("{}/checkpoint.pl".format(tmpdir))
    loaded.load_state_dict(old_model["state_dict"])
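
# Minimal sketch (plain PyTorch, illustrative): a Lightning-style checkpoint
# is a dict whose "state_dict" entry holds the weights, which is why
# torch.load(...)["state_dict"] above feeds straight into load_state_dict.
import torch
import torch.nn as nn

net = nn.Linear(2, 2)
torch.save({"state_dict": net.state_dict()}, "checkpoint.pl")
net.load_state_dict(torch.load("checkpoint.pl")["state_dict"])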
Example #4
def test_annotations_to_shapefile(download_release):
    img = get_data("OSBS_029.tif")
    with rio.open(img) as r:
        transform = r.transform
        crs = r.crs
    m = main.deepforest()
    m.use_release(check_release=False)
    df = m.predict_image(path=img)
    gdf = utilities.annotations_to_shapefile(df, transform=transform, crs=crs)
    assert df.shape[0] == gdf.shape[0]
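
# Hedged sketch: the affine transform maps pixel (col, row) to CRS
# coordinates, the conversion annotations_to_shapefile applies to each box
# corner (mechanism assumed, not shown in the snippet).
import rasterio as rio
from deepforest import get_data

with rio.open(get_data("OSBS_029.tif")) as src:
    x, y = src.transform * (0, 0)  # upper-left pixel corner in src.crs units
    print(x, y, src.crs)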
Example #5
def test_evaluate_save_images(m, tmpdir):
    csv_file = get_data("testfile_multi.csv")
    m = main.deepforest(num_classes=2, label_dict={"Alive": 0, "Dead": 1})
    ground_truth = pd.read_csv(csv_file)
    ground_truth["label"] = ground_truth.label.astype("category").cat.codes
    
    # Manipulate the data to create some false positives
    predictions = ground_truth.copy()
    predictions["score"] = 1
    predictions.loc[[36, 35, 34], "label"] = 0
    results = evaluate.evaluate(predictions=predictions,
                                ground_df=ground_truth,
                                root_dir=os.path.dirname(csv_file),
                                savedir=tmpdir)
    assert all([os.path.exists("{}/{}".format(tmpdir,x)) for x in ground_truth.image_path])
Example #6
def m(download_release):
    m = main.deepforest()
    m.config["train"]["csv_file"] = get_data("example.csv") 
    m.config["train"]["root_dir"] = os.path.dirname(get_data("example.csv"))
    m.config["train"]["fast_dev_run"] = False
    m.config["batch_size"] = 2
       
    m.config["validation"]["csv_file"] = get_data("example.csv") 
    m.config["validation"]["root_dir"] = os.path.dirname(get_data("example.csv"))
    
    m.use_release()
    
    return m
Example #7
def test_evaluate_empty():
    m = main.deepforest()
    m.config["score_thresh"] = 0.8
    csv_file = get_data("OSBS_029.csv")
    root_dir = os.path.dirname(csv_file)
    results = m.evaluate(csv_file, root_dir, iou_threshold=0.4)

    # With score_thresh at 0.8, no predictions survive, so both metrics are 0
    assert results["box_precision"] == 0
    assert results["box_recall"] == 0

    df = pd.read_csv(csv_file)
    assert results["results"].shape[0] == df.shape[0]
Example #8
def two_class_m():
    m = main.deepforest(num_classes=2, label_dict={"Alive": 0, "Dead": 1})
    m.config["train"]["csv_file"] = get_data("testfile_multi.csv") 
    m.config["train"]["root_dir"] = os.path.dirname(get_data("testfile_multi.csv"))
    m.config["train"]["fast_dev_run"] = True
    m.config["batch_size"] = 2
        
    m.config["validation"]["csv_file"] = get_data("testfile_multi.csv") 
    m.config["validation"]["root_dir"] = os.path.dirname(get_data("testfile_multi.csv"))

    m.create_trainer()
    
    return m
Example #9
def test_evaluate_multi(m):
    csv_file = get_data("testfile_multi.csv")
    m = main.deepforest(num_classes=2, label_dict={"Alive": 0, "Dead": 1})
    ground_truth = pd.read_csv(csv_file)
    ground_truth["label"] = ground_truth.label.astype("category").cat.codes
    
    # Manipulate the data to create some false positives
    predictions = ground_truth.copy()
    predictions["score"] = 1
    predictions.loc[[36, 35, 34], "label"] = 0
    results = evaluate.evaluate(predictions=predictions,
                                ground_df=ground_truth,
                                root_dir=os.path.dirname(csv_file))

    assert results["results"].shape[0] == ground_truth.shape[0]
    assert results["class_recall"].shape == (2,4)
Example #10
def m():
    m = main.deepforest()
    m.config["train"]["csv_file"] = get_data("example.csv")
    m.config["train"]["root_dir"] = os.path.dirname(get_data("example.csv"))
    m.config["train"]["fast_dev_run"] = True
    m.config["batch_size"] = 2

    m.config["validation"]["csv_file"] = get_data("example.csv")
    m.config["validation"]["root_dir"] = os.path.dirname(
        get_data("example.csv"))

    m.create_trainer()

    return m
Example #11
def test_save_and_reload_weights(m, tmpdir):
    img_path = get_data(path="2019_YELL_2_528000_4978000_image_crop2.png")
    m.config["train"]["fast_dev_run"] = True
    m.create_trainer()
    # Save predictions after training and compare with predictions after reloading the checkpoint
    m.trainer.fit(m)
    pred_after_train = m.predict_image(path=img_path)
    torch.save(m.model.state_dict(), "{}/checkpoint.pt".format(tmpdir))

    # Reload the checkpoint into a fresh model object
    after = main.deepforest()
    after.model.load_state_dict(torch.load("{}/checkpoint.pt".format(tmpdir)))
    pred_after_reload = after.predict_image(path=img_path)

    assert not pred_after_train.empty
    assert not pred_after_reload.empty
    pd.testing.assert_frame_equal(pred_after_train, pred_after_reload)
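
# Plain-PyTorch sketch of the round trip tested above: a raw state_dict saved
# and reloaded reproduces the weights exactly.
import torch
import torch.nn as nn

net = nn.Linear(4, 2)
torch.save(net.state_dict(), "weights.pt")
clone = nn.Linear(4, 2)
clone.load_state_dict(torch.load("weights.pt"))
assert torch.equal(net.weight, clone.weight)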
Example #12
def m(download_release):
    m = main.deepforest()
    m.config["train"]["csv_file"] = get_data("example.csv")
    m.config["train"]["root_dir"] = os.path.dirname(get_data("example.csv"))
    m.config["train"]["fast_dev_run"] = True
    m.config["batch_size"] = 2

    m.config["validation"]["csv_file"] = get_data("example.csv")
    m.config["validation"]["root_dir"] = os.path.dirname(
        get_data("example.csv"))
    m.config["workers"] = 0
    m.config["validation"]["val_accuracy_interval"] = 1
    m.config["train"]["epochs"] = 2

    m.create_trainer()
    m.use_release(check_release=False)

    return m
Example #13
def test_log_images_multiclass(m, tmpdir):
    m = main.deepforest(num_classes=2, label_dict={"Alive": 0, "Dead": 1})
    m.config["train"]["csv_file"] = get_data("testfile_multi.csv") 
    m.config["train"]["root_dir"] = os.path.dirname(get_data("testfile_multi.csv"))
    m.config["train"]["fast_dev_run"] = False
    m.config["batch_size"] = 2
       
    m.config["validation"]["csv_file"] = get_data("testfile_multi.csv") 
    m.config["validation"]["root_dir"] = os.path.dirname(get_data("testfile_multi.csv"))

    im_callback = callbacks.images_callback(
        csv_file=m.config["validation"]["csv_file"],
        root_dir=m.config["validation"]["root_dir"],
        savedir=tmpdir)
    m.create_trainer(callbacks=[im_callback])
    m.max_steps = 2
    m.trainer.fit(m)
    saved_images = glob.glob("{}/*.png".format(tmpdir))
    assert len(saved_images) == 1
Example #14
def test_override_transforms():
    def get_transform(augment):
        """This is the new transform"""
        if augment:
            transform = A.Compose([A.HorizontalFlip(p=0.5),
                                   ToTensorV2()],
                                  bbox_params=A.BboxParams(
                                      format='pascal_voc',
                                      label_fields=["category_ids"]))

        else:
            transform = ToTensorV2()

        return transform

    m = main.deepforest(transforms=get_transform)

    csv_file = get_data("example.csv")
    root_dir = os.path.dirname(csv_file)
    train_ds = m.load_dataset(csv_file, root_dir=root_dir)

    path, image, target = next(iter(train_ds))
    assert m.transforms.__doc__ == "This is the new transform"
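
# Minimal sketch: applying the augment branch of the custom transform to a
# dummy image with one pascal_voc box (toy inputs, not the test data).
import numpy as np
import albumentations as A
from albumentations.pytorch import ToTensorV2

aug = A.Compose([A.HorizontalFlip(p=1.0), ToTensorV2()],
                bbox_params=A.BboxParams(format="pascal_voc",
                                         label_fields=["category_ids"]))
out = aug(image=np.zeros((100, 100, 3), dtype=np.uint8),
          bboxes=[(10, 10, 40, 40)], category_ids=[0])
print(out["image"].shape, out["bboxes"])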
Example #15
def m(download_release):
    m = main.deepforest()
    m.use_release()

    return m
Example #16
def test_environment():
    from deepforest.main import deepforest
    deepforest_class = deepforest()
Example #17
import os
import cProfile
import pstats

import pandas as pd

from deepforest import evaluate, get_data, main


def run(m):
    csv_file = get_data("OSBS_029.csv")
    predictions = m.predict_file(csv_file=csv_file,
                                 root_dir=os.path.dirname(csv_file))
    predictions.label = "Tree"
    ground_truth = pd.read_csv(csv_file)
    results = evaluate.evaluate(predictions=predictions,
                                ground_df=ground_truth,
                                root_dir=os.path.dirname(csv_file),
                                savedir=None)


if __name__ == "__main__":
    # First call downloads and caches the release weights, so the profiled
    # run below is not dominated by the download
    m = main.deepforest()
    m.use_release()

    profiler = cProfile.Profile()
    profiler.enable()
    m = main.deepforest()
    m.use_release()
    run(m)
    profiler.disable()
    stats = pstats.Stats(profiler).sort_stats('cumtime')
    stats.print_stats()
    stats.dump_stats('evaluate.prof')
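
# Usage note: the dumped profile can be reloaded for inspection in a later
# session without re-running the model.
import pstats

stats = pstats.Stats("evaluate.prof")
stats.sort_stats("cumtime").print_stats(10)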
Example #18
def test_custom_config_file_path(tmpdir):
    print(os.getcwd())
    m = main.deepforest(config_file='tests/deepforest_config_test.yml')
    assert m.config["batch_size"] == 9999
    assert m.config["nms_thresh"] == 0.9
    assert m.config["score_thresh"] == 0.9
Example #19
def m(download_release):
    m = main.deepforest()
    m.use_release(check_release=False)

    return m
Example #20
class Model:
    # Enclosing class reconstructed for context; the source shows only
    # __init__, and the class name is assumed.
    def __init__(self):
        self.model = main.deepforest()
        self.model.use_release()