import os
import glob
from datetime import datetime

import numpy as np
import pandas as pd

from DeepForest.config import load_config
from DeepForest import preprocess

#save time for logging
dirname = datetime.now().strftime("%Y%m%d_%H%M%S")

#Load DeepForest_config and data file based on training or retraining mode
DeepForest_config = load_config("train")
data = preprocess.load_data(DeepForest_config["training_csvs"],
                            DeepForest_config["rgb_res"],
                            DeepForest_config["lidar_path"])

#Log site
site = DeepForest_config["evaluation_site"]

##Preprocess Filters##
if DeepForest_config['preprocess']['zero_area']:
    data = preprocess.zero_area(data)

#pass an args object instead of using the command line
args = [
    "--epochs", str(DeepForest_config["epochs"]),
    "--batch-size", str(DeepForest_config['batch_size']),
    "--backbone", str(DeepForest_config["backbone"]),
    #the original fragment ends mid-list; the config key below is an assumption
    "--score-threshold", str(DeepForest_config["score_threshold"]),
]
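#A minimal sketch of handing the flag list off for training, assuming this
#script targets keras-retinanet, whose train entry point accepts a list of
#command-line tokens and parses it internally (the import path below is
#keras-retinanet's own, not necessarily this repo's wrapper):
from keras_retinanet.bin.train import main as retinanet_train

retinanet_train(args)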
import argparse

from DeepForest import preprocess
#load_model, predict and calculate_confusion are assumed to be provided
#elsewhere in this repo

#parse logs
parser = argparse.ArgumentParser()
parser.add_argument("--logdir",
                    help="directory of the saved model",
                    type=str,
                    default="20180523-181550/")
parser.add_argument(
    "--box_file",
    help="csv of bounding box annotations",
    type=str,
    default=
    "data/bounding_boxes_NEON_D03_OSBS_DP1_398000_3280000_classified_point_cloud_laz.csv"
)
args = parser.parse_args()

#load model
model = load_model(args.logdir)

#load data
data = preprocess.load_data(args.box_file)

#predict
preds, labels = predict(model, data)

#confusion matrix
tn, fp, fn, tp = calculate_confusion(labels, preds)
print(
    "True Negative Rate %.3f\nTrue Positive Rate %.3f\nFalse Negative Rate %.3f\nFalse Positive Rate %.3f"
    % (tn, tp, fn, fp))
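#A minimal sketch of what calculate_confusion might look like, assuming it
#wraps sklearn and returns the flattened 2x2 matrix in (tn, fp, fn, tp)
#order to match the unpacking above; the implementation is an assumption:
from sklearn.metrics import confusion_matrix

def calculate_confusion(labels, preds):
    #ravel() flattens the 2x2 confusion matrix row by row
    tn, fp, fn, tp = confusion_matrix(labels, preds).ravel()
    return tn, fp, fn, tp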
from comet_ml import Experiment
import numpy as np
import random

from DeepForest.config import config
from DeepForest import preprocess

batch_size = config['batch_size']

#set experiment and log configs
experiment = Experiment(api_key="ypQZhYfs3nSyKzOfz13iuJpj2",
                        project_name='deepforest-retinanet')

##Set seed for reproducibility##
np.random.seed(2)

#Load data and combine into a large frame
#Training
data = preprocess.load_data(data_dir=config['training_csvs'])

#Evaluation
evaluation = preprocess.load_data(data_dir=config['evaluation_csvs'])

##Preprocess Filters##
if config['preprocess']['zero_area']:
    data = preprocess.zero_area(data)
    evaluation = preprocess.zero_area(evaluation)

#Write training and evaluation data to file for annotations
data.to_csv("data/training/detection.csv")
evaluation.to_csv("data/training/evaluation.csv")

#log data size and set number of steps
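#A minimal sketch of the bookkeeping the comment above refers to, assuming
#comet's log_parameter and steps derived from rows per batch (the parameter
#names are assumptions):
experiment.log_parameter("training_samples", data.shape[0])
experiment.log_parameter("evaluation_samples", evaluation.shape[0])
steps_per_epoch = data.shape[0] // batch_size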
import os

import cv2

from DeepForest import config, preprocess, onthefly_generator


def sample(n=50):
    """
    Grab n random images from across the site
    """
    #Load config
    DeepForest_config = config.load_config()

    #Read in data
    data = preprocess.load_data(data_dir=DeepForest_config['training_csvs'],
                                res=0.1,
                                lidar_path=DeepForest_config["lidar_path"])

    #Create windows
    windows = preprocess.create_windows(
        data, DeepForest_config, base_dir=DeepForest_config["evaluation_tile_dir"])
    selected_windows = windows[["tile", "window"]].drop_duplicates().sample(n=n)

    generator = onthefly_generator.OnTheFlyGenerator(
        data=data,
        windowdf=selected_windows,
        DeepForest_config=DeepForest_config)

    folder_dir = os.path.join("data", DeepForest_config["evaluation_site"], "samples")
    if not os.path.exists(folder_dir):
        os.mkdir(folder_dir)

    for i in range(generator.size()):
        #Load image - done for side effects, allows skipping bad tiles
        try:
            three_channel = generator.load_image(i)
        except Exception:
            continue

        #load lidar
        generator.load_lidar_tile()
        generator.clip_las()
        if generator.clipped_las is None:
            continue

        #name RGB
        tilename = os.path.splitext(generator.image_data[i]["tile"])[0]
        tilename = tilename + "_" + str(generator.image_data[i]["window"]) + ".tif"
        filename = os.path.join(folder_dir, tilename)

        #Write RGB window
        cv2.imwrite(filename, generator.image)

        #name .laz
        tilename = os.path.splitext(generator.image_data[i]["tile"])[0]
        tilename = tilename + "_" + str(generator.image_data[i]["window"]) + ".laz"
        filename = os.path.join(folder_dir, tilename)

        #Write clipped point cloud
        generator.clipped_las.write(filename)
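#Example invocation, assuming the script is run directly; the default of 50
#windows matches the signature above.
if __name__ == "__main__":
    sample(n=50)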
from comet_ml import Experiment
import numpy as np

from DeepForest.config import config
from DeepForest import preprocess, evaluate
from models import inception

batch_size = config['data_generator_params']['batch_size']

#set experiment and log configs
experiment = Experiment(api_key="ypQZhYfs3nSyKzOfz13iuJpj2",
                        project_name='deepforest')
experiment.log_multiple_params(config['data_generator_params'])
experiment.log_multiple_params(config['training'])

##Set seed for reproducibility##
np.random.seed(2)

#Load data and combine into a large frame
data = preprocess.load_data(data_dir=config['bbox_data_dir'],
                            nsamples=config["subsample"])

##Preprocess Filters##
data = preprocess.zero_area(data)

#Partition data into training and testing dataframes
msk = np.random.rand(len(data)) < 0.8
train = data[msk]
test = data[~msk]

#log data size
experiment.log_parameter("training_samples", train.shape[0])
experiment.log_parameter("testing_samples", test.shape[0])

#Create dictionaries to keep track of labels and splits
partition = {"train": train.index.values, "test": test.index.values}
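#The comment above also mentions label bookkeeping; a minimal sketch of the
#companion label dictionary, assuming the bounding-box frame carries a
#"label" column (an assumption about the schema):
labels = data["label"].to_dict()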