Example #1
def _get_detections(generator,
                    model,
                    score_threshold=0.05,
                    max_detections=100,
                    save_path=None,
                    experiment=None):
    """ Get the detections from the model using the generator.

    The result is a list of lists such that the size is:
        all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]

    # Arguments
        generator       : The generator used to run images through the model.
        model           : The model to run on the images.
        score_threshold : The score confidence threshold to use.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save the images with visualized detections to.
        experiment      : Comet ML experiment to log images to (optional).
    # Returns
        A list of lists containing the detections for each image in the generator.
    """
    all_detections = [[None for i in range(generator.num_classes())]
                      for j in range(generator.size())]

    for i in range(generator.size()):
        raw_image = generator.load_image(i)

        #Skip if missing a component data source
        if raw_image is None:
            print("Empty image, skipping")
            continue

        #Make the array contiguous; see https://stackoverflow.com/questions/23830618/python-opencv-typeerror-layout-of-the-output-array-incompatible-with-cvmat
        raw_image = raw_image.copy()

        image = generator.preprocess_image(raw_image)
        image, scale = generator.resize_image(image)

        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))

        # run network
        boxes, scores, labels = model.predict_on_batch(
            np.expand_dims(image, axis=0))[:3]

        # correct boxes for image scale
        boxes /= scale

        # select indices which have a score above the threshold
        indices = np.where(scores[0, :] > score_threshold)[0]

        # select those scores
        scores = scores[0][indices]

        # find the order with which to sort the scores
        scores_sort = np.argsort(-scores)[:max_detections]

        # select detections
        image_boxes = boxes[0, indices[scores_sort], :]
        image_scores = scores[scores_sort]
        image_labels = labels[0, indices[scores_sort]]
        image_detections = np.concatenate([
            image_boxes,
            np.expand_dims(image_scores, axis=1),
            np.expand_dims(image_labels, axis=1)
        ],
                                          axis=1)

        #drape boxes over the lidar point cloud for this tile
        pc = postprocessing.drape_boxes(
            boxes=image_boxes,
            tilename=generator.image_data[i]["tile"],
            lidar_dir=DeepForest_config["lidar_path"])

        #Skip refinement if point density is too low
        if pc:
            #Replace the 2D boxes with boxes refit to the point cloud
            image_boxes = postprocessing.cloud_to_box(pc)
            image_scores = image_scores[:image_boxes.shape[0]]
            image_labels = image_labels[:image_boxes.shape[0]]
            image_detections = np.concatenate([
                image_boxes,
                np.expand_dims(image_scores, axis=1),
                np.expand_dims(image_labels, axis=1)
            ], axis=1)

        if save_path is not None:
            draw_annotations(raw_image,
                             generator.load_annotations(i),
                             label_to_name=generator.label_to_name)
            draw_detections(raw_image,
                            image_boxes,
                            image_scores,
                            image_labels,
                            label_to_name=generator.label_to_name,
                            score_threshold=score_threshold)

            image_name = generator.image_names[i]
            row = generator.image_data[image_name]
            fname = os.path.splitext(row["tile"])[0] + "_" + str(row["window"])

            #Write RGB
            cv2.imwrite(os.path.join(save_path, '{}.png'.format(fname)),
                        raw_image)

            if experiment:
                experiment.log_image(os.path.join(save_path,
                                                  '{}.png'.format(fname)),
                                     file_name=fname)

        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = image_detections[
                image_detections[:, -1] == label, :-1]

        #print('{}/{}'.format(i + 1, generator.size()), end='\r')

    return all_detections
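Both versions of _get_detections repeat the same post-prediction selection step: threshold the scores, sort descending, keep at most max_detections. A minimal standalone sketch of that step, using only NumPy and array shapes matching the keras-retinanet outputs (the function name and test values here are illustrative, not from the original code):

import numpy as np

def select_top_detections(boxes, scores, labels, score_threshold=0.05, max_detections=100):
    """Filter one image's raw network outputs down to the top-scoring detections.

    boxes:  (1, N, 4) array of x1, y1, x2, y2 coordinates
    scores: (1, N) array of confidence scores
    labels: (1, N) array of integer class labels
    """
    # keep only detections above the confidence threshold
    indices = np.where(scores[0, :] > score_threshold)[0]
    kept_scores = scores[0][indices]

    # sort descending and keep at most max_detections
    order = np.argsort(-kept_scores)[:max_detections]

    image_boxes = boxes[0, indices[order], :]
    image_scores = kept_scores[order]
    image_labels = labels[0, indices[order]]

    # columns: x1, y1, x2, y2, score, label
    return np.concatenate([image_boxes,
                           np.expand_dims(image_scores, axis=1),
                           np.expand_dims(image_labels, axis=1)], axis=1)

# quick check with synthetic outputs
boxes = np.random.rand(1, 5, 4)
scores = np.array([[0.9, 0.4, 0.01, 0.7, 0.02]])
labels = np.zeros((1, 5))
detections = select_top_detections(boxes, scores, labels)
print(detections.shape)  # (3, 6): three detections survive the 0.05 threshold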
Example #2
def _get_detections(generator, model, score_threshold=0.05, max_detections=100, save_path=None, experiment=None):
    """ Get the detections from the model using the generator.

    The result is a list of lists such that the size is:
        all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]

    # Arguments
        generator       : The generator used to run images through the model.
        model           : The model to run on the images.
        score_threshold : The score confidence threshold to use.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save the images with visualized detections to.
        experiment      : Comet ML experiment to log images to (optional).
    # Returns
        A list of lists containing the detections for each image in the generator.
    """
    all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]

    for i in range(generator.size()):
        raw_image    = generator.load_image(i)
        plot_image = copy.deepcopy(raw_image)


        #Skip if missing a component data source
        if raw_image is False:
            print("Empty image, skipping")
            continue
        
        #Store plotting images
        plot_rgb = plot_image[:,:,:3].copy()
        plot_chm = plot_image[:,:,3]
        
        #predict
        image        = generator.preprocess_image(raw_image)
        image, scale = generator.resize_image(image)

        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))

        # run network
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3]

        # correct boxes for image scale
        boxes /= scale

        # select indices which have a score above the threshold
        indices = np.where(scores[0, :] > score_threshold)[0]

        # select those scores
        scores = scores[0][indices]

        # find the order with which to sort the scores
        scores_sort = np.argsort(-scores)[:max_detections]

        # select detections
        image_boxes      = boxes[0, indices[scores_sort], :]
        image_scores     = scores[scores_sort]
        image_labels     = labels[0, indices[scores_sort]]
        image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)
        
        #name image
        image_name = generator.image_names[i]        
        row = generator.image_data[image_name]             
        fname = os.path.splitext(row["tile"])[0] + "_" + str(row["window"])
        
        #drape boxes
        #load the lidar tile on the first image, or when the tile differs from the previous image
        if generator.with_lidar:
            if i == 0:
                generator.load_lidar_tile()
            elif not generator.image_data[i]["tile"] == generator.image_data[i-1]["tile"]:
                generator.load_lidar_tile()
        
        #The lidar tile could cover the full extent, so check only the 400 pixel crop of interest
        #Not the best structure, but the on-the-fly generator always has empty bounds
        if hasattr(generator, 'hf'):
            bounds = generator.hf["utm_coords"][generator.row["window"]]
        else:
            bounds = []
        
        if generator.with_lidar:
            density = Lidar.check_density(generator.lidar_tile, bounds=bounds)
                            
            if density > 100:
                #find window utm coordinates
                #print("Bounds for image {}, window {}, are {}".format(generator.row["tile"], generator.row["window"], bounds))
                pc = postprocessing.drape_boxes(boxes=image_boxes, pc = generator.lidar_tile, bounds=bounds)     
                
                #Get new bounding boxes
                image_boxes = postprocessing.cloud_to_box(pc, bounds)    
                image_scores = image_scores[:image_boxes.shape[0]]
                image_labels = image_labels[:image_boxes.shape[0]]          
                image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)
                
            else:
                #Point density too low; keep the original 2D boxes
                pass

        if save_path is not None:
            draw_annotations(plot_rgb, generator.load_annotations(i), label_to_name=generator.label_to_name)
            draw_detections(plot_rgb, image_boxes, image_scores, image_labels, label_to_name=generator.label_to_name, score_threshold=score_threshold)

            #Write RGB; fname was built above from the tile name and window index
            cv2.imwrite(os.path.join(save_path, '{}.png'.format(fname)), plot_rgb)

            #Name the lidar image
            lfname = fname + "_lidar"

            #Normalize the canopy height model to 0-255 to make plotting clearer
            plot_chm = plot_chm / plot_chm.max() * 255
            chm = np.uint8(plot_chm)
            draw_annotations(chm, generator.load_annotations(i), label_to_name=generator.label_to_name)
            draw_detections(chm, image_boxes, image_scores, image_labels, label_to_name=generator.label_to_name, score_threshold=score_threshold)

            #Write CHM
            cv2.imwrite(os.path.join(save_path, '{}_LIDAR.png'.format(lfname)), chm)

            if experiment:
                experiment.log_image(os.path.join(save_path, '{}_LIDAR.png'.format(lfname)), file_name=lfname)
                experiment.log_image(os.path.join(save_path, '{}.png'.format(fname)), file_name=fname)

        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = image_detections[image_detections[:, -1] == label, :-1]

    return all_detections
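Per the docstring, the return value is indexed first by image and then by class, and each entry has had the label column stripped, leaving rows of x1, y1, x2, y2, score. A hedged sketch of how a caller might walk that structure (the loop and printed summary are illustrative; `generator` and `model` stand in for whatever pair the evaluation pipeline supplies):

# Hypothetical consumer of the _get_detections output.
all_detections = _get_detections(generator, model, score_threshold=0.05)

for image_index, per_class in enumerate(all_detections):
    for label, detections in enumerate(per_class):
        if detections is None or detections.shape[0] == 0:
            continue
        # each row is x1, y1, x2, y2, score (the label column was stripped)
        best_score = detections[:, 4].max()
        print("image {} class {}: {} boxes, best score {:.2f}".format(
            image_index, label, detections.shape[0], best_score))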
Example #3
    #only pass boxes that meet the score threshold
    quality_boxes = []
    for box, score, label in zip(boxes[0], scores[0], labels[0]):
        # scores are sorted, so we can stop at the first score below the threshold
        if score < args.score_threshold:
            break
        quality_boxes.append(box)

    #drape boxes
    #get image name and load the matching point cloud
    image_name = os.path.splitext(os.path.basename(image_path))[0]
    point_cloud_filename = os.path.join(DeepForest_config["lidar_path"],
                                        image_name + ".laz")
    pc = Lidar.load_lidar(point_cloud_filename)
    pc = postprocessing.drape_boxes(boxes=quality_boxes, pc=pc)

    #Skip if point density is too low
    if pc:
        #Get new bounding boxes
        new_boxes = postprocessing.cloud_to_box(pc)
        #expand to add a batch dimension
        new_boxes = np.expand_dims(new_boxes, 0)

        # visualize detections
        for box, score, label in zip(new_boxes[0], scores[0], labels[0]):
            # scores are sorted so we can break
            if score < args.score_threshold:
                break

            color = label_color(label)
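The snippet ends just after picking a color. In keras-retinanet-style scripts this loop typically continues by drawing the box and a caption onto the image. A hedged continuation sketch, assuming keras_retinanet's visualization helpers; the image variable (`draw`) and the `labels_to_names` mapping are assumptions standing in for the script's own variables, not part of the original snippet:

# Continuation sketch, assuming keras-retinanet's visualization helpers.
# `draw` (a BGR image copy) and `labels_to_names` (int -> str) are
# hypothetical names for the script's own variables.
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.colors import label_color

for box, score, label in zip(new_boxes[0], scores[0], labels[0]):
    # scores are sorted, so stop at the first below-threshold score
    if score < args.score_threshold:
        break
    color = label_color(label)
    b = box.astype(int)
    draw_box(draw, b, color=color)
    caption = "{} {:.3f}".format(labels_to_names[label], score)
    draw_caption(draw, b, caption)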
Example #4
def neonRecall(
    sites,
    generator,
    model,
    score_threshold=0.05,
    max_detections=100,
    suppression_threshold=0.15,
    save_path=None,
    experiment=None):

    #Container for recall points
    point_contains = []

    #Load field data once; filter per site inside the loop
    field_data = pd.read_csv("data/field_data.csv")
    field_data = field_data[field_data['UTM_E'].notnull()]

    site_data_dict = {}
    for site in sites:
        #select site
        site_data = field_data[field_data["siteID"] == site]
    
        #select tree species
        specieslist = pd.read_csv("data/AcceptedSpecies.csv", encoding="utf-8")
        specieslist = specieslist[specieslist["siteID"] == site]

        site_data = site_data[site_data["scientificName"].isin(specieslist["scientificName"].values)]

        #Single-bole individuals as representative; no individualID ending in non-digits
        site_data = site_data[site_data["individualID"].str.contains(r"\d$")]
        site_data_dict[site] = site_data
        
        #Only data within the last two years, sites can be hand managed
        #site_data=site_data[site_data["eventID"].str.contains("2015|2016|2017|2018")]
        
    for i in range(generator.size()):
        
        #Load image
        raw_image    = generator.load_image(i)
        plot_image = copy.deepcopy(raw_image)
   
        #Skip if missing a component data source
        if raw_image is False:
            print("Empty image, skipping")
            continue
        
        #Store plotting images.
        plot_rgb = plot_image[:,:,:3].copy()
        plot_chm = plot_image[:,:,3]     
        
        image        = generator.preprocess_image(raw_image)
        image, scale = generator.resize_image(image)

        # run network
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3]

        # correct boxes for image scale
        boxes /= scale

        # select indices which have a score above the threshold
        indices = np.where(scores[0, :] > score_threshold)[0]

        # select those scores
        scores = scores[0][indices]

        # find the order with which to sort the scores
        scores_sort = np.argsort(-scores)[:max_detections]

        # select detections
        image_boxes      = boxes[0, indices[scores_sort], :]
        image_scores     = scores[scores_sort]
        image_labels     = labels[0, indices[scores_sort]]
        image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)
        
        #Find geographic bounds
        base_dir = generator.DeepForest_config[generator.row["site"]][generator.name]["RGB"]
        tile_path = os.path.join(base_dir, generator.image_data[i]["tile"])
        
        with rasterio.open(tile_path) as dataset:
            tile_bounds = dataset.bounds   
    
        #drape boxes
        #load the lidar tile on the first image, or when the tile differs from the previous image
        if i == 0:
            generator.load_lidar_tile()
        elif not generator.image_data[i]["tile"] == generator.image_data[i-1]["tile"]:
            generator.load_lidar_tile()
        
        #The lidar tile could cover the full extent, so check only the 400 pixel crop of interest
        #Not the best structure, but the on-the-fly generator always has empty bounds
        if hasattr(generator, 'hf'):
            bounds = generator.hf["utm_coords"][generator.row["window"]]
        else:
            bounds = []
            
        density = Lidar.check_density(generator.lidar_tile, bounds=bounds)
        
        #print("Point density is {:.2f}".format(density))
                
        if density > 100:
            #find window utm coordinates
            #print("Bounds for image {}, window {}, are {}".format(generator.row["tile"], generator.row["window"], bounds))
            pc = postprocessing.drape_boxes(boxes=image_boxes, pc = generator.lidar_tile, bounds=bounds)     
            
            #Get new bounding boxes
            new_boxes = postprocessing.cloud_to_box(pc, bounds)    
            new_scores = image_scores[:new_boxes.shape[0]]
            new_labels = image_labels[:new_boxes.shape[0]]          
            image_detections = np.concatenate([new_boxes, np.expand_dims(new_scores, axis=1), np.expand_dims(new_labels, axis=1)], axis=1)
            
        else:
            #Point density too low; keep the original 2D boxes
            pass

        #add spatial NEON points
        site_data = site_data_dict[generator.row["site"]]
        plotID = os.path.splitext(generator.image_data[i]["tile"])[0]
        plot_data = site_data[site_data.plotID == plotID]

        #Save image and send it to logger
        if save_path is not None:

            #Convert field points from UTM to pixel coordinates (0.1 m cells)
            x = (plot_data.UTM_E - tile_bounds.left).values / 0.1
            y = (tile_bounds.top - plot_data.UTM_N).values / 0.1

            #Draw field points on the RGB image that is written out below
            for j in np.arange(len(x)):
                cv2.circle(plot_rgb, (int(x[j]), int(y[j])), 2, (0, 0, 255), -1)
    
            #Draw detections on the RGB image
            draw_detections(plot_rgb, image_boxes, image_scores, image_labels, label_to_name=generator.label_to_name, score_threshold=score_threshold)

            #name image
            image_name = generator.image_names[i]
            row = generator.image_data[image_name]
            fname = os.path.splitext(row["tile"])[0] + "_" + str(row["window"])

            #Write RGB
            cv2.imwrite(os.path.join(save_path, '{}_NeonPlot.png'.format(fname)), plot_rgb)

            #Normalize the canopy height model to 0-255 for plotting
            plot_chm = plot_chm / plot_chm.max() * 255
            chm = np.uint8(plot_chm)
            draw_detections(chm, image_boxes, image_scores, image_labels, label_to_name=generator.label_to_name, score_threshold=score_threshold, color=(80, 127, 255))
            cv2.imwrite(os.path.join(save_path, '{}_Lidar_NeonPlot.png'.format(plotID)), chm)
                
            #Log to Comet, using the same filenames that were written above
            if experiment:
                experiment.log_image(os.path.join(save_path, '{}_NeonPlot.png'.format(fname)), file_name=str(plotID))
                experiment.log_image(os.path.join(save_path, '{}_Lidar_NeonPlot.png'.format(plotID)), file_name=str("Lidar_" + plotID))
    
        #Calculate recall
        projected_boxes = []

        for row in image_boxes:
            #Add utm bounds and create a shapely polygon
            pbox = create_polygon(row, tile_bounds, cell_size=0.1)
            projected_boxes.append(pbox)

        #For each field point, is it within any prediction?
        for index, tree in plot_data.iterrows():
            p = Point(tree.UTM_E, tree.UTM_N)

            within_polygon = []
            for prediction in projected_boxes:
                within_polygon.append(p.within(prediction))

            #Check for an overlapping polygon and record the result
            is_within = sum(within_polygon) > 0
            point_contains.append(is_within)
                
    #Sum recall across plots
    if len(point_contains) == 0:
        recall = None
    else:
        #Recall rate across all field points
        recall = sum(point_contains) / len(point_contains)

    return recall
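The recall computation above reduces to a point-in-polygon test: each field-collected stem counts as recalled if it falls inside at least one predicted box. A self-contained sketch of that core idea, using shapely's box helper; all coordinates below are synthetic, not from the original code:

from shapely.geometry import Point, box

# Hypothetical predicted boxes in UTM coordinates (xmin, ymin, xmax, ymax)
projected_boxes = [box(100.0, 200.0, 105.0, 205.0),
                   box(110.0, 210.0, 118.0, 216.0)]

# Hypothetical field-collected stem locations
field_points = [Point(102.0, 203.0),   # inside the first box
                Point(111.5, 212.0),   # inside the second box
                Point(150.0, 250.0)]   # outside every box

point_contains = [any(p.within(b) for b in projected_boxes) for p in field_points]

recall = sum(point_contains) / len(point_contains) if point_contains else None
print(recall)  # 0.666...: two of three stems fall within a prediction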