def run_on_block(c_im, h_im, padding=0):
    """Run complete model on the block c_im and its corresponding height block h_im."""
    if c_im.mean() <= 2:  # black part
        return [], np.array([])
    green_centers = proc.green_hotspots(c_im, sigma=sigma, padding=padding)  # run green region proposer
    dark_centers = proc.dark_hotspots(c_im, sigma=sigma, padding=padding)  # run dark region proposer
    c_coords = np.concatenate((green_centers, dark_centers))
    c_rects, h_rects = create_boxes(c_coords)
    c_crops, h_crops = fill_data_tensor(c_im, h_im, c_rects, h_rects)
    predictions = box_model.predict([c_crops, h_crops], verbose=1)  # run classification model
    sorted_predictions = proc.multi_class_sort(c_rects, predictions)

    output = [([], [], []) for k in range(len(sorted_predictions))]
    for (k, (rects, probs)) in enumerate(sorted_predictions):
        if len(rects) > 0:
            rects, probs = proc.non_max_suppression(rects, probs=probs, t=overlap_threshold)
            masks = proc.get_masks(rects, c_im, mask_models[k], verbose=1)
            output[k] = (rects, probs, masks)

    new_output = [([], []) for k in range(len(output))]
    for (k, (rects, probs, masks)) in enumerate(output):
        try:
            if filter_empty_masks:
                rects, probs, masks = proc.discard_empty(rects, probs, masks, t=crop_size_threshold)
            if filter_disjoint:
                masks = proc.remove_unconnected_components(masks)
            if recenter:
                rects, altered = proc.recenter_boxes(rects, masks, d=center_distance)  # indices of moved boxes
                new_masks = proc.get_masks(rects[altered], c_im, mask_models[k], verbose=1)  # compute new masks of moved boxes
                if filter_disjoint:
                    new_masks = proc.remove_unconnected_components(new_masks)
                masks[altered] = new_masks  # set new masks
                if filter_empty_masks:
                    rects, probs, masks = proc.discard_empty(rects, probs, masks, t=crop_size_threshold)
            cnts = proc.find_contours(rects, masks)
            ctrs = proc.find_centroids(rects, masks)
            new_output[k] = (cnts, ctrs)
        except Exception:
            print('No {} lettuce found'.format(['drk', 'grn'][k]))
    return new_output

def run_on_block(c_im, h_im, padding=0):
    """Run complete model on the block c_im and its corresponding height block h_im."""
    if c_im.mean() <= 2:  # black part
        raise IndexError
    c_coords = proc.window_hotspots_centers(c_im, sigma=sigma, padding=padding, top_left=0)  # run region proposer
    c_rects, h_rects = create_boxes(c_coords)
    c_crops, h_crops = fill_data_tensor(c_im, h_im, c_rects, h_rects)
    predictions = box_model.predict([c_crops, h_crops], verbose=1)  # run classification model
    broc_rects, broc_probs = proc.sort_into_classes(c_rects, predictions)
    broc_rects, broc_probs = proc.non_max_suppression(broc_rects, probs=broc_probs, t=overlap_threshold)
    masks = proc.get_masks(broc_rects, c_im, mask_model, verbose=1)  # compute masks for each box
    if filter_masks:
        broc_rects, broc_probs, masks = proc.discard_empty(broc_rects, broc_probs, masks, t=crop_size_threshold)
    if filter_disjoint:
        masks = proc.remove_unconnected_components(masks)
    if recenter:
        broc_rects, altered = proc.recenter_boxes(broc_rects, masks, d=center_distance)  # indices of moved boxes
        new_masks = proc.get_masks(broc_rects[altered], c_im, mask_model, verbose=1)  # compute new masks of moved boxes
        if filter_disjoint:
            new_masks = proc.remove_unconnected_components(new_masks)
        masks[altered] = new_masks  # set new masks
        if filter_masks:
            broc_rects, broc_probs, masks = proc.discard_empty(broc_rects, broc_probs, masks, t=crop_size_threshold)
    contours = proc.find_contours(broc_rects, masks)
    centroids = proc.find_centroids(broc_rects, masks)
    return contours, centroids

def run_on_block(c_im, h_im, padding=0, get_background=False):
    """Run complete model on the block c_im and its corresponding height block h_im."""
    if c_im.mean() <= 1e-6:  # black part
        raise IndexError
    c_coords = proc.green_hotspots(c_im, sigma=sigma, padding=padding)  # run region proposer
    c_rects, h_rects = create_boxes(c_coords, box_size)
    input_RGB, input_DEM = fill_data_tensor(c_im, h_im, c_rects, h_rects)
    # --------
    input_RGB = proc.apply_preprocessing(input_RGB, function=proc.cielab)
    # --------
    predictions, masks = network.predict([input_RGB, input_DEM], verbose=1)  # run classification model
    masks = masks[..., 0]
    crop_idxs = proc.get_class_idxs(predictions, 1)
    boxes, [confidence, masks] = c_rects[crop_idxs], [predictions[crop_idxs], masks[crop_idxs]]
    boxes, [confidence, masks] = proc.non_max_suppression(boxes, other=[confidence, masks], t=overlap_threshold)
    masks = proc.get_hard_masks(masks)
    if filter_empty_masks:
        masks, boxes, [confidence] = proc.discard_empty(masks, boxes, other=[confidence], t=crop_size_threshold)
    contours = proc.find_contours(boxes, masks)
    centroids = proc.find_centroids(boxes, masks)
    if get_background:
        background_boxes, background_confidence = proc.get_class(c_rects, predictions, 0)
        return [contours, centroids, boxes, confidence], [background_boxes, background_confidence]
    else:
        return [contours, centroids, boxes, confidence], [[], []]

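# The version above threads companion arrays (confidence, masks) through
# non_max_suppression via an `other=[...]` argument so that they stay aligned with
# the surviving boxes. The helper below is NOT the project's proc.non_max_suppression;
# it is only a minimal sketch of that bookkeeping pattern, assuming (x, y, w, h)
# boxes and a one-dimensional confidence score per box as the first entry of `other`.
import numpy as np

def nms_with_companions_sketch(boxes, other, t=0.4):
    """Greedy IoU-based NMS that also filters the companion arrays in `other` (hypothetical helper)."""
    boxes = np.asarray(boxes, dtype=float)
    scores = np.asarray(other[0], dtype=float)
    x1, y1 = boxes[:, 0], boxes[:, 1]
    x2, y2 = x1 + boxes[:, 2], y1 + boxes[:, 3]
    areas = boxes[:, 2] * boxes[:, 3]
    order = scores.argsort()[::-1]  # highest confidence first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # overlap of the current best box with the remaining candidates
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= t]  # discard candidates that overlap too much
    keep = np.array(keep, dtype=int)
    # apply the same index set to every companion array so everything stays aligned
    return boxes[keep], [np.asarray(o)[keep] for o in other]
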
def run_on_block(c_im, h_im, padding=0, get_background=False):
    """Run complete model on the block c_im and its corresponding height block h_im."""
    if c_im.mean() <= 2:  # black part
        raise IndexError
    c_coords = proc.green_hotspots(c_im, sigma=sigma, padding=padding)  # run region proposer
    c_rects, h_rects = create_boxes(c_coords)
    c_crops, h_crops = fill_data_tensor(c_im, h_im, c_rects, h_rects)
    predictions = box_model.predict([c_crops, h_crops], verbose=1)  # run classification model
    boxes, confidence = proc.get_class(c_rects, predictions, 1)
    boxes, [confidence] = proc.non_max_suppression(boxes, other=[confidence], t=overlap_threshold)
    masks = proc.get_masks(boxes, c_im, mask_model, verbose=1)  # compute masks for each box
    if filter_empty_masks:
        masks, [boxes, confidence] = proc.discard_empty(masks, other=[boxes, confidence], t=crop_size_threshold)
    if filter_disjoint:
        masks = proc.remove_unconnected_components(masks)
    if recenter:
        boxes, altered = proc.recenter_boxes(boxes, masks, d=center_distance)  # indices of moved boxes
        new_masks = proc.get_masks(boxes[altered], c_im, mask_model, verbose=1)  # compute new masks of moved boxes
        if filter_disjoint:
            new_masks = proc.remove_unconnected_components(new_masks)
        masks[altered] = new_masks  # set new masks
        if filter_empty_masks:
            masks, [boxes, confidence] = proc.discard_empty(masks, other=[boxes, confidence], t=crop_size_threshold)
    contours = proc.find_contours(boxes, masks)
    centroids = proc.find_centroids(boxes, masks)
    if get_background:
        background_boxes, background_confidence = proc.get_class(c_rects, predictions, 0)
        return contours, centroids, confidence, boxes, background_boxes, background_confidence
    else:
        return contours, centroids, confidence, boxes

def run_on_block(self, rgb_block, dem_block, get_background=False):
    """Run detection algorithm on an RGB block and its corresponding DEM block.

    A short summary of the detection algorithm:
    * First generate RoIs and put boxes of a fixed size at these locations.
    * Feed the data in the boxes through a combined classification and masking network.
    * Sort the results into crops and background.
    * Apply post-processing to the crop results, such as non-max suppression to
      discard overlapping boxes, and clean up the masks.
    * Convert masks to contours and centroids.

    Arguments
    ---------
    rgb_block : (?,?,3) block
        RGB data block.
    dem_block : (?,?) block
        DEM data block corresponding to the RGB block.
    get_background : bool, optional
        If set to True, all boxes that contain background are stored and returned.

    Returns
    -------
    crop_output : dict
        Dictionary mapping each crop class index to its contours, centroids, boxes
        and confidence scores.
    bg_output : list of length 2
        List containing background boxes and confidence scores. If
        get_background == False, this is a list containing two empty lists.

    Raises
    ------
    IndexError
        If the input block is completely black (no data).
    """
    if rgb_block.mean() <= 1e-6:
        raise IndexError('This block contains no data.')
    rgb_coords = processing.green_hotspots(rgb_block, sigma=self.Settings.sigma,
                                           padding=self.Settings.box_size)  # run region proposer
    rgb_boxes, dem_boxes = self.create_boxes(rgb_coords, self.Settings.box_size)
    rgb_input_tensor, dem_input_tensor = self.fill_data_tensors(rgb_block, dem_block,
                                                                rgb_boxes, dem_boxes)
    predictions, masks = self.network.predict([rgb_input_tensor, dem_input_tensor],
                                              verbose=1)  # run classification model
    masks = masks[..., 0]

    output = dict()
    for class_idx in range(1, self.num_classes):
        cls_idxs = processing.get_class_idxs(predictions, class_idx)
        cls_boxes, [cls_confidence, cls_masks] = rgb_boxes[cls_idxs], [predictions[cls_idxs], masks[cls_idxs]]
        cls_boxes, [cls_confidence, cls_masks] = processing.non_max_suppression(
            cls_boxes, other=[cls_confidence, cls_masks], t=self.Settings.overlap_threshold)
        cls_masks = processing.get_hard_masks(cls_masks)
        cls_masks, cls_boxes, [cls_confidence] = processing.discard_empty(
            cls_masks, cls_boxes, other=[cls_confidence], t=self.Settings.crop_size_threshold)
        cls_contours = processing.find_contours(cls_boxes, cls_masks)
        cls_centroids = processing.find_centroids(cls_boxes, cls_masks)
        output[class_idx] = {'contours': cls_contours,
                             'centroids': cls_centroids,
                             'boxes': cls_boxes,
                             'confidence': cls_confidence}

    if get_background:
        bg_idxs = processing.get_class_idxs(predictions, 0)
        background_boxes, background_confidence = rgb_boxes[bg_idxs], predictions[bg_idxs]
        return output, [background_boxes, background_confidence]
    else:
        return output, [[], []]
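
# Hypothetical usage sketch (not part of the code above): how a detector object
# exposing the final run_on_block method might be driven over a tiled orthomosaic.
# The `detector` object, the block size and the tiling loop are assumptions made
# for illustration only. run_on_block raises IndexError on all-black blocks, which
# is simply skipped here.
def detect_on_tiles_sketch(detector, rgb_map, dem_map, block=2048):
    """Slide a block window over the full maps and collect per-class detections."""
    results = []
    height, width = rgb_map.shape[:2]
    for y in range(0, height, block):
        for x in range(0, width, block):
            rgb_block = rgb_map[y:y + block, x:x + block]
            dem_block = dem_map[y:y + block, x:x + block]
            try:
                crop_output, bg_output = detector.run_on_block(rgb_block, dem_block,
                                                               get_background=False)
            except IndexError:
                continue  # block contains no data
            # contours/centroids are in block-local coordinates; keep the (y, x)
            # offset so they can later be shifted into map coordinates
            results.append(((y, x), crop_output))
    return results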