class MaskedAnnotation(Annotation):
    """An Annotation that additionally carries a segmentation mask."""

    def __init__(self, label, score, box, mask=None):
        """
        :param label: class label of the detection
        :param score: confidence score of the detection
        :param box: bounding box of the detection
        :param mask: optional segmentation mask data; an empty Mask is used
            when none is supplied
        """
        super(MaskedAnnotation, self).__init__(label, score, box)
        self.mask = Mask(mask if mask is not None else [])

    def crop(self, image_np):
        """Crop the detection's rect out of image_np, with pixels outside
        the mask made transparent."""
        transparent = self.mask.as_transparency(image_np)
        return self.rect.crop(transparent)

    def draw(self, image_np, color, draw_label=True, draw_rect=True):
        """Draw the mask (and optionally the bounding rect) onto image_np.

        NOTE(review): draw_label is accepted for interface compatibility but
        is currently ignored -- confirm whether the label should be drawn.
        """
        self.mask.draw(image_np, color)
        if draw_rect:
            self.rect.draw(image_np, color)

    @classmethod
    def from_results(cls, num_detections, labels, scores, boxes, masks=None,
                     min_confidence=0.5):
        """Build annotations from raw detector output arrays.

        Detections scoring below min_confidence are dropped.

        Bug fix: the original zipped the detections against an empty list
        when masks was None (zip truncates to its shortest input), which
        silently produced no annotations at all; a masks sequence shorter
        than num_detections likewise dropped detections. Missing masks are
        now padded with None so every detection is considered.
        """
        masks = list(masks[:num_detections]) if masks is not None else []
        # Pad so zip() does not truncate detections that have no mask.
        masks += [None] * (num_detections - len(masks))
        annotations = []
        zipped = zip(labels[:num_detections], scores[:num_detections],
                     boxes[:num_detections], masks)
        for label, score, box, mask in zipped:
            if score >= min_confidence:
                annotations.append(cls(label, score, box, mask))
        return annotations

    def __str__(self):
        return "{} :: {}".format(self.label, self.score)
def apply(self, image_np):
    """Mask out horizontal-edge regions of the image.

    Thresholds a vertical Sobel response (dy=1, which responds to
    horizontal edges) and morphologically opens the result to remove
    speckle, then applies the resulting mask to the input image.

    Bug fix: the original passed the tuple (3, 3) as the morphology
    kernel; OpenCV interprets a sequence argument as the kernel *data*
    (here a 1x2 kernel of nonzero values), not as a kernel size. An
    explicit 3x3 all-ones structuring element is used instead.
    """
    gray = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)
    edges = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    # Invert so low-gradient (flat) areas become white and are kept.
    ret, thresh = cv2.threshold(edges, 8, 255, cv2.THRESH_BINARY_INV)
    kernel = np.ones((3, 3), np.uint8)
    opened = cv2.morphologyEx(thresh.astype(np.uint8), cv2.MORPH_OPEN, kernel)
    mask = Mask(opened)
    return mask.apply(image_np)
def apply(self, image_np):
    """Apply the inverted motion mask: keep only stationary pixels.

    Stores the computed mask on self.motion_mask and returns the masked
    image.
    """
    # The frame is fed to the subtractor twice and the first response is
    # discarded -- presumably to speed model adaptation; TODO confirm.
    self.subtractor.apply(image_np)
    foreground = self.subtractor.apply(image_np)
    # Dilate then erode (a closing) to fill holes in the motion regions.
    foreground = cv2.dilate(foreground, self.kernel, iterations=20)
    foreground = cv2.erode(foreground, self.kernel, iterations=20)
    # Invert: the mask selects everything that is NOT moving.
    self.motion_mask = Mask(np.logical_not(foreground))
    return self.motion_mask.apply(image_np)
def apply(self, image_np):
    """Mask out high-Laplacian (edge) regions, then apply the mask to a
    color-quantized version of the input image.
    """
    gray = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)
    edges = cv2.Laplacian(gray, cv2.CV_64F)
    # Invert so flat regions (Laplacian response below 8) stay white / kept.
    thresh = cv2.threshold(edges, 8, 255, cv2.THRESH_BINARY_INV)[1].astype(np.uint8)
    # NOTE(review): (1, 1) is passed as the erode kernel; OpenCV treats a
    # sequence as kernel data (a 1x2 kernel), not a kernel size, so this
    # erode likely does not do what was intended -- confirm.
    thresh = cv2.erode(thresh, (1, 1), iterations=1)
    mask = Mask(thresh)
    return mask.apply(
        self.quant.apply(
            image_np
            # self.bilateral_filter.apply(image_np)
        )
    )
class BackgroundSubtractor(ImageHandler):
    """Removes background (stationary) elements from an image,
    accumulating information across multiple images.
    """

    def __init__(self):
        self.annotations = None
        self.subtractor = cv2.createBackgroundSubtractorKNN(history=1000)
        self.kernel = np.ones((3, 3), np.uint8)

    def apply(self, image_np):
        """Return image_np with non-moving regions masked away; stores the
        motion mask on self.motion_mask."""
        # The frame is submitted twice; only the second foreground response
        # is used (presumably to speed model adaptation -- TODO confirm).
        self.subtractor.apply(image_np)
        foreground = self.subtractor.apply(image_np)
        # Dilate then erode (a closing) to fill holes in the motion regions.
        foreground = cv2.dilate(foreground, self.kernel, iterations=20)
        foreground = cv2.erode(foreground, self.kernel, iterations=20)
        self.motion_mask = Mask(foreground)
        return self.motion_mask.apply(image_np)
def __init__(self, label, score, box, mask=None):
    """Initialize the base annotation and wrap the optional mask data.

    A missing mask is represented by an empty Mask.
    """
    super(MaskedAnnotation, self).__init__(label, score, box)
    if mask is None:
        mask = []
    self.mask = Mask(mask)
def apply(self, image_np):
    """Keep only the non-edge regions of the image (Canny-based mask)."""
    gray = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 100, 200)
    # Invert so edge pixels are zeroed out by the mask.
    ret, inverted = cv2.threshold(edges, 127, 255, cv2.THRESH_BINARY_INV)
    return Mask(np.squeeze(inverted)).apply(image_np)
def apply(self, image_np):
    """Binarize the grayscale image with the configured threshold mode
    (self.mode) and apply the result as a mask."""
    gray = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 127, 255, self.mode)
    return Mask(binary).apply(image_np)