Example #1
def detect_objects_large(frame, mask, fgbg, origin):
    # Adjust frame brightness relative to the measured average brightness before background subtraction
    masked = cv2.convertScaleAbs(frame, alpha=1, beta=0)
    gain = 15
    masked = cv2.convertScaleAbs(masked, alpha=1, beta=256-average_brightness(16, frame, mask)+gain)

    masked = fgbg.apply(masked, learningRate=-1)

    kernel = np.ones((5, 5), np.uint8)
    # Opening removes small noise specks from the foreground mask
    masked = cv2.morphologyEx(masked, cv2.MORPH_OPEN, kernel, iterations=1)

    # Dilate to merge nearby fragments of the same object before contour extraction
    masked = cv2.dilate(masked, kernel, iterations=int(4*SCALE_FACTOR))

    contours, hierarchy = cv2.findContours(masked, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    n_keypoints = len(contours)
    centroids = np.zeros((n_keypoints, 2))
    sizes = np.zeros((n_keypoints, 2))
    for i, contour in enumerate(contours):
        M = cv2.moments(contour)
        if M['m00'] == 0:
            # Degenerate contour with zero area; skip to avoid division by zero
            continue
        # Centroid from image moments, offset by origin into full-frame coordinates
        centroids[i] = [int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])]
        centroids[i] += origin
        x, y, w, h = cv2.boundingRect(contour)
        sizes[i] = (w, h)

    return centroids, sizes, masked
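
A minimal sketch of how a function like this might be driven, assuming an OpenCV MOG2 background subtractor and a full-frame mask (SCALE_FACTOR and average_brightness come from the surrounding module and are not reproduced here; the input file name is hypothetical):

import cv2
import numpy as np

fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16, detectShadows=False)
cap = cv2.VideoCapture("input.mp4")  # hypothetical input video

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Assumed full-frame mask; in the snippet above, mask is only consumed by average_brightness
    mask = np.full(frame.shape[:2], 255, np.uint8)
    centroids, sizes, fg = detect_objects_large(frame, mask, fgbg, np.array([0, 0]))
    for (cx, cy), (w, h) in zip(centroids, sizes):
        cv2.rectangle(frame, (int(cx - w / 2), int(cy - h / 2)),
                      (int(cx + w / 2), int(cy + h / 2)), (0, 255, 0), 2)
    cv2.imshow("detections", frame)
    if cv2.waitKey(30) & 0xFF == 27:  # Esc to stop
        break

cap.release()
cv2.destroyAllWindows()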
Example #2
def detect_objects(frame, mask, fgbg, detector, origin):
    masked = cv2.convertScaleAbs(frame, alpha=1, beta=0)
    gain = 15
    masked = cv2.convertScaleAbs(masked,
                                 alpha=1,
                                 beta=256 -
                                 average_brightness(16, frame, mask) + gain)

    masked = fgbg.apply(masked, learningRate=-1)

    masked = remove_ground(masked, int(13 / (2.26 / SCALE_FACTOR)), 0.6, frame)

    kernel_dilation = np.ones((5, 5), np.uint8)
    masked = cv2.dilate(masked, kernel_dilation, iterations=2)

    masked = cv2.bitwise_not(masked)

    # keypoints = []
    # Blob detection
    keypoints = detector.detect(masked)

    n_keypoints = len(keypoints)
    centroids = np.zeros((n_keypoints, 2))
    sizes = np.zeros((n_keypoints, 2))
    for i in range(n_keypoints):
        centroids[i] = keypoints[i].pt
        centroids[i] += origin
        sizes[i] = keypoints[i].size

    return centroids, sizes, masked
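
Examples #2 through #5 also rely on a pre-configured blob detector that the snippets never construct. A hypothetical cv2.SimpleBlobDetector setup consistent with the bitwise_not inversion above (the detector is pointed at dark blobs, which is why the foreground mask is inverted before detection; all parameter values here are assumptions, not the project's actual settings):

import cv2

params = cv2.SimpleBlobDetector_Params()
params.filterByColor = True
params.blobColor = 0            # look for dark blobs on a light background
params.filterByArea = True
params.minArea = 10             # assumed minimum blob area in pixels
params.filterByCircularity = False
params.filterByConvexity = False
params.filterByInertia = False
detector = cv2.SimpleBlobDetector_create(params)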
Example #3
def detect_objects(frame, mask, fgbg, detector, origin):
    # Adjust contrast and brightness of the image to make the foreground stand out more
    # alpha adjusts contrast: alpha < 1 reduces contrast, alpha > 1 increases it
    # beta is added to every pixel to change brightness; the 8-bit result is
    # saturated to the 0-255 range (see the quick check after this example)
    # formula is im_out = alpha * im_in + beta
    # To change brightness before contrast, the first pass therefore uses alpha = 1
    masked = cv2.convertScaleAbs(frame, alpha=1, beta=0)
    gain = 15
    masked = cv2.convertScaleAbs(masked,
                                 alpha=1,
                                 beta=256 -
                                 average_brightness(16, frame, mask) + gain)
    # masked = cv2.convertScaleAbs(masked, alpha=2, beta=128)
    # masked = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)

    # masked = threshold_rgb(frame)

    imshow_resized("pre-background subtraction", masked)

    # Subtract Background
    # Learning rate affects how quickly the background model is updated
    # High values > 0.5 tend to lead to patchy output
    # Found that 0.1 - 0.3 is a good range
    # (learningRate=-1 lets OpenCV choose the rate automatically)
    masked = fgbg.apply(masked, learningRate=-1)

    imshow_resized("background subtracted", masked)

    masked = remove_ground(masked, int(13 / (2.26 / SCALE_FACTOR)), 0.6, frame)

    # Morphological Transforms
    # Close to remove black spots
    # masked = imclose(masked, 3, 1)
    # Open to remove white holes
    # masked = imopen(masked, 3, 2)
    # masked = imfill(masked)
    kernel_dilation = np.ones((5, 5), np.uint8)
    masked = cv2.dilate(masked, kernel_dilation, iterations=2)

    # Apply foreground mask (dilated) to the image and perform detection on that
    # masked = cv2.bitwise_and(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), masked)

    # Invert frame such that black pixels are foreground
    masked = cv2.bitwise_not(masked)

    # keypoints = []
    # Blob detection
    keypoints = detector.detect(masked)

    n_keypoints = len(keypoints)
    centroids = np.zeros((n_keypoints, 2))
    sizes = np.zeros((n_keypoints, 2))
    for i in range(n_keypoints):
        centroids[i] = keypoints[i].pt
        centroids[i] += origin
        sizes[i] = keypoints[i].size

    return centroids, sizes, masked
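
The comment block at the top of this example leaves the exact behaviour of beta open. A quick standalone check of cv2.convertScaleAbs shows that it computes |alpha * src + beta| and then saturates the result to the 8-bit 0-255 range, so beta is not restricted to -255..255:

import cv2
import numpy as np

px = np.array([[100, 200]], dtype=np.uint8)
print(cv2.convertScaleAbs(px, alpha=1, beta=100))   # prints [[200 255]]: 300 saturates to 255
print(cv2.convertScaleAbs(px, alpha=1, beta=-150))  # prints [[50 50]]: -50 becomes |-50| = 50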
Example #4
def detect_objects(frame, mask, fgbg, detector, origin, index, scale_factor):
    if average_brightness_hsv(16, frame, mask) > parm.BRIGHTNESS_THRES:
        # If sun compensation is required, extract the sky and apply localised contrast increase to it
        # And then restore the non-sky (i.e. treeline) back into the image to avoid losing data
        masked, non_sky = extract_sky(frame)
        masked = cv2.convertScaleAbs(masked, alpha=2, beta=0)
        masked = cv2.add(masked, non_sky)
    else:
        masked = cv2.convertScaleAbs(frame, alpha=1, beta=0)
    imshow_resized("pre-backhground subtraction", masked)
    masked = cv2.convertScaleAbs(
        masked,
        alpha=1,
        beta=256 - average_brightness(16, frame, mask) + parm.BRIGHTNESS_GAIN)
    # masked = cv2.convertScaleAbs(masked, alpha=2, beta=128)
    # masked = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)
    # masked = threshold_rgb(frame)

    # Subtract Background
    # Learning rate affects how often the model is updated
    # High values > 0.5 tend to lead to patchy output
    # Found that 0.1 - 0.3 is a good range
    masked = fgbg.apply(masked, learningRate=parm.FGBG_LEARNING_RATE)
    masked = remove_ground(masked, int(13 / (2.26 / scale_factor)), 0.5, frame,
                           index)
    cv2.imshow("after remove ground", masked)

    # Morphological Transforms
    # Close to remove black spots
    # masked = imclose(masked, 3, 1)
    # Open to remove white holes
    # masked = imopen(masked, 3, 2)
    # masked = imfill(masked)
    kernel_dilation = np.ones((5, 5), np.uint8)
    masked = cv2.dilate(masked, kernel_dilation, iterations=parm.DILATION_ITER)

    # Apply foreground mask (dilated) to the image and perform detection on that
    # masked = cv2.bitwise_and(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), masked)

    # Invert frame such that black pixels are foreground
    masked = cv2.bitwise_not(masked)
    cv2.imshow("after dilation again and inversion", masked)

    # Blob detection
    keypoints = detector.detect(masked)

    n_keypoints = len(keypoints)
    centroids = np.zeros((n_keypoints, 2))
    sizes = np.zeros((n_keypoints, 2))
    for i in range(n_keypoints):
        centroids[i] = keypoints[i].pt
        centroids[i] += origin
        sizes[i] = keypoints[i].size

    return centroids, sizes, masked
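
Example #4 reads its tuning constants from a parm module that is not part of these snippets. A hypothetical stand-in, with the gain and dilation count copied from the hard-coded values in the earlier examples, the learning rate taken from the range suggested in the comments, and a purely guessed brightness threshold:

# parm.py (hypothetical placeholder for the project's parameter module)
BRIGHTNESS_THRES = 100       # guessed HSV-brightness threshold that triggers sun compensation
BRIGHTNESS_GAIN = 15         # matches gain = 15 in Examples #1-#3
FGBG_LEARNING_RATE = 0.2     # within the 0.1 - 0.3 range recommended in the comments
DILATION_ITER = 2            # matches iterations=2 in the earlier examples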
Example #5
    def detect_objects(
        self,
        frame,
        fgbg,
        detector,
        mask=None,
    ):
        # Adjust contrast and brightness of the image to make the foreground stand out more
        # alpha adjusts contrast: alpha < 1 reduces contrast, alpha > 1 increases it
        # beta is added to every pixel to change brightness; the 8-bit result is saturated to 0-255
        # formula is im_out = alpha * im_in + beta
        if not self.brightness_override:
            gain = 15
            masked = cv2.convertScaleAbs(
                frame,
                alpha=1,
                beta=256 - average_brightness(16, frame, mask) + gain)
        else:
            masked = cv2.convertScaleAbs(frame,
                                         alpha=1,
                                         beta=self.brightness_threshold)
            print(self.brightness_threshold)
        if self.debug: imshow_resized('Brightness Adjusted', masked)

        # Subtract Background
        # Learning rate affects how often the model is updated
        # High values > 0.5 tend to lead to patchy output
        # Found that 0.1 - 0.3 is a good range
        masked = fgbg.apply(masked, learningRate=-1)
        if self.debug: imshow_resized('Foreground mask', masked)

        self.remove_ground(masked, int(13 / (2.26 / self.cap_scaling)), 0.7,
                           frame)

        kernel_dilation = np.ones((5, 5), np.uint8)
        masked = cv2.dilate(masked, kernel_dilation, iterations=2)

        # Invert frame such that black pixels are foreground
        masked = cv2.bitwise_not(masked)

        # keypoints = []
        # Blob detection
        keypoints = detector.detect(masked)

        n_keypoints = len(keypoints)
        centroids = np.zeros((n_keypoints, 2))
        sizes = np.zeros(n_keypoints)
        for i in range(n_keypoints):
            centroids[i] = keypoints[i].pt
            centroids[i] += self.origin
            sizes[i] = keypoints[i].size

        return masked, centroids, sizes
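
This last variant is a method, so the surrounding class supplies self.origin, self.cap_scaling, self.debug, self.brightness_override and self.brightness_threshold, and remove_ground is a method rather than a free function. A hypothetical minimal constructor that makes those dependencies explicit (the class name and default values are assumptions; the real class is not shown in these snippets):

import numpy as np

class Detector:
    def __init__(self, cap_scaling=1.0, origin=(0, 0),
                 brightness_override=False, brightness_threshold=128, debug=False):
        self.cap_scaling = cap_scaling                    # frame scaling factor used by remove_ground
        self.origin = np.array(origin)                    # offset added to every detected centroid
        self.brightness_override = brightness_override    # if True, use a fixed beta instead of auto-brightness
        self.brightness_threshold = brightness_threshold  # fixed beta used when the override is active
        self.debug = debug                                # show intermediate windows via imshow_resized

    # detect_objects(...) and remove_ground(...) as defined in Example #5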