def __init__(self):

        # Configuration parameters

        self.expand_blobs = (config.getboolean('EXPAND_BLOBS'),
                             config.getfloat('EXPAND_BLOBS_RATIO'))

        self.detect_blobs_by_bounding_boxes = \
            config.getboolean('DETECT_BLOBS_BY_BOUNDING_BOXES')

        # If SimpleBlobDetector will be used, load the parameters
        if not self.detect_blobs_by_bounding_boxes:

            self.threshold = [
                config.getint('MIN_THRESHOLD'),
                config.getint('MAX_THRESHOLD'),
                config.getint('THRESHOLD_STEP')
            ]
            self.min_dist_between_blobs = \
                config.getint('MIN_DIST_BETWEEN_BLOBS')
            self.filter_by_color = [
                config.getboolean('FILTER_BY_COLOR'),
                config.getint('BLOB_COLOR')
            ]
            self.filter_by_area = [
                config.getboolean('FILTER_BY_AREA'),
                config.getint('MIN_AREA'),
                config.getint('MAX_AREA')
            ]
            self.filter_by_circularity = \
                [config.getboolean('FILTER_BY_CIRCULARITY'),
                 config.getfloat('MIN_CIRCULARITY'),
                 config.getfloat('MAX_CIRCULARITY')]
            self.filter_by_convexity = [
                config.getboolean('FILTER_BY_CONVEXITY'),
                config.getfloat('MIN_CONVEXITY'),
                config.getfloat('MAX_CONVEXITY')
            ]
            self.filter_by_inertia = [
                config.getboolean('FILTER_BY_INERTIA'),
                config.getfloat('MIN_INERTIA'),
                config.getfloat('MAX_INERTIA')
            ]

            # Setup SimpleBlobDetector parameters
            params = cv2.SimpleBlobDetector_Params()

            # Change thresholds
            params.minThreshold = self.threshold[0]
            params.maxThreshold = self.threshold[1]
            params.thresholdStep = self.threshold[2]

            # Minimum distance between blobs
            params.minDistBetweenBlobs = self.min_dist_between_blobs

            # Filter by Color
            params.filterByColor = self.filter_by_color[0]
            params.blobColor = self.filter_by_color[1]

            # Filter by Area.
            params.filterByArea = self.filter_by_area[0]
            params.minArea = self.filter_by_area[1]
            params.maxArea = self.filter_by_area[2]

            # Filter by Circularity
            params.filterByCircularity = self.filter_by_circularity[0]
            params.minCircularity = self.filter_by_circularity[1]
            params.maxCircularity = self.filter_by_circularity[2]

            # Filter by Convexity
            params.filterByConvexity = self.filter_by_convexity[0]
            params.minConvexity = self.filter_by_convexity[1]
            params.maxConvexity = self.filter_by_convexity[2]

            # Filter by Inertia
            params.filterByInertia = self.filter_by_inertia[0]
            params.minInertiaRatio = self.filter_by_inertia[1]
            params.maxInertiaRatio = self.filter_by_inertia[2]

            self.detector = cv2.SimpleBlobDetector_create(params)
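
# The __init__ above assumes a pre-built `config` section object. A minimal
# sketch of how such an object could be created with the standard-library
# configparser (the file name and section name are assumptions; the key names
# come from the get*() calls above):

import configparser

parser = configparser.ConfigParser()
parser.read('blob_detector.ini')      # assumed file name
config = parser['BLOB_DETECTOR']      # assumed section name

# blob_detector.ini might then contain, e.g.:
# [BLOB_DETECTOR]
# EXPAND_BLOBS = yes
# EXPAND_BLOBS_RATIO = 1.2
# DETECT_BLOBS_BY_BOUNDING_BOXES = no
# MIN_THRESHOLD = 10
# MAX_THRESHOLD = 200
# THRESHOLD_STEP = 10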
def main():
    global velocity_pub, bridge, blob_parameters, detector, color
    global new_img_available, new_img_msg
    global low_hue, low_sat, low_val, high_hue, high_sat, high_val
    global cv_image, hsv_image, lower_limits, upper_limits
    global thresholded_image, keypoints, first_blob, robot_vel
    global x_coord, y_coord, blob_size
    global state_tracking, state_approaching_blob, new_blob_available

    rospy.init_node("follow_three_blobs_final")

    rospy.Subscriber("camera/color/image_raw", Image, get_image)

    velocity_pub = rospy.Publisher("robotont/cmd_vel", Twist, queue_size=1)

    bridge = cv_bridge.core.CvBridge()

    cv2.namedWindow("Thresholded image", cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('blob_detector/follow_three_blobs_final')

    blob_parameters = cv2.SimpleBlobDetector_Params()

    blob_parameters.filterByColor = True
    blob_parameters.filterByArea = False
    blob_parameters.filterByCircularity = False
    blob_parameters.filterByInertia = False
    blob_parameters.filterByConvexity = False

    blob_parameters.blobColor = 255

    blob_parameters.minArea = 1491
    blob_parameters.maxArea = 307200

    blob_parameters.minCircularity = 0
    blob_parameters.maxCircularity = 1

    blob_parameters.minInertiaRatio = 0
    blob_parameters.maxInertiaRatio = 1

    blob_parameters.minConvexity = 0
    blob_parameters.maxConvexity = 1

    blob_parameters.minDistBetweenBlobs = 100

    detector = cv2.SimpleBlobDetector_create(blob_parameters)

    while not rospy.is_shutdown():

        if new_img_available:

            cv_image = bridge.imgmsg_to_cv2(new_img_msg,
                                            desired_encoding='bgr8')

            cv2.imshow('blob_detector/follow_three_blobs_final', cv_image)

            cv2.waitKey(1)

            new_img_available = False
            hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

            lower_limits = np.array([low_hue, low_sat, low_val])
            upper_limits = np.array([high_hue, high_sat, high_val])

            thresholded_image = cv2.inRange(hsv_image, lower_limits,
                                            upper_limits)
            thresholded_image = cv2.bitwise_and(cv_image,
                                                cv_image,
                                                mask=thresholded_image)

            cv2.imshow("Thresholded image", thresholded_image)

            keypoints = detector.detect(thresholded_image)

            if len(keypoints) > 0:
                first_blob = keypoints[0]
                x_coord = int(first_blob.pt[0])
                y_coord = int(first_blob.pt[1])
                blob_size = int(first_blob.size)
                print(x_coord, y_coord, blob_size)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
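
# The loop above relies on globals (new_img_available, new_img_msg, the
# low_/high_ HSV limits) that this snippet never defines. A minimal sketch of
# plausible defaults and the image callback registered at the Subscriber above
# (all threshold values are assumptions):

low_hue, low_sat, low_val = 0, 100, 100
high_hue, high_sat, high_val = 10, 255, 255
new_img_available, new_img_msg = False, None


def get_image(msg):
    # Store the latest frame and flag it for the main loop.
    global new_img_available, new_img_msg
    new_img_msg = msg
    new_img_available = True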
Example #3
def experiment_notch_detection(path):
    """ Notch detection via circle subtraction and blob detection. """
    # Get thresholded image and hough circles.
    img = load_image(path)
    img_thresh = threshold(img)
    circles = hough_circles(img)

    # Paint out the first circle detected. Assume that only one circle was
    # detected for whole image.
    x, y, r = circles[0]
    cv.circle(img_thresh, (x, y), r, (0, 0, 0), cv.FILLED)

    # Erode what's left to try and remove edges.
    img_thresh = cv.erode(img_thresh, np.ones((3, 3), np.uint8))
    img_thresh = cv.dilate(img_thresh, np.ones((3, 3), np.uint8))

    # Extract a region of interest that is very likely to contain the notch if
    # one is present in the image. This corresponds to a small square at about
    # the 45 degree mark on the eyeball. Some notches are slightly lower than
    # this, so the ROI should be large enough to capture many notch positions.
    ratio = 1.0 / 4.0
    roi_size = img_thresh.shape[0] * ratio
    half_rs = roi_size / 2.0

    # Do a little trig to find the cartesian coordinates of the 45 degree point.
    radius = img_thresh.shape[0] / 2.0
    angle = math.pi / 4.0
    side = radius * math.sin(angle)

    # Get the damned ROI.
    x, y = (int(radius + side - half_rs), int(radius - side - half_rs))
    cv.rectangle(img, (x, y), (x + int(roi_size), y + int(roi_size)), (255, 255, 255))
    roi = img_thresh[y:int(y + roi_size), x:int(x + roi_size)]

    # Run blob detection on what's left.
    # Set up the detector with default parameters.
    blob_params = cv.SimpleBlobDetector_Params()
    blob_params.minThreshold = 0.0
    blob_params.maxThreshold = THRESH
    blob_params.thresholdStep = THRESH / 2
    blob_params.filterByArea = False
    blob_params.filterByColor = False
    blob_params.filterByConvexity = False
    blob_params.filterByInertia = True
    blob_params.minInertiaRatio = 0.05
    blob_params.maxInertiaRatio = 1
    sbd = cv.SimpleBlobDetector_create(blob_params)

    # Detect blobs.
    keypoints = sbd.detect(roi)

    # Draw circles around any detected blobs.
    # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle
    # corresponds to the size of blob
    roi = cv.drawKeypoints(roi, keypoints, np.array([]),
                           (0, 0, 255),
                           cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Image windows for this experiment.
    o_window = "Original Image"
    t_window = "Thresholded, Subtracted, Blob-Detected"
    cv.namedWindow(o_window, cv.WINDOW_AUTOSIZE)
    cv.namedWindow(t_window, cv.WINDOW_AUTOSIZE)
    cv.imshow(o_window, img)
    cv.imshow(t_window, roi)
    cv.waitKey(0)
    return
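
# experiment_notch_detection() assumes load_image(), threshold() and
# hough_circles() helpers defined elsewhere. A minimal sketch of what
# hough_circles() could look like (the name is suffixed _sketch and all
# parameter values are assumptions, not the author's):

def hough_circles_sketch(img):
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) if img.ndim == 3 else img
    found = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, dp=1.5,
                            minDist=gray.shape[0],  # expect at most one circle
                            param1=100, param2=30)
    if found is None:
        return []
    # Round to ints so callers can unpack x, y, r directly.
    return [tuple(int(v) for v in c) for c in found[0]]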
def circle_detect(captured_img,
                  num_circles,
                  spacing,
                  pad_pixels=(0., 0.),
                  show_preview=True):
    """
    Detects the circle of a circle board pattern

    :param captured_img: captured image
    :param num_circles: a tuple of integers, (num_circle_x, num_circle_y)
    :param spacing: a tuple of integers, in pixels,
                    (space between circles in x, space between circles in y)
    :param pad_pixels: coordinates of the top-left corner of the warped image;
                       the same amount of padding is assumed on the other side.
    :param show_preview: boolean, default True
    :return: a tuple, (found_dots, H)
             found_dots: boolean, indicating success of calibration
             H: a 3x3 homography matrix (numpy)
    """

    # Binarization
    # org_copy = org.copy() # Otherwise, we write on the original image!
    img = (captured_img.copy() * 255).astype(np.uint8)
    if len(img.shape) > 2:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    img = cv2.medianBlur(img, 15)
    img_gray = img.copy()

    img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 121, 0)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
    img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    img = 255 - img

    # Blob detection
    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    params.filterByColor = True
    params.minThreshold = 128

    # Filter by Area.
    params.filterByArea = True
    params.minArea = 50

    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.785

    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.87

    # Filter by Inertia
    params.filterByInertia = False
    params.minInertiaRatio = 0.01

    detector = cv2.SimpleBlobDetector_create(params)

    # Detecting keypoints
    # this is redundant for what comes next, but gives us access to the detected dots for debug
    keypoints = detector.detect(img)
    found_dots, centers = cv2.findCirclesGrid(
        img,
        num_circles,
        blobDetector=detector,
        flags=cv2.CALIB_CB_SYMMETRIC_GRID)

    # Drawing the keypoints
    cv2.drawChessboardCorners(captured_img, num_circles, centers, found_dots)
    img_gray = cv2.drawKeypoints(img_gray, keypoints, np.array([]),
                                 (0, 255, 0),
                                 cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Find transformation
    H = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=np.float32)
    if found_dots:
        # Generate reference points to compute the homography
        ref_pts = np.zeros((num_circles[0] * num_circles[1], 1, 2), np.float32)
        pos = 0
        for i in range(0, num_circles[1]):
            for j in range(0, num_circles[0]):
                ref_pts[
                    pos,
                    0, :] = spacing * np.array([j, i]) + np.array(pad_pixels)
                pos += 1

        H, mask = cv2.findHomography(centers, ref_pts, cv2.RANSAC, 1)
        if show_preview:
            dsize = [
                int((num_circs - 1) * space + 2 * pad_pixs) for num_circs,
                space, pad_pixs in zip(num_circles, spacing, pad_pixels)
            ]
            captured_img_warp = cv2.warpPerspective(captured_img, H,
                                                    tuple(dsize))

    if show_preview:
        fig = plt.figure()

        ax = fig.add_subplot(223)
        ax.imshow(img_gray, cmap='gray')

        ax2 = fig.add_subplot(221)
        ax2.imshow(img, cmap='gray')

        ax3 = fig.add_subplot(222)
        ax3.imshow(captured_img, cmap='gray')

        if found_dots:
            ax4 = fig.add_subplot(224)
            ax4.imshow(captured_img_warp, cmap='gray')

        plt.show()

    return found_dots, H
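
# Hypothetical usage of circle_detect() (the file name and grid geometry are
# assumptions; captured_img is expected as a float image in [0, 1]):

captured = cv2.imread('capture.png').astype(np.float32) / 255.0
found, H = circle_detect(captured, num_circles=(7, 5), spacing=(80, 80),
                         pad_pixels=(40., 40.), show_preview=False)
if found:
    # Warp into the same canonical frame used for the homography above.
    warped = cv2.warpPerspective(captured, H, (6 * 80 + 80, 4 * 80 + 80))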
Example #5
# Standard imports

import cv2
import numpy as np

# Read image
#im = cv2.imread('blobsanty1.gif', cv2.IMREAD_GRAYSCALE)
im = cv2.imread('sudoku.jpg', 0)
#im = imageio.mimread("Imagen base")
cv2.imshow("Imagen base", im)
#cv2.waitKey(0)

# Set up the detector with default parameters.

detector = cv2.SimpleBlobDetector_create()
print(type(detector))
# Detect blobs.
keypoints = detector.detect(im)
'''
# Draw detected blobs as red circles.

# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# Show keypoints

cv2.imshow("Keypoints", im_with_keypoints)
cv2.waitKey(0)'''
Example #6
def spot_center(img, minx, maxx, miny, maxy):
    crop_img = img[miny:maxy, minx:maxx]
    crop_img2 = crop_img.copy()
    h, w, c = crop_img.shape
    esquina = (0, 0)
    pmayor = []
    for y in range(h):
        for x in range(w):
            px = crop_img[y][x]
            pmayor.append(((px[0]), y, x))
    maximo = max(pmayor)
    centro = (maximo[1], maximo[2])
    spot = cv2.line(crop_img2, esquina, centro, (0, 255, 0), 1)
    diagonal = []
    for m in range(h):
        for n in range(w):
            pix = spot[m, n]
            if (pix[0], pix[1], pix[2]) == (0, 255, 0):
                diagonal.append(crop_img[m, n][0])
    diagonal = diagonal[-10:]
    imorig = np.array(crop_img)
    im_copy = imorig.copy()
    blanco = (255, 255, 255)
    negro = (0, 0, 0)
    n_centro = []
    n2_centro = []
    n3_centro = []
    for s in diagonal:
        rango = (s, s, s)
        black_pixels_mask = np.all(imorig <= rango, axis=-1)
        non_black_pixels_mask = np.any(imorig > rango, axis=-1)
        im_copy[black_pixels_mask] = [255, 255, 255]
        im_copy[non_black_pixels_mask] = [0, 0, 0]

        imorig2 = np.array(im_copy)
        negativo = cv2.bitwise_not(imorig2)
        gray = cv2.cvtColor(negativo, cv2.COLOR_BGR2GRAY)
        median = cv2.medianBlur(gray, 9)
        blur = cv2.GaussianBlur(median, (11, 11), 0)
        canny = cv2.Canny(blur, 30, 150, 3)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        dilated = cv2.dilate(canny, kernel)
        contornos = []
        verde = (0, 255, 0)
        (cnt, hierarchy) = cv2.findContours(dilated.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_NONE)
        rgb = cv2.cvtColor(dilated, cv2.COLOR_GRAY2RGB)  # dilated is single-channel
        cv2.drawContours(rgb, cnt, -1, verde, 2)

        white = np.any(rgb != verde, axis=-1)
        rgb[white] = [0, 0, 0]
        indices = np.where(np.all(rgb == verde, axis=-1))
        cen_circ = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
        cen_circ = cv2.medianBlur(cen_circ, 5)

        params = cv2.SimpleBlobDetector_Params()
        params.filterByArea = True
        params.minArea = .01
        params.filterByCircularity = True
        params.minCircularity = 0.01  #0.6
        params.filterByConvexity = True
        params.minConvexity = 0.3
        params.filterByInertia = True
        params.minInertiaRatio = 0.001
        detector = cv2.SimpleBlobDetector_create(params)
        keypoints = detector.detect(cen_circ)
        for keyPoint in keypoints:
            x = round(keyPoint.pt[0])
            y = round(keyPoint.pt[1])
            s = keyPoint.size
            if len(keypoints) == 1:
                n_centro.append((y, x))
            if len(keypoints) == 2:
                n2_centro.append((y, x))
                n_centro = []
            if len(keypoints) == 3:
                n3_centro.append((y, x))
                n_centro = []
                n2_centro = []
        blank = np.zeros((1, 1))
        blobs = cv2.drawKeypoints(cen_circ, keypoints, blank, (255, 0, 0),
                                  cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    contador = 0
    centro_spot = []
    for s in n_centro, n2_centro, n3_centro:
        if len(s) > 0:
            for L in range(0, contador + 1):
                centro_spot.append(s[L])
        contador = contador + 1
    return (centro_spot)
    def __init__(self):
        self.__detectorParams = cv.SimpleBlobDetector_Params()
        self.__detectorParams.filterByArea = True
        self.__detectorParams.maxArea = 1500
        self.detector = cv.SimpleBlobDetector_create(self.__detectorParams)
Example #8
    def detect_blob(self, img, param_part_id="default"):
        """Detect blobs in the image using part-specific detector parameters.

        The detected blobs can be used to check whether the precision gripper
        picked a part or not.

        :param img: RGB image, 8bit (0-255), numpy.ndarray, shape=(h, w, 3)
        :param str param_part_id: id selecting the parameter set to use.
        :return: (True, blob_array) if at least one blob was found,
                 (False, []) otherwise, where blob_array is a list of
                 geometry_msgs.msg.Point blob centers.
        """
        im_rgb = img
        im_gray = cv2.cvtColor(im_rgb,cv2.COLOR_BGR2GRAY)
    
        params = cv2.SimpleBlobDetector_Params()
    

        rospy.loginfo(rospy.get_name() + " parameter chosen: " + param_part_id)

        # All part ids ("none", "part_9", "part_15", default) currently share
        # the same tuning; branch on param_part_id here if a part ever needs
        # different parameters.

        # Segmentation Thresholds
        params.minThreshold = 100
        params.maxThreshold = 400

        # Filter by color
        params.filterByColor = True
        params.blobColor = 0

        # Filter by size of the blob.
        params.filterByArea = True
        params.minArea = 20
        params.maxArea = 150

        # Filter by Circularity
        params.filterByCircularity = True
        params.minCircularity = 0.7

        # Filter by Convexity
        params.filterByConvexity = False
        params.minConvexity = 0.87

        # Filter by Inertia
        params.filterByInertia = False
        params.minInertiaRatio = 0.01

    
        # Create a detector with the parameters
        ver = (cv2.__version__).split('.')
        if int(ver[0]) < 3:
            detector = cv2.SimpleBlobDetector(params)
        else:
            detector = cv2.SimpleBlobDetector_create(params)
    
        # Detect blobs.
        keypoints = detector.detect(im_gray)
    
        # Draw the key points 
        im_with_keypoints = cv2.drawKeypoints(im_rgb, keypoints, np.array([]), (255, 0, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    
        # Show blobs

        cv2.imwrite("/root/catkin_ws/src/o2as_blob_detection/img_res/blob_detection_"+param_part_id+"_results.png", cv2.cvtColor(im_with_keypoints, cv2.COLOR_BGR2RGB))
        #print("before publish")

        try:
            print("publish")
            self.pub_img_w_blob.publish(self.bridge.cv2_to_imgmsg(im_with_keypoints, "rgb8"))
        except CvBridgeError as e:
            print(e)

        self.current_image_blob = self.bridge.cv2_to_imgmsg(im_with_keypoints, "rgb8")

        blob_array = []
     
        if len(keypoints):
            for i in range(len(keypoints)):
                blob= geometry_msgs.msg.Point()
                blob.x = keypoints[i].pt[0]
                blob.y = keypoints[i].pt[1]
                blob_array.append(blob)
            return True, blob_array

        else:
            return False, blob_array
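
    # The OpenCV 2.x / 3.x+ constructor split handled inline above recurs in
    # several of these examples; a small helper could factor it out (a sketch,
    # the name make_blob_detector is an assumption):
    @staticmethod
    def make_blob_detector(params):
        # SimpleBlobDetector was a plain constructor in OpenCV 2.x and moved
        # to the _create() factory in 3.x and later.
        if int(cv2.__version__.split('.')[0]) < 3:
            return cv2.SimpleBlobDetector(params)
        return cv2.SimpleBlobDetector_create(params)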
    def __init__(self):
        self.active = True  # [INTERACTIVE MODE] Won't be overwritten if FSM isn't running, node always active
        self.first_timestamp = 0
        self.capture_finished = True
        self.tinit = None
        self.trigger = True
        self.node_state = 0
        self.data = []

        # Needed to publish images
        self.bridge = CvBridge()

        # Node name
        self.node_name = rospy.get_name()

        # Capture time
        self.capture_time = 0.5

        # Parameters
        self.DTOL = 15

        # Use FFT or heuristics
        self.useFFT = True
        self.freqIdentify = []

        # Cropping
        self.cropNormalizedRight = [[0.1, 0.67], [0.6, 1.0]]
        self.cropNormalizedFront = [[0.1, 0.5], [0.13, 0.5]]
        self.cropNormalizedTL = [[0.0, 0.25], [0.25, 0.75]]

        # Setup SimpleBlobDetector parameters
        params = cv2.SimpleBlobDetector_Params()  # Change thresholds
        params.minThreshold = 5
        # params.maxThreshold = 200
        params.maxThreshold = 75
        params.thresholdStep = 10

        # Filter by Area.
        params.filterByArea = True
        params.minArea = 10 * 10 * 3.14
        params.maxArea = 20 * 20 * 3.14

        # Filter by Circularity
        params.filterByCircularity = True
        params.minCircularity = 0.8

        # Filter by Convexity
        params.filterByConvexity = True
        params.minConvexity = 0.8

        # Filter by Inertia
        params.filterByInertia = False
        params.minInertiaRatio = 0.05

        # Create the car detector before retuning for the traffic light:
        # "params_car = params" would only alias the same Params object, so
        # changes meant for the traffic light would silently apply to both.
        self.detector_car = cv2.SimpleBlobDetector_create(params)

        # Change parameters for the traffic light
        params.minArea = 5 * 5 * 3.14
        params.maxArea = 15 * 15 * 3.14

        # Create the traffic-light detector with the adjusted parameters
        self.detector_tl = cv2.SimpleBlobDetector_create(params)

        # Publish
        self.pub_raw_detections = rospy.Publisher("~raw_led_detection",
                                                  LEDDetectionArray,
                                                  queue_size=1)
        self.pub_image_right = rospy.Publisher("~image_detection_right",
                                               Image,
                                               queue_size=1)
        self.pub_image_front = rospy.Publisher("~image_detection_front",
                                               Image,
                                               queue_size=1)
        self.pub_image_TL = rospy.Publisher("~image_detection_TL",
                                            Image,
                                            queue_size=1)
        self.pub_detections = rospy.Publisher("~led_detection",
                                              SignalsDetection,
                                              queue_size=1)
        self.pub_debug = rospy.Publisher("~debug_info",
                                         LEDDetectionDebugInfo,
                                         queue_size=1)
        self.veh_name = rospy.get_namespace().strip("/")

        # Subscribed
        self.sub_cam = rospy.Subscriber("camera_node/image/compressed",
                                        CompressedImage, self.camera_callback)
        self.sub_trig = rospy.Subscriber("~trigger", Byte,
                                         self.trigger_callback)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped,
                                           self.cbSwitch)

        # Additional parameters
        self.protocol = rospy.get_param("~LED_protocol")
        #self.capture_time         = rospy.get_param("~capture_time")
        self.continuous = rospy.get_param(
            '~continuous', False)  # Detect continuously as long as active
        # [INTERACTIVE MODE] set to False for manual trigger
        # Cell size (needed for visualization)
        self.cell_size = rospy.get_param("~cell_size")
        self.crop_rect_norm = rospy.get_param("~crop_rect_normalized")

        # Get frequencies to identify
        self.freqIdentify = [
            self.protocol['signals']['CAR_SIGNAL_A']['frequency'],
            self.protocol['signals']['CAR_SIGNAL_PRIORITY']['frequency'],
            self.protocol['signals']['CAR_SIGNAL_SACRIFICE_FOR_PRIORITY']
            ['frequency']
        ]
        #print '---------------------------------------------------------------'
        #print self.freqIdentify
        #print '---------------------------------------------------------------'

        #rospy.loginfo('[%s] Config: \n\t crop_rect_normalized: %s, \n\t capture_time: %s, \n\t cell_size: %s'%(self.node_name, self.crop_rect_normalized, self.capture_time, self.cell_size))

        # Check vehicle name
        if not self.veh_name:
            # fall back on private param passed thru rosrun
            # syntax is: rosrun <pkg> <node> _veh:=<bot-id>
            if rospy.has_param('~veh'):
                self.veh_name = rospy.get_param('~veh')

        if not self.veh_name:
            raise ValueError('Vehicle name is not set.')

        # Loginfo
        rospy.loginfo('[%s] Vehicle: %s' % (self.node_name, self.veh_name))
        rospy.loginfo('[%s] Waiting for camera image...' % self.node_name)
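
    # If genuinely independent parameter sets are ever needed, the Params
    # object can be cloned field by field (a sketch; the helper name is an
    # assumption, and older cv2 builds may not support copy.deepcopy here):
    @staticmethod
    def clone_blob_params(src):
        dst = cv2.SimpleBlobDetector_Params()
        for name in dir(src):
            if not name.startswith('_'):
                setattr(dst, name, getattr(src, name))
        return dst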
Example #10
# Setup SimpleBlobDetector parameters (this snippet is truncated; the earlier
# filter settings are missing, so the Params object is recreated here)
blobParams = cv2.SimpleBlobDetector_Params()

blobParams.maxArea = 2500  # maxArea may be adjusted to suit your experiment

# Filter by Circularity
blobParams.filterByCircularity = True
blobParams.minCircularity = 0.1

# Filter by Convexity
blobParams.filterByConvexity = True
blobParams.minConvexity = 0.87

# Filter by Inertia
blobParams.filterByInertia = True
blobParams.minInertiaRatio = 0.01

# Create a detector with the parameters
blobDetector = cv2.SimpleBlobDetector_create(blobParams)

# Create the iteration criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
###################################################################################################

imgDir = "imgSequence"  # Specify the image directory
imgFileNames = [os.path.join(imgDir, fn) for fn in next(os.walk(imgDir))[2]]
nbOfImgs = len(imgFileNames)

for i in range(0, nbOfImgs - 1):
    img = cv2.imread(imgFileNames[i], cv2.IMREAD_COLOR)
    imgRemapped = cv2.remap(img, map1, map2, cv2.INTER_LINEAR,
                            cv2.BORDER_CONSTANT)  # for fisheye remapping
    imgRemapped_gray = cv2.cvtColor(
        imgRemapped, cv2.COLOR_BGR2GRAY)  # call completed: grayscale conversion implied by the _gray name

    def classify(self, matArray):
        """
        To-Do:
        Please add a comment and documentation!
        """
        self.__logger.info("Starting classify()",
                           "NoteHeightBlobClassifier:classify")

        # Iterate over every note
        for i in range(0, len(matArray[self.__indexOfProcessMatWithoutLines])):

            imageWithLines = matArray[self.__indexOfProcessMatWithLines][i]

            cv2.imshow("Mit Linien", imageWithLines)

            # Apply edge detector canny
            edges = cv2.Canny(imageWithLines,
                              threshold1=self.__cannyThreshold1,
                              threshold2=self.__cannyThreshold2,
                              apertureSize=self.__cannyApertureSize)

            # Apply hough line detection
            lines = cv2.HoughLines(edges,
                                   rho=self.__houghLinesRho,
                                   theta=self.__houghLinesTheta,
                                   threshold=self.__houghLinesThreshold)

            # Extract array of hough line coordinates
            lines_coordinates = []
            for x in range(0, len(lines)):
                for rho, theta in lines[x]:

                    # Calculation of line coordinates
                    a = np.cos(theta)
                    b = np.sin(theta)
                    y0 = b * rho
                    y1 = int(y0 + 1000 * (a))
                    y2 = int(y0 - 1000 * (a))

                    # Extract horizontal lines
                    if (abs(y1 - y2) <= self.__maxGradeOfLinesInPx):
                        lines_coordinates.append([y1, y2])

                        print([y1, y2])

            # Sort coordinates of hough line
            lines_coordinates.sort(key=itemgetter(0))

            # Average the first two and last two detected lines to estimate
            # the y positions of the top and bottom staff lines
            yPositionFirstLine = (
                ((lines_coordinates[0][0] + lines_coordinates[0][1]) / 2) +
                ((lines_coordinates[1][0] + lines_coordinates[1][1]) / 2)) / 2
            yPositionLastLine = (
                ((lines_coordinates[len(lines_coordinates) - 1][0] +
                  lines_coordinates[len(lines_coordinates) - 1][1]) / 2) +
                ((lines_coordinates[len(lines_coordinates) - 2][0] +
                  lines_coordinates[len(lines_coordinates) - 2][1]) / 2)) / 2

            # Compute the number of pixels from one note pitch to the next
            numberOfLines = 5
            noteStep = (yPositionFirstLine +
                        yPositionLastLine) / (numberOfLines - 1) / 2

            # Draw hough lines to image
            showMat = cv2.cvtColor(
                src=matArray[self.__indexOfProcessMatWithoutLines][i],
                code=cv2.COLOR_GRAY2BGR)

            # Create array for mats and line groups
            noteLineCoordinates = []
            line_groupe = []
            lineThickness = 1
            lineColor = (0, 255, 0)

            for j in range(0, len(lines_coordinates) - 1):
                # Get the coordinates of this line and draw it
                line = lines_coordinates[j]
                cv2.line(showMat, (0, line[0]),
                         (showMat.shape[1] - 1, line[1]), lineColor,
                         lineThickness)

            # Read image
            imageWithoutLines = matArray[
                self.__indexOfProcessMatWithoutLines][i]
            imageWithoutLines = 255 - imageWithoutLines
            #cv2.waitKey(0)
            cv2.imshow("Test1", imageWithoutLines)
            #cv2.waitKey(0)
            imageWithoutLines = 255 - cv2.dilate(
                imageWithoutLines, np.ones((1, 10)), iterations=1)
            cv2.imshow("Test2", imageWithoutLines)
            imageWithoutLines = cv2.dilate(imageWithoutLines,
                                           np.ones((1, 16)),
                                           iterations=1)
            cv2.imshow("Test3", imageWithoutLines)
            cv2.waitKey(0)

            # Setup SimpleBlobDetector parameters.
            params = cv2.SimpleBlobDetector_Params()

            # Change thresholds
            # params.minThreshold = 0
            # params.maxThreshold = 255

            # Filter by Area.
            params.filterByArea = True
            params.minArea = 20
            # params.maxArea = 500000

            # Filter by Color
            # params.filterByColor = True
            # params.blobColor = 0

            # Filter by Circularity
            # params.filterByCircularity = False
            # params.minCircularity = 0.0

            # Filter by Convexity
            params.filterByConvexity = False
            # params.minConvexity = 0.87

            # Filter by Inertia
            # params.filterByInertia = False
            # params.minInertiaRatio = 0.01

            # Create a detector with the parameters
            detector = cv2.SimpleBlobDetector_create(params)

            # Detect blobs.
            keypoints = detector.detect(imageWithoutLines)

            if len(keypoints) >= 1:

                # Draw detected blobs as red circles.
                # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures
                # the size of the circle corresponds to the size of blob

                im_with_keypoints = cv2.drawKeypoints(
                    imageWithoutLines, keypoints, np.array([]), (0, 0, 255),
                    cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

                print("Row", imageWithoutLines.shape[0])
                print("Column", imageWithoutLines.shape[1])
                for point in keypoints:
                    print("x=", point.pt[0], "y=", point.pt[1])
                    cv2.circle(showMat, (int(point.pt[0]), int(point.pt[1])),
                               1, (0, 255, 255), 3)

                difference = yPositionFirstLine - keypoints[0].pt[1]

                note_nr = round(difference / noteStep * 2, 0)

                txt = ""
                if (note_nr == 0):
                    txt = "f''" + str(note_nr)
                elif (note_nr == -1):
                    txt = "e''" + str(note_nr)
                elif (note_nr == -2):
                    txt = "d''" + str(note_nr)
                elif (note_nr == -3):
                    txt = "c''" + str(note_nr)
                elif (note_nr == -4):
                    txt = "h'" + str(note_nr)
                elif (note_nr == -5):
                    txt = "a'" + str(note_nr)
                elif (note_nr == -6):
                    txt = "g'" + str(note_nr)
                elif (note_nr == -7):
                    txt = "f'" + str(note_nr)
                elif (note_nr == -8):
                    txt = "e'" + str(note_nr)
                elif (note_nr == -9):
                    txt = "d'" + str(note_nr)
                else:
                    txt = "-" + str(note_nr)

                #cv2.addText(showMat, str(note_nr), (5, 5), font, 4, (243, 127, 53), 2, cv2.LINE_AA)
                cv2.putText(showMat, txt, (0, 75), cv2.FONT_HERSHEY_SIMPLEX,
                            0.35, 255)

                # Show blobs
                cv2.imshow("Keypoints", im_with_keypoints)
                cv2.imshow("ShowMat", showMat)
                cv2.waitKey(0)
def SIMPLEBLOB_feature(img):
    # Detect blobs with an all-default SimpleBlobDetector
    simpleblob = cv2.SimpleBlobDetector_create()
    kp = simpleblob.detect(img)

    return kp
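
# Hypothetical usage of the helper above (the image path is an assumption):
img = cv2.imread('blobs.png', cv2.IMREAD_GRAYSCALE)
kp = SIMPLEBLOB_feature(img)
print('%d blobs found' % len(kp))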
mser = cv.MSER_create(  # opening reconstructed; the snippet's leading arguments were truncated
                      _max_variation=0.025,
                      _min_diversity=0.8,
                      _max_evolution=200,
                      _area_threshold=1.01,
                      _min_margin=0.003,
                      _edge_blur_size=3)
orb = cv.ORB_create(edgeThreshold=31,
                    patchSize=31,
                    nlevels=8,
                    fastThreshold=12,
                    scaleFactor=1.2,
                    WTA_K=2,
                    scoreType=cv.ORB_HARRIS_SCORE,
                    firstLevel=0,
                    nfeatures=500000)
sbd = cv.SimpleBlobDetector_create()  # See params for more
boost = cv.xfeatures2d.BoostDesc_create(use_scale_orientation=False,
                                        scale_factor=6.25)
brief = cv.xfeatures2d.BriefDescriptorExtractor_create(bytes=32,
                                                       use_orientation=False)
daisy = cv.xfeatures2d.DAISY_create(radius=15.0,
                                    q_radius=3,
                                    q_theta=8,
                                    q_hist=8,
                                    norm=cv.xfeatures2d.DAISY_NRM_NONE,
                                    interpolation=True,
                                    use_orientation=False)
freak = cv.xfeatures2d.FREAK_create(orientationNormalized=False,
                                    scaleNormalized=False,
                                    patternScale=22.0,
                                    nOctaves=4)
Example #14
	def __init__(self, img, feature):
		
		# sharpening
		gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
		blurred = cv2.bilateralFilter(gray_image, 9, 75, 75)
		gray_image = cv2.addWeighted(gray_image, 1.5, blurred, -0.5, 0)
		gray_image = cv2.bilateralFilter(gray_image, 9, 75, 75)
	
		# Extract ROI		
		x,y,w,h = cv2.boundingRect(gray_image)   # (x=0,y=0,w=1920,h=1080)
		#print("x,y,w,h: ", x,y,w,h)
		self.roi = img[y+int(h/2.5):y + h-int(h/2.5) , x+int(w/2.5):x + w-int(w/2.5)]
		
		# Draw a bounding of ROI
		self.img_with_bounding = img.copy()
		cv2.rectangle(self.img_with_bounding, (x+int(w/2.5), y+int(h/2.5)), (x + w-int(w/2.5), y + h-int(h/2.5)), (255, 0, 0), 2)
		
		# Find Needle position
		self.gray_roi = cv2.cvtColor(self.roi, cv2.COLOR_BGR2GRAY)
		x,y,w,h = cv2.boundingRect(self.gray_roi)
		#print("xr,yr,wr,hr: ", x,y,w,h)
		self.needle_pose = np.array([[w/2, h/2]]) 
		
		# Otsu's thresholding
		ret,self.th = cv2.threshold(self.gray_roi,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
		
		# Otsu's thresholding after Gaussian filtering
		self.blur = cv2.GaussianBlur(self.gray_roi,(5,5),0)
		ret2,self.th2 = cv2.threshold(self.blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
		
		# Morphological filtering
		kernel = np.ones((2,2),np.uint8)
		self.dilation = cv2.dilate(self.th,kernel,iterations = 1)
		#opening = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)
		
		if feature == 'ORB':
			# Initiate ORB object
			orb = cv2.ORB_create(nfeatures = 5000, scaleFactor = 1.1, nlevels = 10, scoreType = cv2.ORB_FAST_SCORE, patchSize = 100)

			# find the keypoints with ORB
			keypoints = orb.detect(self.gray_roi, None)

			# compute the descriptors with ORB
			keypoints, descriptors = orb.compute(self.gray_roi, keypoints)
			self.point2f = cv2.KeyPoint_convert(keypoints)

			# retval = cv2.ORB.getMaxFeatures(orb)
			# print('retval: ',retval)
			# print('number of Kp: ', len(keypoints))
		elif feature == 'FAST':
			# Initiate FAST object with default values
			fast = cv2.FastFeatureDetector_create(10,True,2)  

			# TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2

			# find and draw the keypoints
			keypoints = fast.detect(self.th2, None)
			self.point2f = cv2.KeyPoint_convert(keypoints)
		
		elif feature == 'BLOB':
			# Setup SimpleBlobDetector parameters.
			params = cv2.SimpleBlobDetector_Params()

			# Change thresholds
			params.minThreshold = 10
			params.maxThreshold = 200

			# Filter by Area.
			params.filterByArea = True
			params.minArea = 5

			# Filter by Circularity
			params.filterByCircularity = True
			params.minCircularity = 0.1

			# Filter by Convexity
			params.filterByConvexity = True
			params.minConvexity = 0.5

			# Filter by Inertia
			params.filterByInertia = True
			params.minInertiaRatio = 0.01

			# Create a detector with the parameters
			ver = (cv2.__version__).split('.')
			if int(ver[0]) < 3:
				detector = cv2.SimpleBlobDetector(params)
			else:
				detector = cv2.SimpleBlobDetector_create(params)

			# Detect blobs.
			keypoints = detector.detect(self.gray_roi)
			self.point2f = cv2.KeyPoint_convert(keypoints)
		
		else:
			print('Error in feature type')
			sys.exit(1)
			
		# draw only the location of the keypoints without size or orientation
		self.final_keypoints = cv2.drawKeypoints(self.roi, keypoints, None, color=(0,255,0), flags=0)

		# split channel
		b_channel, g_channel, r_channel = cv2.split(self.final_keypoints)
		alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 50 #creating a dummy alpha channel image.
		img_BGRA = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
		
		# Create new layers
		layer_1 = np.zeros((h, w, 4))
		layer_2 = np.zeros((h, w, 4))
		
		# Draw a blue line with thickness of 1 px on layer_1
		cv2.line(layer_1,(int(w/2)-20,int(h/2)),(int(w/2)+20,int(h/2)),(255,0,0,255),1)
		cv2.line(layer_1,(int(w/2),int(h/2)-20),(int(w/2),int(h/2)+20),(255,0,0,255),1)
		
		# cv2.line(layer_1,(int(w/2)-60,int(h/2)),(int(w/2)-20,int(h/2)),(255,0,0,255),1)
		# cv2.line(layer_1,(int(w/2)-40,int(h/2)-20),(int(w/2)-40,int(h/2)+20),(255,0,0,255),1)
		
		# Draw a red closed circle on layer_2
		cv2.circle(layer_2,(int(w/2),int(h/2)), 10, (0,0,255,255), 1)
		
		# start from the keypoint image; note that [:] gives a numpy view, not a copy
		self.reimg = img_BGRA[:]
		
		# overlay each drawing layer
		cnd = layer_1[:, :, 3] > 0
		self.reimg[cnd] = layer_1[cnd]
		cnd = layer_2[:, :, 3] > 0
		self.reimg[cnd] = layer_2[cnd]
Example #15
    def load_blob_detector(self):
        detector_params = cv2.SimpleBlobDetector_Params()
        detector_params.filterByArea = True
        detector_params.maxArea = 1500
        self.detector = cv2.SimpleBlobDetector_create(detector_params)
Example #16
def pothole(img_path, id):
    fullpath = "C:/Users/Inderjeet Saluja/Desktop/projects/pothole_detection" + img_path
    image = cv2.imread(fullpath)

    frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(frame, (7, 7), 1)
    median = cv2.medianBlur(blur, 9)
    dilate = cv2.dilate(median, None, iterations=2)
    kernel = np.ones((5, 5), np.uint8)
    opening = cv2.morphologyEx(dilate, cv2.MORPH_CLOSE, kernel)
    erode = cv2.dilate(opening, None, iterations=1)
    edges = cv2.Canny(erode, 80, 160)

    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_NONE)
    # contours = imutils.grab_contours(contours)
    # c = max(contours,key=cv2.contourArea)
    # focalLength = (c[1][0] * 24.0)/11.0

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    params.minThreshold = 10
    params.maxThreshold = 200

    # Filter by Area.
    params.filterByArea = True
    params.minArea = 40

    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.1

    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.87

    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.08

    # Detect the blobs in the image
    if cv2.__version__.startswith('2.'):
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)

    # Detect keypoints on the eroded image
    keypoints = detector.detect(erode)
    print("Total No of potholes are:")
    print(len(keypoints))
    print(keypoints)

    if (len(keypoints) >= 1):
        print("Pothole Found")
    else:
        print("No Pothole Found")

    # Get Heatmap
    frame = cv2.applyColorMap(erode, cv2.COLORMAP_HSV)
    # cv2.drawContours(frame, contours, -1, (94,206,165), 2)
    cv2.drawContours(frame, contours, -1, (255, 0, 149), 2)

    # Get the moments
    mu = [None] * len(contours)
    for i in range(len(contours)):
        mu[i] = cv2.moments(contours[i])

    # Get Area, Length, ARC Length
    for i in range(len(contours)):
        print(
            ' * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f'
            % (i, mu[i]['m00'], cv2.contourArea(
                contours[i]), cv2.arcLength(contours[i], True)))
        # ellipse = cv2.fitEllipse(contours[i])
        # cv2.ellipse(frame, ellipse, (0, 255, 0), 2)

    # Display the image
    cv2.imshow("Pothole_Image_v", frame)
    cv2.imshow("Org_Image_v", image)
    cv2.imwrite("processed.jpg", frame)
    cv2.imwrite("original.jpg", image)
    cv2.waitKey()
    cv2.destroyAllWindows()
def main():

    # argument parsing

    parser = argparse.ArgumentParser()
    parser.add_argument("color_depth",
                        help="integer number of colors to use to draw temps",
                        type=int)
    parser.add_argument('--headless',
                        help='run the pygame headlessly',
                        action='store_true')
    args = parser.parse_args()

    MAXTEMP = 31  # initial max temperature
    COLORDEPTH = args.color_depth  # how many color values we can have
    AMBIENT_OFFSET = 9  # value to offset ambient temperature by to get rolling MAXTEMP
    AMBIENT_TIME = 3000  # length of ambient temperature collecting intervals in seconds

    # create data folders if they don't exist
    if not os.path.exists(get_filepath('../img')):
        os.makedirs(get_filepath('../img'))
    if not os.path.exists(get_filepath('../data')):
        os.makedirs(get_filepath('../data'))
    if not os.path.exists(get_filepath('../video')):
        os.makedirs(get_filepath('../video'))

    # empty the images folder
    for filename in os.listdir(get_filepath('../img/')):
        if filename.endswith('.jpeg'):
            os.unlink(get_filepath('../img/') + filename)

    i2c_bus = busio.I2C(board.SCL, board.SDA)

    # For headless pygame
    if args.headless:
        os.putenv('SDL_VIDEODRIVER', 'dummy')
    else:
        os.putenv('SDL_FBDEV', '/dev/fb1')

    pygame.init()

    # initialize the sensor
    sensor = adafruit_amg88xx.AMG88XX(i2c_bus)

    points = [(math.floor(ix / 8), (ix % 8)) for ix in range(0, 64)]
    grid_x, grid_y = np.mgrid[0:7:32j, 0:7:32j]

    # sensor is an 8x8 grid so lets do a square
    height = 240
    width = 240

    # the list of colors we can choose from
    blue = Color("indigo")
    colors = list(blue.range_to(Color("red"), COLORDEPTH))

    # create the array of colors
    colors = [(int(c.red * 255), int(c.green * 255), int(c.blue * 255))
              for c in colors]

    displayPixelWidth = width / 30
    displayPixelHeight = height / 30

    lcd = pygame.display.set_mode((width, height))

    lcd.fill((255, 0, 0))

    pygame.display.update()
    pygame.mouse.set_visible(False)

    lcd.fill((0, 0, 0))
    pygame.display.update()

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # # Change thresholds
    params.minThreshold = 10
    params.maxThreshold = 255

    # # Filter by Area.
    params.filterByArea = True
    params.minArea = 250

    # # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.1

    # # Filter by Convexity
    params.filterByConvexity = False
    params.minConvexity = 0.87

    # # Filter by Inertia
    params.filterByInertia = False
    params.minInertiaRatio = 0.01

    # # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)

    # initialize centroid tracker
    ct = CentroidTracker()

    # let the sensor initialize
    time.sleep(.1)

    # press key to exit
    screencap = True

    # json dump
    data = {}
    data['sensor_readings'] = []

    # array to hold mode of last 10 minutes of temperatures
    mode_list = []

    print('sensor started!')

    start_time = time.time()

    while (screencap):
        start = time.time()
        date = datetime.now()
        # read the pixels
        pixels = []
        for row in sensor.pixels:
            pixels = pixels + row

        data['sensor_readings'].append({
            'time': datetime.now().isoformat(),
            'temps': pixels,
            'count': ct.get_count()
        })
        mode_result = stats.mode([round(p) for p in pixels])
        mode_list.append(int(mode_result[0]))

        # instead of taking the ambient temperature over one frame of data take it over a set amount of time
        MAXTEMP = float(np.mean(mode_list)) + AMBIENT_OFFSET
        pixels = [
            map_value(p,
                      np.mean(mode_list) + 2, MAXTEMP, 0, COLORDEPTH - 1)
            for p in pixels
        ]

        # perform interpolation
        bicubic = griddata(points, pixels, (grid_x, grid_y), method='cubic')

        # draw everything
        for ix, row in enumerate(bicubic):
            for jx, pixel in enumerate(row):
                try:
                    pygame.draw.rect(
                        lcd, colors[constrain(int(pixel), 0, COLORDEPTH - 1)],
                        (displayPixelHeight * ix, displayPixelWidth * jx,
                         displayPixelHeight, displayPixelWidth))
                except Exception:  # e.g. NaN values from the interpolation
                    print("Caught drawing error")
        pygame.display.update()

        surface = pygame.display.get_surface()

        # frame saving
        folder = get_filepath('../img/')
        filename = str(date) + '.jpeg'
        pygame.image.save(surface, folder + filename)

        img = pygame.surfarray.array3d(surface)
        img = np.swapaxes(img, 0, 1)

        # Read image
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.bitwise_not(img)

        # Detect blobs.
        keypoints = detector.detect(img)

        for i in range(0, len(keypoints)):
            x = keypoints[i].pt[0]
            y = keypoints[i].pt[1]

            # print little circle
            pygame.draw.circle(lcd, (200, 0, 0), (int(x), int(y)), 7, 3)

        # update our centroid tracker using the detected centroids
        ct.update(keypoints)

        pygame.display.update()

        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                print('terminating...')
                screencap = False
                break

        # for running the save on for a certain amount of time
        # if time.time() - start_time >= 10:
        #    print('terminating...')
        #    screencap = False

        # empty mode_list every AMBIENT_TIME seconds
        if len(mode_list) > AMBIENT_TIME:
            mode_list = []
        time.sleep(max(1. / 25 - (time.time() - start), 0))

    # write raw sensor data to file
    data_index = 0
    while os.path.exists(
            get_filepath('../data/') + 'data%s.json' % data_index):
        data_index += 1
    data_path = str(get_filepath('../data/') + 'data%s.json' % data_index)

    with open(data_path, 'w+') as outfile:
        json.dump(data, outfile, indent=4)

    # stitch the frames together
    dir_path = get_filepath('../img/')
    ext = '.jpeg'

    out_index = 0
    while os.path.exists(
            get_filepath('../video/') + 'output%s.avi' % out_index):
        out_index += 1
    output = str(get_filepath('../video/') + 'output%s.avi' % out_index)

    framerate = 10

    # get files from directory
    images = []
    for f in os.listdir(dir_path):
        if f.endswith(ext):
            images.append(f)

    # sort files
    images = sorted(images,
                    key=lambda x: datetime.strptime(
                        x.split('.j')[0], '%Y-%m-%d %H:%M:%S.%f'))
    # determine width and height from first image
    image_path = os.path.join(dir_path, images[0])
    frame = cv2.imread(image_path)
    if not args.headless:
        cv2.imshow('video', frame)
    height, width, channels = frame.shape

    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')  # Be sure to use lower case
    out = cv2.VideoWriter(output, fourcc, framerate, (width, height))

    for image in images:

        image_path = os.path.join(dir_path, image)
        frame = cv2.imread(image_path)

        out.write(frame)  # Write out frame to video

        if not args.headless:
            cv2.imshow('video', frame)
            if (cv2.waitKey(1) & 0xFF) == ord('q'):  # Hit `q` to exit
                break

    print('video created!')
    # Release everything if job is finished
    out.release()
    cv2.destroyAllWindows()
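
# main() uses map_value(), constrain(), get_filepath() and CentroidTracker
# from elsewhere in the project. Sketches of the two numeric helpers, assuming
# the usual Arduino-style semantics implied by their call sites above:

def constrain(val, min_val, max_val):
    # Clamp val into [min_val, max_val].
    return min(max_val, max(min_val, val))


def map_value(x, in_min, in_max, out_min, out_max):
    # Linearly rescale x from [in_min, in_max] to [out_min, out_max].
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min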
Example #18
def find_IR_blobs(fname):
    ir_img = cv2.imread(fname)
    if ir_img is None:  # cv2.imread returns None instead of raising on failure
        print('Problem Loading File!')
        return
    ir_imgBW = np.mean(ir_img,2)


    for i in range(3):
        ir_img[:,:,i]=ir_imgBW

    # Mask out the top band, bottom band, and a central square
    ir_img[0:60, :, :] = 0
    ir_img[220:, :, :] = 0
    ir_img[100:140, 100:140, :] = 0

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()
    params.blobColor = 255
    # Filter by Area.
    params.filterByArea = True
    params.minArea = 1

    params.filterByConvexity=True
    params.filterByCircularity = True
    params.minCircularity = 0.15
    params.minInertiaRatio = 0.15
    # Change thresholds
    params.minThreshold = 150
    params.maxThreshold = 255
    params.minConvexity = 0.25

    detector = cv2.SimpleBlobDetector_create(params)

    keypoints = detector.detect(ir_img)

    im_with_keypoints = cv2.drawKeypoints(ir_img, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    f=plt.figure()
    ax=f.add_subplot(111)
    # Show blobs
    ax.imshow( np.maximum(im_with_keypoints[:,:,1],ir_imgBW) ,cmap='Greys_r')
    #ax.imshow(ir_img, cmap='Greys_r')

    ells = [Ellipse(xy=[keypoints[i].pt[0],keypoints[i].pt[1]], width=3*keypoints[i].size, height=3*keypoints[i].size, angle=0) for i in range(len(keypoints))]
    for e in ells:
        ax.add_artist(e)
        e.set_facecolor('none')


    for k in keypoints:
        #ax.plot(k.pt[0],k.pt[1],'rx')
        ax.text(k.pt[0],k.pt[1]*1.07,np.round(k.size,1),color='white')

    ax.set_xlim([0,240])
    ax.set_ylim([240,0])
    ax.set_xlabel(fname.split('.')[0])

    f.show()
    f.savefig('processed_' + fname)

    return [fname, keypoints]
Example #19
HG_old_value = 0
HR_trac_value = 255
HR_old_value = 0

blobparams = cv2.SimpleBlobDetector_Params()
blobparams.filterByCircularity = False
blobparams.filterByArea = True
blobparams.minArea = 100  #10
blobparams.maxArea = 100000
#blobparams.filterByColor= True
#blobparams.blobColor=255
blobparams.minDistBetweenBlobs = 200
##blobparams.filterByConvexity = False
##blobparams.maxConvexity = 3000

detector = cv2.SimpleBlobDetector_create(blobparams)

kernel = np.ones((11, 11), np.uint8)


def LB_updateValue(new_value):
    global LB_trac_value
    LB_trac_value = new_value
    return


def LG_updateValue(new_value):
    global LG_trac_value
    LG_trac_value = new_value
    return
Example #20
cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FPS, 30)

# import the face classifier
face_cascade = cv2.CascadeClassifier(
    path.path('haarcascades') + '/haarcascade_frontalface_default.xml')
# import the eye classifier
eye_cascade = cv2.CascadeClassifier(
    path.path('haarcascades') + '/haarcascade_eye.xml')

# Initialize the blob detection algorithm
blob_parm = cv2.SimpleBlobDetector_Params()
blob_parm.filterByArea = True
blob_parm.maxArea = 1500  # in pixels
blob_dt = cv2.SimpleBlobDetector_create(blob_parm)

# Draw the trackbars on the window
cv2.namedWindow(name)
# Right
cv2.createTrackbar(bar_name_left, name, 0, 255, nothing)
# Left
cv2.createTrackbar(bar_name_right, name, 0, 255, nothing)

while (True):
    ret, frame = cam.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # apply the face detection algorithm
    faces = face_cascade.detectMultiScale(gray, scaleFactor, minNeighbors)
Example #21
def detect_notch(img, img_thresh):
    """
    Detects if a notch is present in the image, and if so, returns True.

    First, the Hough Circle Transform is applied to find a circle
    corresponding to the entire eyeball. This circle is subtracted from the
    thresholded image of the eyeball. Ideally what is left at this point will
    be either a notch, or nothing. Since we will likely pick up "shreds" left
    from the edges of the subtraction, we erode and dilate at this point to
    remove leftovers.

    A region of interest is positioned over where notches usually appear,
    which is at about the 45 degree mark on the eyeball in the NE quadrant.
    Blob detection is run over this ROI. If a blob is detected, it is assumed
    that the blob is a notch, and the function can return True.
    """
    circles = hough_circles(img)

    # Paint out the first circle detected. Assume that only one circle was
    # detected for whole image. If no circles are detected, fail fast and just
    # return false.
    if circles:
        x, y, r = circles[0]
        cv.circle(img_thresh, (x, y), r, (0, 0, 0), cv.FILLED)
        #plt.subplot(154)
        #plt.imshow(draw_hough_circles(img_thresh, circles))
        #plt.title("After Hough Circles?")
        #print(x)
        #print(y)
        #print(r)
    else:
        return False

    # Erode what's left to try and remove edges.
    img_thresh = cv.erode(img_thresh, np.ones((3, 3), np.uint8))
    img_thresh = cv.dilate(img_thresh, np.ones((3, 3), np.uint8))

    # Extract a region of interest that is very likely to contain the notch if
    # one is present in the image. This corresponds to a small square at about
    # the 45 degree mark on the eyeball. Some notches are slightly lower than
    # this, so the ROI should be large enough to capture many notch positions.
    ratio = 1.0 / 4.0
    roi_size = img_thresh.shape[0] * ratio
    half_rs = roi_size / 2.0

    # Do a little trig to find the Cartesian coordinates of the 45 degree
    # point, which is where the notch should be.
    radius = img_thresh.shape[0] / 2.0
    angle = math.pi / 4.0
    side = radius * math.sin(angle)

    # Extract the ROI.
    x, y = (int(radius + side - half_rs), int(radius - side - half_rs))
    roi = img_thresh[y:int(y + roi_size), x:int(x + roi_size)]

    # Run blob detection on what's left.
    sbd = cv.SimpleBlobDetector_create(BLOB_PARAMS)
    keypoints = sbd.detect(roi)

    here = [x, y, radius]

    plt.subplot(164)
    plt.imshow(draw_hough_circles(img, circles))
    plt.title("After Hough Circles?")

    # If keypoints were found, then we assume that a notch was detected.
    return bool(keypoints)
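A hypothetical invocation (assumed, not from the source): the filename and
threshold value are placeholders, and the helpers hough_circles and
BLOB_PARAMS are assumed to be defined elsewhere in the module.

import cv2 as cv

img = cv.imread('eye.png', cv.IMREAD_GRAYSCALE)  # hypothetical file
_, img_thresh = cv.threshold(img, 127, 255, cv.THRESH_BINARY)
print('notch detected:', detect_notch(img, img_thresh))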
Example #22
def compare(firebase_client=None):
    #Retrieving images
    images = [cv2.imread(file) for file in glob.glob("/home/pi/Desktop/giraffe/*.jpg")]
    #images.extend([cv2.imread(file) for file in glob.glob("/home/pi/Desktop/giraffe/*.jpg")])

    #Creating background subtractor
    backSub = cv2.createBackgroundSubtractorMOG2()

    #Creating blob detector 
    params = cv2.SimpleBlobDetector_Params()
    params.filterByColor = True
    params.blobColor = 0
    params.filterByArea = True
    params.minArea = 3000
    params.maxArea = 200000
    params.filterByCircularity = True
    params.minCircularity = 0.000001
    
    detector = cv2.SimpleBlobDetector_create(params)

    #Creating foreground mask
    for i in images:
        fgMask = backSub.apply(cv2.GaussianBlur(i,(159,159),0))

    #Threshing 
    thresh = cv2.threshold(fgMask, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    # Detecting the blobs
    keypoints = detector.detect(thresh)

    # Drawing the blobs
    im_with_keypoints = cv2.drawKeypoints(thresh, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    #Lists
    topleft = [(0,0),(475,0),(1250,0),(2140,0),(475,740),(1250,740),(2140,740),(475,1620),(1250,1620),(2140,1620)]
    botright = [(0,0),(1065,400),(1840,400),(2592,400),(1065,1300),(1840,1300),(2592,1300),(1065,1944),(1840,1944),(2592,1944)]
    countinglist = [0,0,0,0,0,0,0,0,0]
    
    # Draw the table zones (index 0 of the corner lists is a placeholder)
    for i in range(1, 10):
        cv2.rectangle(im_with_keypoints, topleft[i], botright[i], (0, 0, 255), 2)
    

    #Output

    for i in keypoints:
        x = i.pt[0]
        y = i.pt[1]
        for z in range(1,10):
            if (topleft[z][1] <= y <= botright[z][1]) and (topleft[z][0] <= x <= botright[z][0]):
                countinglist[z-1] += 1       
     
    # Load the base result image
    image = cv2.imread("/home/pi/Desktop/giraffe/Result/basic.jpg")
    # Tables 1- 9
    table = [(240, 108),(484, 108),(769, 108),(240, 368),(484, 368),(769, 368),(240, 637),(484, 637),(769, 637)]

    # Table Status
    o =(0,0,255) # Occupied 
    u =(0,255,0) # Unoccupied

    for i in range(9):
        if countinglist[i] == 0:
            cv2.circle(image, table[i], 50, u, -1)
        else:
            cv2.circle(image, table[i], 50, o, -1)


    cv2.imshow('result',image) #Remove after validation
    output_image = "/home/pi/Desktop/giraffe/Result/result.jpg"
    cv2.imwrite(output_image,image)
    if firebase_client:
        database.upload_image(output_image)
        occupants = sum([1 for x in countinglist if x > 0])
        database.update_table_occupancy(firebase_client, occupants)
    
    
    cv2.imshow("Keypoints", im_with_keypoints) #Remove after validation
    cv2.imwrite(("/home/pi/Desktop/giraffe/Result/keypoints.jpg"),im_with_keypoints)
    cv2.waitKey(5000)
    cv2.destroyAllWindows()
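A hypothetical invocation (assumed, not in the source): calling compare()
without a client skips the Firebase upload branch.

if __name__ == '__main__':
    compare()

Example #23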
def main():
    global velocity_pub, bridge, blob_parameters, detector, color, new_img_available, new_img_msg, low_hue, low_sat, low_val, high_hue, high_sat, high_val, cv_image, hsv_image, lower_limits, upper_limits, thresholded_image, keypoints, first_blob, robot_vel, x_coord, y_coord, blob_size, state_tracking, state_approaching_blob, new_blob_available

    rospy.init_node("follow_three_blobs_final")

    rospy.Subscriber("camera/color/image_raw", Image, get_image)

    velocity_pub = rospy.Publisher("robotont/cmd_vel", Twist, queue_size=1)

    bridge = cv_bridge.core.CvBridge()

    cv2.namedWindow("Thresholded image", cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('blob_detector/follow_three_blobs_final')

    blob_parameters = cv2.SimpleBlobDetector_Params()

    blob_parameters.filterByColor = True
    blob_parameters.filterByArea = False
    blob_parameters.filterByCircularity = False
    blob_parameters.filterByInertia = False
    blob_parameters.filterByConvexity = False

    blob_parameters.blobColor = 255

    blob_parameters.minArea = 1491
    blob_parameters.maxArea = 307200

    blob_parameters.minCircularity = 0
    blob_parameters.maxCircularity = 1

    blob_parameters.minInertiaRatio = 0
    blob_parameters.maxInertiaRatio = 1

    blob_parameters.minConvexity = 0
    blob_parameters.maxConvexity = 1

    blob_parameters.minDistBetweenBlobs = 100

    detector = cv2.SimpleBlobDetector_create(blob_parameters)

    while not rospy.is_shutdown():
        # green
        if color == 1:
            low_hue = 86
            low_sat = 189
            low_val = 102
            high_hue = 111
            high_sat = 255
            high_val = 159

            #print("values to green blob")

        # yellow
        elif color == 2:
            low_hue = 26
            low_sat = 177
            low_val = 99
            high_hue = 42
            high_sat = 255
            high_val = 197

            #print("values to yellow blob")

        else:
            color = 1
            low_hue = 86
            low_sat = 189
            low_val = 102
            high_hue = 111
            high_sat = 255
            high_val = 159

            #print("values to green blob")

        if state_tracking:
            robot_vel = Twist()
            robot_vel.angular.z = 0.5
            velocity_pub.publish(robot_vel)

            print("tracking blob")

        if new_img_available:

            cv_image = bridge.imgmsg_to_cv2(new_img_msg,
                                            desired_encoding='bgr8')

            cv2.imshow('blob_detector/follow_three_blobs_final', cv_image)

            cv2.waitKey(1)

            new_img_available = False
            hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

            lower_limits = np.array([low_hue, low_sat, low_val])
            upper_limits = np.array([high_hue, high_sat, high_val])

            thresholded_image = cv2.inRange(hsv_image, lower_limits,
                                            upper_limits)
            thresholded_image = cv2.bitwise_and(cv_image,
                                                cv_image,
                                                mask=thresholded_image)

            cv2.imshow("Thresholded image", thresholded_image)

            keypoints = detector.detect(thresholded_image)

            if len(keypoints) > 0:
                first_blob = keypoints[0]
                x_coord = int(first_blob.pt[0])
                y_coord = int(first_blob.pt[1])
                blob_size = int(first_blob.size)
                print("x_coord: " + str(x_coord) + ", y_coord: " +
                      str(y_coord) + ", blob_size: " + str(blob_size))
                print("found blob")
                state_tracking = False
                state_approaching_blob = True
            else:
                continue
        else:
            continue

        if state_approaching_blob:

            if x_coord < 300:
                robot_vel.angular.z = 0.15
            elif x_coord > 300:
                robot_vel.angular.z = -0.15
            else:
                robot_vel.angular.z = 0.0

            if blob_size < 370:
                robot_vel.linear.x = 0.1
            elif blob_size > 370:
                robot_vel.linear.x = -0.1
            else:
                robot_vel.linear.x = 0.0

            velocity_pub.publish(robot_vel)
            print("approaching blob")

            if blob_size > 370:

                robot_vel.linear.x = 0.0
                robot_vel.linear.y = 0.0
                robot_vel.angular.z = 0.0

                velocity_pub.publish(robot_vel)
                state_approaching_blob = False
                state_tracking = True
                color = color + 1
                print("arrived to blob and will track the next blob")
            else:
                continue

        else:
            continue

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
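An assumed entry point (not shown in the excerpt): a minimal sketch for
running main() as a ROS node.

if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        pass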
Example #24
def main():
    global x0, y0, x1, y1, drawing, frame, mode, lower, upper
    global MODE_TRACK, MODE_SHOW, MODE_MARK

    #
    # initialization
    #
    MODE_TRACK = 0  # track an object
    MODE_SHOW = 1  # only show tracking markers on video
    MODE_MARK = 2  # mark region color to track

    lower = np.array([0, 0, 0], dtype="uint8")
    upper = np.array([255, 255, 255], dtype="uint8")
    mode = MODE_SHOW
    mode_text = 'Show'
    drawing = False  # true if mouse is pressed
    x0, y0 = -1, -1
    x1, y1 = -1, -1

    print(' m - mark color region to track\n t - track\n s - display tracking marker only\n ESC - quit')

    #
    # link event callback function
    #
    cv2.namedWindow('image', cv2.WINDOW_GUI_NORMAL + cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback('image', mark_rect)

    #
    # setup font for overlay text
    #
    font = cv2.FONT_HERSHEY_SIMPLEX

    #
    # Set up a blob detector with some parameters
    #
    det_param = cv2.SimpleBlobDetector_Params()
    #det_param.thresholdStep = 1
    #det_param.minThreshold = 0
    #det_param.maxThreshold = 0
    #det_param.minRepeatability = 10
    #det_param.minDistBetweenBlobs = 10
    det_param.filterByColor = False
    det_param.filterByCircularity = False
    det_param.filterByInertia = False
    det_param.filterByConvexity = False
    det_param.filterByArea = True
    det_param.minArea = 1000
    det_param.maxArea = 10000
    if cv2.__version__.startswith("3."):
        detector = cv2.SimpleBlobDetector_create(det_param)
    else:
        detector = cv2.SimpleBlobDetector(det_param)

    #
    # open the capture device and print some
    # useful properties
    #
    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        # cv2.CAP_PROP_* constants (OpenCV 3+)
        frameWidth = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        frameHeight = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        print('frame: width {}, height {}'.format(frameWidth, frameHeight))

    #
    # frame capture and processing loop
    #
    while True:
        # capture a frame
        # TODO consider color conversions to LAB or HSV etc
        #      to get better object detection in different lighting conditions
        ret, frame = cap.read()

        # operations on the frame done here
        if mode == MODE_MARK:
            # draw the marking rectangle
            cv2.rectangle(frame, (x0, y0), (x1, y1), (0, 255, 0), 1)
        else:
            # calculate tracking and show markers on video
            # create NumPy arrays from the 'upper' and lower' boundaries
            # source: https://www.pyimagesearch.com/2014/08/04/opencv-python-color-detection/
            # source: https://www.learnopencv.com/color-spaces-in-opencv-cpp-python/
            # TODO apply filtering such as dilate() or other model to the 'mask'
            mask = cv2.inRange(frame, lower, upper)
            #output = cv2.bitwise_and(frame, frame, mask = mask)

            # TODO find blob and calculate center of mass and deviation from
            #      center of frame. this will be the tracking error
            # source: https://www.learnopencv.com/blob-detection-using-opencv-python-c/
            # TODO consider error filtering such as Kalman or Exponential moving Average (EMA)
            # Detect blobs
            mask = cv2.bitwise_not(mask)
            keypoints = detector.detect(mask)

            # Draw detected blobs as red circles.
            # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
            frame_with_keypoints = cv2.drawKeypoints(
                frame, keypoints, np.array([]), (0, 0, 255),
                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        if mode == MODE_TRACK:
            # TODO PID tracking and NXT pan-tilt control
            pass

        # add text and markers to the image
        cv2.putText(frame, mode_text, (1, 20), font, 0.5, (255, 255, 255), 1,
                    cv2.LINE_AA)

        # display the resulting frame
        cv2.imshow('mask', mask)
        #cv2.imshow('output', output)
        cv2.imshow('image', frame)
        cv2.imshow('blob', frame_with_keypoints)

        # key input mode/command
        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break
        elif key == ord('m'):
            x0, y0 = -1, -1
            x1, y1 = -1, -1
            mode_text = 'Mark'
            mode = MODE_MARK
        elif key == ord('t'):
            mode_text = 'Track'
            mode = MODE_TRACK
        elif key == ord('s'):
            mode_text = 'Show'
            mode = MODE_SHOW

    # when done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #25
def main(image):
    # load image specified in command line
    img = cv2.imread(image, 1)

    # scale the 180x160 px image up to 1080x720 px for ease of viewing. This
    # also changes how the morphology and blob detector behave; the sizes are
    # set as globals.
    scaled_img = cv2.resize(img,
                            dsize=(width, height),
                            interpolation=cv2.INTER_CUBIC)

    # normalize the image
    norm_img = cv2.normalize(scaled_img, scaled_img, 0, 255, cv2.NORM_MINMAX,
                             cv2.CV_8U)

    # convert to HSV colour space as this will detect the coloured hot spots more easily
    hsv_img = cv2.cvtColor(norm_img, cv2.COLOR_BGR2HSV)

    # only look at the value channel of HSV as it will ignore the greyscale background
    v_img = hsv_img[:, :, 2]

    # remove noise while retaining edges
    blurred_img = cv2.bilateralFilter(v_img, 9, 150, 150)

    # erode away noise such as legs under tables and other small hot objects
    eroded_img = cv2.erode(blurred_img, np.ones((erodeSize, erodeSize)))

    # some people end up being broken into multiple blobs, so merge these
    # together. Unfortunately this also merges close objects in the far
    # field.
    dilated_img = cv2.dilate(eroded_img, np.ones((dilateSize, dilateSize)))

    # threshold the image so are only left with points of interest
    retval, thresh_img = cv2.threshold(dilated_img, 220, 255,
                                       cv2.THRESH_BINARY)

    # invert the image for the blob detector
    invert_img = cv2.bitwise_not(thresh_img)

    # setup the first pass params of the blob detector
    params = cv2.SimpleBlobDetector_Params()

    params.minThreshold = 0
    params.maxThreshold = 255

    # helps remove some of the merging between rows of seats
    params.filterByInertia = True
    params.minInertiaRatio = 0.25

    params.filterByArea = False
    params.filterByCircularity = False
    params.filterByConvexity = False

    # run the detector and get the keypoints
    detector = cv2.SimpleBlobDetector_create(params)
    keypoints = detector.detect(invert_img)

    #--- Second Pass Through Blob Detector ---#

    # keep the same erosion as first pass as we just want to adjust dilation now
    eroded_img2 = cv2.erode(blurred_img, np.ones((erodeSize, erodeSize)))

    # use a smaller dilation to reduce some of the merging of multiple hotspots into 1 hotspot.
    dilated_img2 = cv2.dilate(eroded_img2, np.ones((dilateSize2, dilateSize2)))

    # threshold the image same as above
    retval2, thresh_img2 = cv2.threshold(dilated_img2, 220, 255,
                                         cv2.THRESH_BINARY)

    # invert the image same as above
    invert_img2 = cv2.bitwise_not(thresh_img2)

    # setup the second pass params of the blob detector
    params2 = cv2.SimpleBlobDetector_Params()

    params2.minThreshold = 0
    params2.maxThreshold = 255

    # as now using a smaller dilation, large blobs in nearfield are broken into
    # multiple smaller blobs.  minArea is a type of noise filer to remove these
    # smaller blobs
    params2.filterByArea = True
    params2.minArea = 800
    params2.maxArea = 6000

    params2.filterByCircularity = False
    params2.filterByConvexity = False
    params2.filterByInertia = False

    # run the detector and get the keypoints of the image with the new dilation
    detector2 = cv2.SimpleBlobDetector_create(params2)
    new_keypoints = detector2.detect(invert_img2)

    # Detect duplicate (or near-duplicate) keypoints between both passes.
    # This is a simple point-in-square test; a point-in-circle test would be
    # nicer (see the sketch after this function).
    for new_keypoint in new_keypoints:
        for original_keypoint in keypoints:
            x_upper = original_keypoint.pt[0] + duplicate_thresh
            x_lower = original_keypoint.pt[0] - duplicate_thresh
            y_upper = original_keypoint.pt[1] + duplicate_thresh
            y_lower = original_keypoint.pt[1] - duplicate_thresh

            # if the new keypoint is within an area of the old keypoint, it is
            # duplicate. Add it to the list of duplicates.
            if (x_lower <= new_keypoint.pt[0] < x_upper
                    and y_lower <= new_keypoint.pt[1] < y_upper):
                duplicate_keypoints.append(new_keypoint)

    # if any of the new keypoints aren't duplicates, add them to the original keypoints.
    for new_keypoint in new_keypoints:
        if new_keypoint not in duplicate_keypoints:
            keypoints.append(new_keypoint)

    # the amount of people found in the scene.
    people = str(len(keypoints))

    # draw the keypoints on the image
    img_keypoints = cv2.drawKeypoints(
        invert_img, keypoints, np.array([]), (255, 0, 255),
        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # put text number of Contour Keypoints on Screen in Blue
    cv2.putText(img_keypoints, people, (80, 80), cv2.FONT_HERSHEY_SIMPLEX, 3,
                (255, 0, 0), 3)

    # show the keypoints on the image
    cv2.imshow('People Counter', img_keypoints)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
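A minimal sketch of the circle-based duplicate test suggested in the comments
above (assumed, not from the source); duplicate_thresh is reused here as the
matching radius.

import math

def is_duplicate(kp_a, kp_b, radius):
    # Treat two keypoints as duplicates when their centers lie within
    # `radius` pixels of each other (Euclidean distance).
    return math.hypot(kp_a.pt[0] - kp_b.pt[0],
                      kp_a.pt[1] - kp_b.pt[1]) <= radius

# e.g. is_duplicate(new_keypoint, original_keypoint, duplicate_thresh)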
Example #26
# func name using underscore, variable name using camelCase
# img1 is train, img2 is query
import cv2
import numpy as np
# import matplotlib.pyplot as plt
import skvideo.io as io

detectors = {
    'fast': cv2.FastFeatureDetector_create(),
    'star': cv2.xfeatures2d.StarDetector_create(),
    'brisk': cv2.BRISK_create(),
    'orb': cv2.ORB_create(),
    'mser': cv2.MSER_create(),
    'gftt': cv2.GFTTDetector_create(),
    'blob': cv2.SimpleBlobDetector_create()
}

descriptors = {
    'brief': cv2.xfeatures2d.BriefDescriptorExtractor_create(),
    'orb': cv2.ORB_create(),
    'daisy': cv2.xfeatures2d.DAISY_create(),
    'boost': cv2.xfeatures2d.BoostDesc_create(),
    'freak': cv2.xfeatures2d.FREAK_create(),
    'latch': cv2.xfeatures2d.LATCH_create(),
    'lucid': cv2.xfeatures2d.LUCID_create(),
    'vgg': cv2.xfeatures2d.VGG_create()
}

matchers = {
    'bruteForce': cv2.BFMatcher(cv2.NORM_HAMMING),
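A minimal usage sketch (assumed, not from the source): pick one detector and
one descriptor from the tables above; the filename is hypothetical.

img = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)
kp = detectors['orb'].detect(img, None)
kp, des = descriptors['orb'].compute(img, kp)
print('{} keypoints, descriptor shape {}'.format(len(kp), des.shape))

Example #27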
def run(motorControl, camera):
    # Ask user for color here:
    color = input(
        "What color would you like? (red (1), green (2), blue (3)): ")
    print("\n")
    setMaskToLookForColor(color)

    camera.start()

    # Initialize the SimpleBlobDetector
    params = cv.SimpleBlobDetector_Params()
    detector = cv.SimpleBlobDetector_create(params)

    # Attempt to open a SimpleBlobDetector parameters file if it exists,
    # Otherwise, one will be generated.
    # These values WILL need to be adjusted for accurate and fast blob detection.
    # yaml, xml, or json
    fs = cv.FileStorage("params.yaml", cv.FILE_STORAGE_READ)
    if fs.isOpened():
        detector.read(fs.root())
    else:
        print("WARNING: params file not found! Creating default file.")

        fs2 = cv.FileStorage("params.yaml", cv.FILE_STORAGE_WRITE)
        detector.write(fs2)
        fs2.release()

    fs.release()

    # Create windows
    cv.namedWindow(WINDOW1)
    cv.namedWindow(WINDOW2)

    fps, prev = 0.0, 0.0
    while True:
        # Calculate FPS
        now = time.time()
        fps = (fps * FPS_SMOOTHING + (1 / (now - prev)) *
               (1.0 - FPS_SMOOTHING))
        prev = now

        # Get a frame
        frame = camera.read()

        # Blob detection works better in the HSV color space
        # (than the RGB color space) so the frame is converted to HSV.
        frame_hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)

        # Create a mask using the given HSV range
        mask = cv.inRange(frame_hsv, (minH, minS, minV), (maxH, maxS, maxV))

        # Run the SimpleBlobDetector on the mask.
        # The results are stored in a vector of 'KeyPoint' objects,
        # which describe the location and size of the blobs.
        keypoints = detector.detect(mask)

        # For each detected blob, draw a circle on the frame
        frame_with_keypoints = cv.drawKeypoints(
            frame,
            keypoints,
            None,
            color=(0, 255, 0),
            flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        # Write text onto the frame
        cv.putText(frame_with_keypoints, "FPS: {:.1f}".format(fps), (5, 15),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
        cv.putText(frame_with_keypoints, "{} blobs".format(len(keypoints)),
                   (5, 35), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))

        # Display the frame
        cv.imshow(WINDOW1, mask)
        cv.imshow(WINDOW2, frame_with_keypoints)

        # Check for user input
        c = cv.waitKey(1)
        if c == 27 or c == ord('q') or c == ord('Q'):  # Esc or Q
            camera.stop()
            break

        # Get one KeyPoint
        points = cv.KeyPoint_convert(keypoints)

        xPoint = 0
        if len(points) > 0:
            xPoint = points[0][0]

        distanceError = xPoint - CENTER_X_COORD

        if abs(distanceError) > 150:
            faceGoal(distanceError, motorControl)
        else:
            motorControl.setSpeedsPWM(0, 0)

    camera.stop()
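A hypothetical sketch (assumed workflow, not from the source) of regenerating
params.yaml after tuning: adjust fields on a Params object, build a detector
from them, and persist it for the next run.

import cv2 as cv

params = cv.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = 200  # tune for the expected blob size
detector = cv.SimpleBlobDetector_create(params)
fs = cv.FileStorage("params.yaml", cv.FILE_STORAGE_WRITE)
detector.write(fs)  # serialize the detector's current parameters
fs.release()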
Example #28
def extract_binary_masks_blob(A,
                              neuron_radius,
                              dims,
                              num_std_threshold=1,
                              minCircularity=0.5,
                              minInertiaRatio=0.2,
                              minConvexity=.8):
    """
    Function to extract masks from data. It will also perform a preliminary selectino of good masks based on criteria like shape and size

    Parameters:
    ----------
    A: scipy.sparse matris
        contains the components as outputed from the CNMF algorithm

    neuron_radius: float
        neuronal radius employed in the CNMF settings (gSiz)

    num_std_threshold: int
        number of times above iqr/1.349 (std estimator) the median to be considered as threshold for the component

    minCircularity: float
        parameter from cv2.SimpleBlobDetector

    minInertiaRatio: float
        parameter from cv2.SimpleBlobDetector

    minConvexity: float
        parameter from cv2.SimpleBlobDetector

    Returns:
    --------
    masks: np.array

    pos_examples:

    neg_examples:

    """
    import cv2
    params = cv2.SimpleBlobDetector_Params()
    params.minCircularity = minCircularity
    params.minInertiaRatio = minInertiaRatio
    params.minConvexity = minConvexity

    # Change thresholds
    params.blobColor = 255

    params.minThreshold = 0
    params.maxThreshold = 255
    params.thresholdStep = 3

    params.minArea = np.pi * ((neuron_radius * .75)**2)

    params.filterByColor = True
    params.filterByArea = True
    params.filterByCircularity = True
    params.filterByConvexity = True
    params.filterByInertia = True

    detector = cv2.SimpleBlobDetector_create(params)

    masks_ws = []
    pos_examples = []
    neg_examples = []

    for count, comp in enumerate(A.tocsc()[:].T):

        print(count)
        comp_d = np.array(comp.todense())
        gray_image = np.reshape(comp_d, dims, order='F')
        gray_image = (gray_image - np.min(gray_image)) / (
            np.max(gray_image) - np.min(gray_image)) * 255
        gray_image = gray_image.astype(np.uint8)

        # segment using watershed
        markers = np.zeros_like(gray_image)
        elevation_map = sobel(gray_image)
        thr_1 = np.percentile(gray_image[gray_image > 0], 50)
        iqr = np.diff(np.percentile(gray_image[gray_image > 0], (25, 75)))
        thr_2 = thr_1 + num_std_threshold * iqr / 1.35
        markers[gray_image < thr_1] = 1
        markers[gray_image > thr_2] = 2
        edges = watershed(elevation_map, markers) - 1
        # only keep largest object
        label_objects, nb_labels = ndi.label(edges)
        sizes = np.bincount(label_objects.ravel())

        if len(sizes) > 1:
            idx_largest = np.argmax(sizes[1:])
            edges = (label_objects == (1 + idx_largest))
            edges = ndi.binary_fill_holes(edges)
        else:
            print('empty component')
            edges = np.zeros_like(edges)

        if 1:
            masks_ws.append(edges)
            keypoints = detector.detect((edges * 200.).astype(np.uint8))
        else:
            masks_ws.append(gray_image)
            keypoints = detector.detect(gray_image)

        if len(keypoints) > 0:
            pos_examples.append(count)

        else:
            neg_examples.append(count)

    return np.array(masks_ws), np.array(pos_examples), np.array(neg_examples)
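A hypothetical call (assumed, not from the source): A would normally come
from the CNMF pipeline; a tiny random sparse matrix stands in here just to
show the signature.

import numpy as np
import scipy.sparse

dims = (64, 64)
A = scipy.sparse.random(int(np.prod(dims)), 5, density=0.01, format='csc')
masks, pos, neg = extract_binary_masks_blob(A, neuron_radius=6.0, dims=dims)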
Example #29
print("Prediction from Kalman: GREEN\n")

subtractor = cv2.createBackgroundSubtractorMOG2(history=20,
                                                varThreshold=15,
                                                detectShadows=False)
#Setting blob detection parameters
params = cv2.SimpleBlobDetector_Params()
params.filterByColor = False
params.filterByArea = True
params.minArea = 300
params.maxArea = 100000
params.filterByCircularity = False
params.filterByConvexity = False
#params.filterByInertia = False
# Set up the detector with set parameters.
detector = cv2.SimpleBlobDetector_create(params)

if ((args.video_file and cap.open(str(args.video_file)))
        or cap.open(args.camera_to_use)):

    # Create Windows
    cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
    cv2.namedWindow(windowName2, cv2.WINDOW_NORMAL)
    cv2.namedWindow(windowNameSelection, cv2.WINDOW_NORMAL)

    # set sliders for HSV selection thresholds

    s_lower = 60
    cv2.createTrackbar("s lower", windowName2, s_lower, 255, nothing)
    s_upper = 255
    cv2.createTrackbar("s upper", windowName2, s_upper, 255, nothing)
class PointsDetector:
    """
        Class implementing simple blob detection on 
        images coming from ros node/bag

    """
    def __init__(self):
        self.bridge = CvBridge()
        self.image_pub = rospy.Publisher("image_points", Image, queue_size=10)
        self.image_sub = rospy.Subscriber("/usb_cam/image_raw", Image,
                                          self.callback)

    def getBlobDetectorParams(self):
        """
            Sets and returns blob detector params

        """

        #Set up parameters for blob detector
        params = cv2.SimpleBlobDetector_Params()
        #Change thresholds
        params.minThreshold = 0
        params.maxThreshold = 100
        #Filter by Color
        params.filterByColor = True
        params.blobColor = 0
        #Filter by Circularity
        params.filterByCircularity = True
        params.minCircularity = 0.6
        #Filter by Area
        params.filterByArea = True
        params.minArea = 50
        #Filter by Inertia
        params.filterByInertia = True
        params.minInertiaRatio = 0.1
        #Filter by Convexity - critical setting
        params.filterByConvexity = True
        params.minConvexity = 0.5

        return params

    def callback(self, data):
        """
            Callback called when image arrives

        """

        # always enclose the call to imgmsg_to_cv2() in try/except to
        # catch conversion errors
        try:
            # cv_image is a numpy array - the function performs conversion
            # to grayscale
            cv_image = self.bridge.imgmsg_to_cv2(data, "mono8")
        except CvBridgeError as e:
            print(e)
            return

        #threshold the image
        _, cv_image_thresh = cv2.threshold(cv_image, 230, 255,
                                           cv2.THRESH_BINARY_INV)

        #remove noise from the original image
        cv_image_thresh = removeNoise(cv_image_thresh)

        params = self.getBlobDetectorParams()

        # Create a detector with the parameters
        ver = (cv2.__version__).split('.')
        if (int(ver[0]) < 3):
            detector = cv2.SimpleBlobDetector(params)
        else:
            detector = cv2.SimpleBlobDetector_create(params)

        # Detect blobs
        keypoints = detector.detect(cv_image_thresh)

        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
        cv_image_thresh_with_keypoints = cv2.drawKeypoints(
            cv_image_thresh, keypoints, np.array([]), (0, 0, 255),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        #cv_image_thresh_with_keypoints = drawLines(cv_image_thresh_with_keypoints, keypoints)
        cv2.imshow("PointsDetector", cv_image_thresh_with_keypoints)
        cv2.imshow("original_image", cv_image)

        cv2.waitKey(5) & 0xFF

        try:
            self.image_pub.publish(
                self.bridge.cv2_to_imgmsg(cv_image_thresh, "mono8"))
        except CvBridgeError as e:
            print(e)
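An assumed entry point for running the detector as a ROS node (not shown in
the excerpt above):

def main():
    rospy.init_node('points_detector', anonymous=True)
    PointsDetector()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print('Shutting down')
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()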