Example #1
0
def draw(latticesize, defectsize):
    """Interactively annotate a lattice MFM image and extract island states.

    Loads the image whose filename contains ``latticesize`` from a hard-coded
    network share, asks the user to click the four corner vertices plus the
    centre of the defect box, lets the user tune three thresholds via trackbar
    windows (white spots, black spots, lattice), runs SimpleBlobDetector on
    the thresholded images, and finally hands the detected islands to
    ``connectdots`` / ``redo`` / ``grid``.

    Relies on module-level state: ``verts`` (filled by
    ``click_event_lattice``), ``Xlist`` / ``Ylist`` (appended to here),
    ``circle_radius``, ``nothing`` (no-op trackbar callback), and the helper
    functions ``connectdots``, ``redo`` and ``grid``.

    :param latticesize: lattice dimension; also used to select the image file
        whose name contains this number.
    :param defectsize: size of the grey defect box, in units of the bar
        length (``barsize``).
    """
    pngfolder = r'\\icnas2.cc.ic.ac.uk\kjs18\GitHub\RPMv3\Latticeimages'
    latticedim = latticesize
    for file in os.listdir(pngfolder):
        if str(latticedim) in file:
            # NOTE(review): lattice and image end up as the same path; if
            # several filenames contain the dimension, the last match wins.
            lattice = os.path.join(pngfolder, file)
            image = os.path.join(pngfolder, file)

    #SETS ARGUMENTS
    #ap = argparse.ArgumentParser()
    #ap.add_argument("-i", "--image", help = "path to the image file")

    #ap.add_argument("-l", "--lattice", help = "path to the image file")
    #args = vars(ap.parse_args())

    #TAKES LATTICE IMAGE
    lattice = cv2.imread(lattice)
    print(
        "Click vertices in following order: (1) top left (2) top right (3) bottom left (4) bottom right (5) centre of where the defect box is - for odd vertex box, click on a vertex, for even, click in the middle of 4 vertices"
    )
    # initiates click event for the lattice image
    cv2.imshow("click", lattice)
    cv2.setMouseCallback("click", click_event_lattice)
    cv2.waitKey(-1)

    #SETS MIN AND MAX FOR EACH CLICK
    # verts is populated by click_event_lattice in click order:
    # (Xmin,Ymin)=top-left, (X1,Y1)=top-right, (X2,Y2)=bottom-left,
    # (Xmax,Ymax)=bottom-right, (Xc,Yc)=defect-box centre.
    Xmin, Ymin, X1, Y1, X2, Y2, Xmax, Ymax, Xc, Yc = verts[0][0], verts[0][
        1], verts[1][0], verts[1][1], verts[2][0], verts[2][1], verts[3][
            0], verts[3][1], verts[4][0], verts[4][1]
    #calculates lattice length and dimension
    latticelength = Xmax - Xmin
    vertexlengthX = (X1 - Xmin) / (latticesize)
    vertexlengthY = (Y2 - Ymin) / (latticesize)

    print(latticesize)
    # NOTE(review): this overwrites the vertexlengthY computed just above
    # (Y2-based); confirm which definition is intended.
    vertexlengthY = (Ymax - Ymin) / latticesize
    dimension = latticesize

    #DRAWS CIRCLES ON THE VERTICES
    for i in range(
            2 * dimension + 1
    ):  #creates x range and y range for vertex coordinates
        Xlist.append(int(Xmin + ((Xmax - Xmin) / (2 * dimension)) * (i)))
        Ylist.append(int(Ymin + ((Ymax - Ymin) / (2 * dimension)) * (i)))
    C = []
    for i in range(len(Xlist)):  #creates a list of the vertex coordinates
        for j in range(len(Ylist)):
            # NOTE(review): the [i - 1]/[j - 1] indexing starts from the last
            # list element; presumably [i]/[j] was intended — verify. Also
            # Xlist/Ylist are module-level, so repeated calls accumulate.
            C.append((Xlist[i - 1], Ylist[j - 1]))

    for vertex in C:
        cv2.circle(lattice, (vertex[0], vertex[1]), circle_radius, (0, 0, 0),
                   -1)

    # NOTE(review): this second loop redraws the same filled circles and has
    # no visible effect — looks like accidental duplication.
    for vertex in C:
        cv2.circle(lattice, (vertex[0], vertex[1]), circle_radius, (0, 0, 0),
                   -1)

    #SHOWS THE RESULT AND WAITS FOR A KEY TO PROCEED
    cv2.imshow("lattice w/ vertex", lattice)
    cv2.waitKey(-1)

    #SETS THE UPPER LIMIT FOR A CONNECTION TO BE A PERCENTAGE OF THE ISLAND DISTANCE
    line_length_upper = ((Xmax - Xmin) / dimension) * 0.8
    line_length_lower = ((Xmax - Xmin) / dimension) * 0.4
    barsize = (Xmax - Xmin) / dimension

    #READS THE MFM FILE AND CONVERTS IT TO GREYSCALE
    image = cv2.imread(image)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray_lattice = cv2.cvtColor(lattice, cv2.COLOR_BGR2GRAY)

    #DRAWS CROSSES ON THE VERTICES TO SEPARATE BLOBS WHICH ARE MERGED
    # grey value 158 is used throughout as the "neutral" background shade.
    for vertex in C:
        cv2.line(gray, (int((vertex[0] - line_length_upper)),
                        int((vertex[1] - line_length_upper))),
                 (int(vertex[0] + line_length_upper),
                  int(vertex[1] + line_length_upper)), 158, 1)
        cv2.line(gray, (int((vertex[0] - line_length_upper)),
                        int((vertex[1] + line_length_upper))),
                 (int(vertex[0] + line_length_upper),
                  int(vertex[1] - line_length_upper)), 158, 1)

    #DRAWS GREY RECTANGLES AROUND THE EDGE OF THE LATTICE.
    # 10000 is just a "beyond any image edge" sentinel coordinate.
    cv2.rectangle(gray, (0, int(Ymin - line_length_upper * 0.5)), (10000, 0),
                  158, -1)
    cv2.rectangle(gray, (0, int(Ymax + line_length_upper * 0.5)),
                  (10000, 10000), 158, -1)
    cv2.rectangle(gray, (int(Xmin - line_length_upper * 0.5), 0), (0, 10000),
                  158, -1)
    cv2.rectangle(gray, (int(Xmax + line_length_upper * 0.5), 0),
                  (10000, 10000), 158, -1)

    #Draws grey vertex box.
    cv2.rectangle(gray, (int(Xc - (0.5 * defectsize * barsize)),
                         int(Yc - (0.5 * defectsize * barsize))),
                  (int(Xc + (0.5 * defectsize * barsize)),
                   int(Yc + (0.5 * defectsize * barsize))), 158, -1)

    #ALLOWS THE USER TO CONTROL THE LEVEL OF THRESHOLD TO MAKE WHITE SPOTS BLACK. (NEED TO MOVE THIS TO A FUNCTION)
    cv2.namedWindow("WHITE SPOTS TO BLACK (ESC WHEN DONE)")
    hh = 'Max'
    hl = 'Min'
    wnd = "WHITE SPOTS TO BLACK (ESC WHEN DONE)"
    cv2.createTrackbar("Max", wnd, 0, 255, nothing)

    while (1):
        hul = cv2.getTrackbarPos("Max", wnd)
        #ret,thresh1 = cv2.threshold(image,hul,huh,cv2.THRESH_BINARY)
        ret, whitedetect = cv2.threshold(gray, hul, 250, cv2.THRESH_BINARY_INV)
        #ret,thresh3 = cv2.threshold(image,hul,huh,cv2.THRESH_TRUNC)
        #ret,thresh4 = cv2.threshold(image,hul,huh,cv2.THRESH_TOZERO)
        #ret,thresh5 = cv2.threshold(image,hul,huh,cv2.THRESH_TOZERO_INV)
        # cv2.imshow(wnd)
        #cv2.imshow("thresh1",thresh1)
        cv2.imshow('original', gray)
        cv2.moveWindow('original', 0, 0)
        cv2.imshow(wnd, whitedetect)
        #cv2.imshow("thresh3",thresh3)
        #cv2.imshow("thresh4",thresh4)
        #cv2.imshow("thresh5",thresh5)
        k = cv2.waitKey(1) & 0xFF
        # NOTE(review): `mode` is never defined in this function — pressing
        # 'm' would raise NameError. ESC (27) exits the loop.
        if k == ord('m'):
            mode = not mode
        elif k == 27:
            break
    cv2.destroyAllWindows()
    #Rectangle drawing

    #ALLOWS THE USER TO CONTROL THE LEVEL OF THRESHOLD TO MAKE BLACK SPOTS BLACK. (NEED TO MOVE THIS TO A FUNCTION)
    cv2.namedWindow("BLACK SPOTS TO BLACK (ESC WHEN DONE)")
    hh = 'Max'
    hl = 'Min'
    wnd = "BLACK SPOTS TO BLACK (ESC WHEN DONE)"
    cv2.createTrackbar("Max", wnd, 0, 255, nothing)

    while (1):
        hul = cv2.getTrackbarPos("Max", wnd)

        #ret,thresh1 = cv2.threshold(image,hul,huh,cv2.THRESH_BINARY)
        # inverted threshold, then bitwise_not, isolates the dark blobs
        ret, blackdetect = cv2.threshold(gray, hul, 250, cv2.THRESH_BINARY_INV)
        blackdetect = cv2.bitwise_not(blackdetect)
        #ret,thresh3 = cv2.threshold(image,hul,huh,cv2.THRESH_TRUNC)
        #ret,thresh4 = cv2.threshold(image,hul,huh,cv2.THRESH_TOZERO)
        #ret,thresh5 = cv2.threshold(image,hul,huh,cv2.THRESH_TOZERO_INV)
        # cv2.imshow(wnd)
        #cv2.imshow("thresh1",thresh1)
        cv2.imshow('ORIGINAL', gray)
        cv2.moveWindow('ORIGINAL', 0, 0)
        cv2.imshow(wnd, blackdetect)
        #cv2.imshow("thresh3",thresh3)
        #cv2.imshow("thresh4",thresh4)
        #cv2.imshow("thresh5",thresh5)
        k = cv2.waitKey(1) & 0xFF
        if k == ord('m'):
            mode = not mode
        elif k == 27:
            break
    cv2.destroyAllWindows()
    #Rectangle drawing

    #ALLOWS THE USER TO CONTROL THE LEVEL OF THRESHOLD FOR THE LATTICE. (NEED TO MOVE THIS TO A FUNCTION)
    cv2.namedWindow("Lattice threshold (esc when done)")
    hh = 'Max'
    hl = 'Min'
    wnd = "Lattice threshold (esc when done)"
    cv2.createTrackbar("Max", wnd, 0, 255, nothing)

    while (1):
        hul = cv2.getTrackbarPos("Max", wnd)
        ret, latticethresh = cv2.threshold(gray_lattice, hul, 250,
                                           cv2.THRESH_BINARY)
        #ret,blackdetect = cv2.threshold(gray_lattice,hul,huh,cv2.THRESH_BINARY_INV)
        #ret,thresh3 = cv2.threshold(image,hul,huh,cv2.THRESH_TRUNC)
        #ret,thresh4 = cv2.threshold(image,hul,huh,cv2.THRESH_TOZERO)
        #ret,thresh5 = cv2.threshold(image,hul,huh,cv2.THRESH_TOZERO_INV)
        # cv2.imshow(wnd)
        #cv2.imshow("thresh1",thresh1)
        cv2.imshow('Lattice', lattice)
        cv2.moveWindow('Lattice', 0, 0)
        cv2.imshow(wnd, latticethresh)
        #cv2.imshow("thresh3",thresh3)
        #cv2.imshow("thresh4",thresh4)
        #cv2.imshow("thresh5",thresh5)
        k = cv2.waitKey(1) & 0xFF
        if k == ord('m'):
            mode = not mode
        elif k == 27:
            break
    cv2.destroyAllWindows()
    #Rectangle drawing

    #PARAMETERS FOR THE BLOB DETECTION

    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    params.minThreshold = 1
    params.maxThreshold = 300

    # Filter by Area.
    params.filterByArea = True
    params.minArea = 20

    # Filter by Circularity
    params.filterByCircularity = False
    params.minCircularity = 0.1

    # Filter by Convexity
    params.filterByConvexity = False
    params.minConvexity = 0.87

    # Filter by Inertia
    params.filterByInertia = False
    params.minInertiaRatio = 0.01

    # Create a detector with the parameters (OpenCV 2.x used the plain
    # constructor; 3.x+ uses the _create factory)
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)

    #DETECTS DARK AND WHITE BLOBS AND STORES THEM TO ARRAYS
    keypoints_dark = detector.detect(blackdetect)
    keypoints_light = detector.detect(whitedetect)

    keypoints_total = keypoints_light + keypoints_dark

    #DRAWS CIRCLES ON THE ORIGINAL IMAGE WHERE THE BLOBS ARE
    im_with_keypoints = cv2.drawKeypoints(
        gray, keypoints_total, np.array([]), (46, 155, 255),
        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    for vertex in C:
        im_with_keypoints = cv2.circle(im_with_keypoints,
                                       (vertex[0], vertex[1]), circle_radius,
                                       (0, 0, 0), -1)
    final_im = im_with_keypoints
    # Draw detected blobs as red circles.

    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures

    # the size of the circle corresponds to the size of blob

    #PARAMETERS FOR THE NEXT PART.
    IslandProperties = []
    BarCoordinates = []
    BarCoordinatesend = []
    mxfinal = []
    myfinal = []

    # connectdots fills IslandProperties from the detected keypoints; redo
    # lets the user manually correct misdetections.
    connectdots(keypoints_dark, keypoints_light, im_with_keypoints,
                latticethresh, line_length_lower, line_length_upper,
                IslandProperties)

    redo(im_with_keypoints, IslandProperties)
    cv2.imshow("", im_with_keypoints)
    cv2.waitKey(-1)
    # NOTE(review): only one extra redo pass is offered; answering "Y" twice
    # is not supported, and the else branch just normalises the variable.
    again = input("redo again? Y/N")
    if again == "Y":
        redo(im_with_keypoints, IslandProperties)
    else:
        again = "N"
    print(IslandProperties)
    cv2.imshow("", im_with_keypoints)
    cv2.waitKey(-1)
    grid(IslandProperties, im_with_keypoints, vertexlengthX, dimension,
         vertexlengthY, Xmin, Ymin, C)
Example #2
0
def get_diagnostic_data(region, mask):
    """Detect cell blobs in a 16-bit image ``region`` and measure intensities.

    The region is scaled to 8-bit, masked, binarised, edge-detected and fed
    to a SimpleBlobDetector tuned for roughly circular cells. For each
    detected blob an intensity in [0, 1] is computed from the 16-bit source
    pixels (darker cell -> higher intensity).

    :param region: 2-D 16-bit greyscale image (values up to 2**16 - 1).
    :param mask: array of the same shape; pixels where ``mask > 0`` are kept.
    :return: ``(debug, intensities)`` where ``debug`` is ``[vis, rCopy]``
        (annotated BGR visualisation and the processed 8-bit image) and
        ``intensities`` is a list of per-blob floats.
    """
    rCopy = region.copy()
    # Scale 16-bit data down to the 8-bit range OpenCV works with.
    rCopy = np.divide(rCopy, 256)
    rCopy = rCopy.astype('uint8')
    vis = rCopy.copy()
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
    nMask = np.where(mask > 0, [1], [0]).astype('uint8')
    rCopy *= nMask
    rCopy = np.where(rCopy > 0, [255], [0]).astype('uint8')
    rCopy = cv2.Canny(rCopy, 200, 255)
    # NOTE(review): a (1, 1) kernel makes this blur effectively a no-op;
    # confirm whether a larger kernel was intended.
    rCopy = cv2.GaussianBlur(rCopy, (1, 1), 7)

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    params.minThreshold = 0
    params.maxThreshold = 255

    # Filter by Area.
    params.filterByArea = True
    params.minArea = 400
    params.maxArea = 2300

    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.5

    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.1

    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.01
    detector = cv2.SimpleBlobDetector_create(params)

    # Detect blobs.
    keypoints = detector.detect(rCopy)

    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    #vis = cv2.drawKeypoints(vis, keypoints, np.array([]), (0, 255, 0),cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    intensities = []
    for keypoint in keypoints:
        cy = int(round(keypoint.pt[1]))
        cx = int(round(keypoint.pt[0]))

        # Shrink the measured disc to 75% of the blob radius to stay inside
        # the cell boundary.
        rad = int(round(keypoint.size / 2.))
        rad = int(round(rad * 0.75))
        w, h = region.shape

        # NOTE(review): ogrid mixes cx with the row axis and cy with the
        # column axis; verify against region's orientation.
        y, x = np.ogrid[-cx:w - cx, -cy:h - cy]
        mask = x**2 + y**2 <= rad**2
        vis = cv2.circle(vis, (cx, cy), rad, (0, 255, 0), 1)

        colorIntensity = ((np.power(2, 16) - 1) -
                          np.average(region[mask])) / (np.power(2, 16) - 1)
        # BUG FIX: the original guard `colorIntensity == np.nan or
        # colorIntensity == np.inf` was always False, because NaN never
        # compares equal to anything (and -inf was not checked at all).
        # np.isfinite correctly rejects NaN and both infinities.
        if not np.isfinite(colorIntensity):
            continue
        """The intensity value of each cell is found by first taking the average pixel value of each pixel 
        inside the cell region. This value is then subtracted from 255 so that the final intensity value 
        will increase with the darkness of the cell. Finally, the intensity value is divided by the maximum
        value a pixel can attain, which normalizes the intensity value to be between 0 and 1."""
        intensities.append(colorIntensity)
    debug = [vis, rCopy]
    return debug, intensities
def get_target(cap):
    """
    Find a colour target in the camera's field of view and report where it is.

    Grabs one frame, blurs it, converts to YCrCb and masks it against the
    module-level ``lower_red`` / ``upper_red`` bounds, then blob-detects the
    mask and returns the horizontal position of the biggest blob as a
    fraction of the frame width (0.0 = left edge, 1.0 = right edge).

    :param cap: the OpenCV capture device (``cv2.VideoCapture``)
    :return: horizontal position in [0, 1], or -1 when no blob is found
    """
    cap.set(3, 320)  # CAP_PROP_FRAME_WIDTH
    cap.set(4, 240)  # CAP_PROP_FRAME_HEIGHT

    if not cap.isOpened():
        #  sudo modprobe bcm2835-v4l2
        print("CAPTURE DEVICE NOT FOUND")
        exit(2)

    ret_cam, resized = cap.read()

    resized = cv2.blur(resized, (10, 10))

    ycc = cv2.cvtColor(resized, cv2.COLOR_BGR2YCrCb)

    mask = cv2.inRange(ycc, lower_red, upper_red)

    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    params.minThreshold = 10
    params.maxThreshold = 200
    params.blobColor = 255

    # FIX: numpy image shape is (rows, cols, channels); the original
    # unpacked it as (w, h, c). The computed value is identical because
    # the expression below is symmetric in the two lengths.
    height, width, _ = resized.shape
    # Filter by Area: scale the minimum blob area with the frame size.
    params.filterByArea = True
    params.minArea = int(((height + width) / 4) + 1)

    # Filter by Circularity
    params.filterByCircularity = False
    params.minCircularity = 0.1

    # Filter by Convexity
    params.filterByConvexity = False
    params.minConvexity = 0.01

    # Filter by Inertia
    params.filterByInertia = False
    params.minInertiaRatio = 0.1

    # Create a detector with the parameters
    detector = cv2.SimpleBlobDetector_create(params)

    # Detect blobs.
    keypoints = detector.detect(mask)

    # Get the biggest blob. IDIOM FIX: the original tracked it with a
    # manual index loop whose accumulator shadowed the builtin `max`;
    # max(..., key=...) returns the same (first) largest keypoint.
    if keypoints:
        biggest_target = max(keypoints, key=lambda kp: kp.size)
        target_position = biggest_target.pt[0] / resized.shape[1]
    # if no blobs are found
    else:
        target_position = -1

    return target_position
def main():
    """Entry point for an AMG88XX thermal-camera people counter.

    Parses CLI arguments, initialises the 8x8 AMG88XX sensor over I2C and a
    pygame display (optionally headless), then loops: reads thermal pixels,
    interpolates them to 32x32, blob-detects warm objects, tracks them with a
    CentroidTracker, and counts objects crossing the horizontal centre line
    as IN (up) or OUT (down). A background thread periodically saves the
    module-level ``payload`` to CSV. Press any key in the pygame window to
    terminate.

    Relies on module-level names: ``payload``, ``csv_save``, ``get_filepath``,
    ``map_value``, ``constrain``, ``count_within_range``, ``CentroidTracker``,
    ``TrackableObject`` and the imported hardware/pygame/scipy modules.
    """
    global payload

    # argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument('--headless',
                        help='run the pygame headlessly',
                        action='store_true')

    parser.add_argument("--color_depth",
                        help="integer number of colors to use to draw temps",
                        type=int)
    parser.add_argument('--max_temp', help='initial max temperature', type=int)
    parser.add_argument(
        '--ambient_offset',
        help='value to offset ambient temperature by to get rolling MAXTEMP',
        type=int)
    parser.add_argument(
        '--ambient_time',
        help='length of ambient temperature collecting intervals in seconds',
        type=int)

    parser.add_argument('--blob_min_threshold',
                        help='blod detection min threshold',
                        type=int)
    parser.add_argument('--blob_max_threshold',
                        help='blod detection min threshold',
                        type=int)

    parser.add_argument('--blob_filterbyarea',
                        help='blod detection filter by area',
                        action='store_true')
    parser.add_argument('--blob_min_area',
                        help='blod detection filter by area min area',
                        type=int)

    parser.add_argument('--blob_filterbycircularity',
                        help='blod detection filter by circularity',
                        action='store_true')
    parser.add_argument(
        '--blob_min_circularity',
        help='blod detection filter by circularity min circularity',
        type=float)

    parser.add_argument('--blob_filterbyconvexity',
                        help='blod detection filter by convexity',
                        action='store_true')
    parser.add_argument(
        '--blob_min_convexity',
        help='blod detection filter by convexity min convexity',
        type=float)

    parser.add_argument('--blob_filterbyinertia',
                        help='blod detection filter by inertia',
                        action='store_true')
    parser.add_argument('--blob_min_inertiaratio',
                        help='blod detection filter by inertia inertia ratio',
                        type=float)

    parser.add_argument('--csv_save_interval',
                        help='csv file saving interval in seconds',
                        type=int)

    args = parser.parse_args()
    print(args)

    # NOTE(review): these options have no defaults, so omitting e.g.
    # --color_depth leaves COLOR_DEPTH as None and fails later.
    COLOR_DEPTH = args.color_depth
    MAX_TEMP = args.max_temp
    AMBIENT_OFFSET = args.ambient_offset
    AMBIENT_TIME = args.ambient_time

    BLOB_MIN_THRESHOLD = args.blob_min_threshold
    BLOB_MAX_THRESHOLD = args.blob_max_threshold

    BLOB_FILTERBYAREA = args.blob_filterbyarea
    BLOB_MIN_AREA = args.blob_min_area

    BLOB_FILTERBYCIRCULARITY = args.blob_filterbycircularity
    BLOB_MIN_CIRCULARITY = args.blob_min_circularity

    BLOB_FILTERBYCONVEXITY = args.blob_filterbyconvexity
    BLOB_MIN_CONVEXITY = args.blob_min_convexity

    BLOB_FILTERBYINERTIA = args.blob_filterbyinertia
    BLOB_MIN_INERTIARATIO = args.blob_min_inertiaratio

    CSV_SAVE_INTERVAL = args.csv_save_interval

    # create data folders if they don't exist
    if not os.path.exists(get_filepath('../data')):
        os.makedirs(get_filepath('../data'))

    i2c_bus = busio.I2C(board.SCL, board.SDA)

    # For headless pygame
    if args.headless:
        os.putenv('SDL_VIDEODRIVER', 'dummy')
    else:
        os.putenv('SDL_FBDEV', '/dev/fb1')

    pygame.init()

    # initialize the sensor
    sensor = adafruit_amg88xx.AMG88XX(i2c_bus)

    # source coordinates of the 8x8 sensor grid, and the 32x32 target grid
    # used for bicubic interpolation below
    points = [(math.floor(ix / 8), (ix % 8)) for ix in range(0, 64)]
    grid_x, grid_y = np.mgrid[0:7:32j, 0:7:32j]

    # sensor is an 8x8 grid so lets do a square
    height = 240
    width = 240

    # the list of colors we can choose from
    black = Color("black")
    colors = list(black.range_to(Color("white"), COLOR_DEPTH))

    # create the array of colors
    colors = [(int(c.red * 255), int(c.green * 255), int(c.blue * 255))
              for c in colors]

    displayPixelWidth = width / 30
    displayPixelHeight = height / 30

    lcd = pygame.display.set_mode((width, height))

    lcd.fill((255, 0, 0))

    pygame.display.update()
    pygame.mouse.set_visible(False)

    lcd.fill((0, 0, 0))
    pygame.display.update()

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    # NOTE(review): truthiness checks mean an explicit 0 (or absent None)
    # leaves the OpenCV default in place.
    if BLOB_MIN_THRESHOLD:
        params.minThreshold = BLOB_MIN_THRESHOLD
    if BLOB_MAX_THRESHOLD:
        params.maxThreshold = BLOB_MAX_THRESHOLD

    # Filter by Area.
    if BLOB_FILTERBYAREA:
        params.filterByArea = BLOB_FILTERBYAREA
        params.minArea = BLOB_MIN_AREA

    # Filter by Circularity
    if BLOB_FILTERBYCIRCULARITY:
        params.filterByCircularity = BLOB_FILTERBYCIRCULARITY
        params.minCircularity = BLOB_MIN_CIRCULARITY

    # Filter by Convexity
    if BLOB_FILTERBYCONVEXITY:
        params.filterByConvexity = BLOB_FILTERBYCONVEXITY
        params.minConvexity = BLOB_MIN_CONVEXITY

    # Filter by Inertia
    if BLOB_FILTERBYINERTIA:
        params.filterByInertia = BLOB_FILTERBYINERTIA
        params.minInertiaRatio = BLOB_MIN_INERTIARATIO

    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)

    # initialize centroid tracker
    ct = CentroidTracker()

    # a dictionary to map each unique object ID to a TrackableObject
    trackableObjects = {}

    # the total number of objects that have moved either up or down
    total_down = 0
    total_up = 0
    # NOTE(review): the *_old counters are assigned each frame but never
    # read — dead state.
    total_down_old = 0
    total_up_old = 0

    # let the sensor initialize
    time.sleep(.1)

    # press key to exit
    screencap = True

    # array to hold mode of last 10 minutes of temperatures
    mode_list = []

    # thread for saving data
    save_thread = threading.Thread(target=csv_save, args=(CSV_SAVE_INTERVAL, ))
    save_thread.start()

    print('sensor started!')

    while (screencap):
        start = time.time()

        # read the pixels
        pixels = []
        for row in sensor.pixels:
            pixels = pixels + row

        # payload is consumed by the csv_save thread via the global
        payload = [
            str(datetime.now().isoformat()),
            ct.get_count(), total_up, total_down
        ]

        # NOTE(review): mode_result[0] relies on older scipy returning an
        # indexable ModeResult; newer scipy returns a scalar .mode — verify.
        mode_result = stats.mode([round(p) for p in pixels])
        mode_list.append(int(mode_result[0]))

        # instead of taking the ambient temperature over one frame of data take it over a set amount of time
        MAX_TEMP = float(np.mean(mode_list)) + AMBIENT_OFFSET
        pixels = [
            map_value(p,
                      np.mean(mode_list) + 1, MAX_TEMP, 0, COLOR_DEPTH - 1)
            for p in pixels
        ]

        # perform interpolation
        bicubic = griddata(points, pixels, (grid_x, grid_y), method='cubic')

        # draw everything
        for ix, row in enumerate(bicubic):
            for jx, pixel in enumerate(row):
                try:
                    pygame.draw.rect(
                        lcd, colors[constrain(int(pixel), 0, COLOR_DEPTH - 1)],
                        (displayPixelHeight * ix, displayPixelWidth * jx,
                         displayPixelHeight, displayPixelWidth))
                # NOTE(review): bare except hides everything (including
                # NaN->int errors from interpolation); narrow if possible.
                except:
                    print("Caught drawing error")

        surface = pygame.display.get_surface()
        myfont = pygame.font.SysFont("comicsansms", 25)

        img = pygame.surfarray.array3d(surface)
        img = np.swapaxes(img, 0, 1)

        # Read image
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # invert so warm (bright) regions become dark blobs for the detector
        img_not = cv2.bitwise_not(img)

        # Detect blobs.
        keypoints = detector.detect(img_not)
        img_with_keypoints = cv2.drawKeypoints(
            img, keypoints, np.array([]), (0, 0, 255),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        pygame.draw.line(lcd, (255, 255, 255), (0, height // 2),
                         (width, height // 2), 2)
        pygame.display.update()

        for i in range(0, len(keypoints)):
            x = keypoints[i].pt[0]
            y = keypoints[i].pt[1]

            # print circle around blob
            pygame.draw.circle(lcd, (200, 0, 0), (int(x), int(y)),
                               round(keypoints[i].size), 2)

        # update our centroid tracker using the detected centroids
        objects = ct.update(keypoints)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    # the historical centroid must present in the lower half of the screen
                    if direction < 0 and centroid[
                            1] < height // 2 and count_within_range(
                                y, height // 2, height) > 0:
                        total_up += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    # the historical centroid must present in the upper half of the screen
                    elif direction > 0 and centroid[
                            1] > height // 2 and count_within_range(
                                y, 0, height // 2) > 0:
                        total_down += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

        # update counter in top left
        textsurface1 = myfont.render("IN: " + str(total_up), False,
                                     (255, 255, 255))
        textsurface2 = myfont.render('OUT: ' + str(total_down), False,
                                     (255, 255, 255))
        lcd.blit(textsurface1, (0, 0))
        lcd.blit(textsurface2, (0, 25))

        total_up_old = total_up
        total_down_old = total_down

        pygame.display.update()

        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                print('terminating...')
                screencap = False
                break

        # for running the save on for a certain amount of time
        # if time.time() - start_time >= 10:
        #    print('terminating...')
        #    screencap = False

        # empty mode_list every AMBIENT_TIME seconds
        if len(mode_list) > AMBIENT_TIME:
            mode_list = []
        # throttle the loop to roughly 25 frames per second
        time.sleep(max(1. / 25 - (time.time() - start), 0))

    # Release everything if job is finished
    cv2.destroyAllWindows()
def run():
    with picamera.PiCamera() as camera:

        # Set the camera parameters
        x = 400
        #       camera.resolution = (int(640), x)
        camera.resolution = (544, x)
        # Various optional camera settings below:
        camera.iso = 100
        camera.framerate = 30
        camera.awb_mode = 'off'
        camera.exposure_mode = "off"
        #      camera.framerate = Fraction (1,6)
        #red/blue camera ratios from 0 to 8

        #       camera.awb_gains = (Red_gain,Blue_gain)

        # Need to sleep to give the camera time to get set up properly
        time.sleep(1)

        with picamera.array.PiRGBArray(camera) as stream:
            # Loop constantly
            while True:
                # Grab data from the camera, in colour format
                # NOTE: This comes in BGR rather than RGB, which is important
                # for later!
                camera.capture(stream, format='bgr', use_video_port=True)
                image = stream.array

                image1 = image

                # Get the individual colour components of the image
                b, g, r = cv2.split(image)

                f = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)

                #                blurred_f = ndimage.gaussian_filter(f, 3)

                #               filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1)

                #               alpha = 10
                #               sharpened = blurred_f + alpha * (blurred_f - filter_blurred_f)

                #                image1= contrast_stretch(image1)
                #               image1 = image1.astype(np.uint8)

                ex = ndimage.sobel(f, axis=0, mode='constant')
                ey = ndimage.sobel(f, axis=1, mode='constant')
                edges = np.hypot(ex.astype(float), ey.astype(float))
                edges = edges.astype(np.uint8)
                print(edges.shape)

                #################################

                #               maskr = cv2.resize(mask,None,fx=rsize, fy=rsize, interpolation = cv2.INTER_CUBIC)
                #               imgr = cv2.resize(image,None,fx=rsize, fy=rsize, interpolation = cv2.INTER_CUBIC)

                image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

                Hstart = cv2.getTrackbarPos('Hstart', 'Trackbars')
                Hend = cv2.getTrackbarPos('Hend', 'Trackbars')

                Sstart = cv2.getTrackbarPos('Sstart', 'Trackbars')
                Send = cv2.getTrackbarPos('Send', 'Trackbars')

                Vstart = cv2.getTrackbarPos('Vstart', 'Trackbars')
                Vend = cv2.getTrackbarPos('Vend', 'Trackbars')

                lower_range = np.array([Hstart, Sstart, Vstart],
                                       dtype=np.uint8)
                upper_range = np.array([Hend, Send, Vend], dtype=np.uint8)

                mask = cv2.inRange(image, lower_range, upper_range)

                result = cv2.bitwise_and(image, image, mask=mask)
                #              imgr = cv2.resize(result,None,fx=rsize, fy=rsize, interpolation = cv2.INTER_CUBIC)

                ################################################################

                blob = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)

                # Setup SimpleBlobDetector parameters.
                params = cv2.SimpleBlobDetector_Params()

                # Change thresholds
                params.minThreshold = cv2.getTrackbarPos(
                    'minThreshold', 'Trackbars1')
                params.maxThreshold = cv2.getTrackbarPos(
                    'maxThreshold', 'Trackbars1')

                # Filter by Area.
                params.filterByArea = True
                params.minArea = cv2.getTrackbarPos('MinArea', 'Trackbars1')

                # Filter by Circularity
                params.filterByCircularity = True
                params.minCircularity = cv2.getTrackbarPos(
                    'Circularity', 'Trackbars1') / 10

                # Filter by Convexity
                params.filterByConvexity = True
                params.minConvexity = cv2.getTrackbarPos(
                    'Convexity', 'Trackbars1') / 100

                # Filter by Inertia
                params.filterByInertia = True
                params.minInertiaRatio = cv2.getTrackbarPos(
                    'InertiaRatio', 'Trackbars1') / 100

                detector = cv2.SimpleBlobDetector_create(params)

                # Detect blobs.
                keypoints = detector.detect(blob)

                # Draw detected blobs as red circles.
                # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
                image_blob = cv2.drawKeypoints(
                    blob, keypoints, np.array([]), (0, 255, 255),
                    cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

                detector = cv2.SimpleBlobDetector_create(params)

                #get info from Trackbars

                Exposure_Comp = cv2.getTrackbarPos("Exposure Comp",
                                                   "Trackbars")
                Red_gain = cv2.getTrackbarPos("Red Gain", "Trackbars")
                Blue_gain = cv2.getTrackbarPos("Blue Gain", "Trackbars")
                Frame_rate = cv2.getTrackbarPos("Frame Rate", "Trackbars")
                Contrast = cv2.getTrackbarPos('Contrast', "Trackbars")
                Brightness = cv2.getTrackbarPos('Brightness', "Trackbars")
                ISO = cv2.getTrackbarPos('ISO', "Trackbars")
                Exp = cv2.getTrackbarPos('Exposure', "Trackbars")
                Saturation = cv2.getTrackbarPos('Saturation', "Trackbars")
                Sharpness = cv2.getTrackbarPos('Sharpness', "Trackbars")
                Effects = cv2.getTrackbarPos('Effects', "Trackbars")

                #scale camera settings

                camera.exposure_compensation = Exposure_Comp - 25
                camera.awb_gains = (Red_gain / 10, Blue_gain / 10)
                camera.framerate = Frame_rate
                camera.contrast = Contrast - 100
                camera.brightness = Brightness
                camera.exposure_mode = exposure_number[Exp]
                camera.saturation = Saturation - 100
                camera.sharpness = Sharpness - 100
                camera.image_effect = effect_number[Effects]

                # Label images
                label(image1, 'RGB')
                label(b, 'B')
                label(r, 'R')

                # Combine ready for display
                #               combined = disp_multiple(blankimg,sharpened,result,image_blob)
                combined = disp_multiple(blankimg, result, image, edges)

                #               write video
                cv2.putText(combined, "Exposure Compensation:", (10, 25), font,
                            1, (256, 256, 256), 2)
                cv2.putText(combined, str(camera.exposure_compensation),
                            (450, 25), font, 1, (256, 256, 256), 2)

                cv2.putText(combined, "Blue", (10, 55), font, 1, (256, 0, 0),
                            2)
                cv2.putText(combined, "/", (80, 55), font, 1, (256, 256, 256),
                            2)
                cv2.putText(combined, "Red Gain:", (110, 55), font, 1,
                            (0, 0, 256), 2)
                cv2.putText(combined, str(Red_gain / 10), (470, 55), font, 1,
                            (0, 0, 256), 2)
                cv2.putText(combined, "/", (450, 55), font, 1, (256, 256, 256),
                            2)
                cv2.putText(combined, str(Blue_gain / 10), (400, 55), font, 1,
                            (256, 0, 0), 2)

                cv2.putText(combined, "Frame Rate:", (10, 85), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined, str(camera.framerate), (450, 85), font,
                            1, (256, 256, 256), 2)

                cv2.putText(combined, "Contrast:", (10, 115), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined, str(camera.contrast), (450, 115), font,
                            1, (256, 256, 256), 2)

                cv2.putText(combined, "Brightness:", (10, 145), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined, str(camera.brightness), (450, 145), font,
                            1, (256, 256, 256), 2)

                cv2.putText(combined, "Saturation:", (10, 175), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined, str(camera.saturation), (450, 175), font,
                            1, (256, 256, 256), 2)

                cv2.putText(combined, "Sharpness:", (10, 205), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined, str(camera.sharpness), (450, 205), font,
                            1, (256, 256, 256), 2)

                cv2.putText(combined, "Exposure:", (10, 235), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined, str(camera.exposure_mode), (355, 235),
                            font, 1, (256, 256, 256), 2)

                cv2.putText(combined, "Image Effect:", (10, 265), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined, str(camera.image_effect), (355, 265),
                            font, 1, (256, 256, 256), 2)

                cv2.putText(combined, "ISO:", (10, 295), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined, str(camera.iso), (355, 295), font, 1,
                            (256, 256, 256), 2)

                cv2.putText(combined, "Exposure Speed:", (10, 325), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined,
                            str(round(camera.exposure_speed / 1000000, 4)),
                            (355, 325), font, 1, (256, 256, 256), 2)

                cv2.putText(combined, "Analog Gain:", (10, 355), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined, str(round(float(camera.analog_gain), 2)),
                            (355, 355), font, 1, (256, 256, 256), 2)

                cv2.putText(combined, "Digital Gain:", (10, 385), font, 1,
                            (256, 256, 256), 2)
                cv2.putText(combined, str(round(float(camera.digital_gain),
                                                2)), (355, 385), font, 1,
                            (256, 256, 256), 2)

                cv2.putText(combined, "B", (460, 450), font, 2, (255, 0, 0), 4)
                cv2.putText(combined, "R", (1020, 50), font, 2, (0, 0, 255), 4)

                # use for video recording

                #              out.write(combined)

                # Display
                cv2.imshow('Public Lab', combined)

                stream.truncate(0)

                #  press ESC to break
                c = cv2.waitKey(7) % 0x100
                if c == 27:
                    break

    # cleanup or things will get messy
    cv2.destroyAllWindows()
    cap.release()
    out.release()
Exemple #6
0
def blob_parameter(state_type):
    ''' blob_parameter function for making detector for some blob shapes(circle or triangle)
            & setting parameter of detector
        * Input 
            state_type : recognition type in recognition_list (ex : 'parking')
        * Output
            blob_detector : blob detector that has parameters setted by recognition type 
    '''
    # Per-state parameter tables: SimpleBlobDetector attribute name -> value.
    if state_type == 'traffic_light':
        settings = {
            'minThreshold': 0,
            'maxThreshold': 256,
            'filterByArea': True,
            'minArea': 500,
            'maxArea': 2300,
            'filterByCircularity': True,
            'minCircularity': 0.4,
            'filterByConvexity': True,
            'minConvexity': 0.1,
            'filterByInertia': True,
            'minInertiaRatio': 0.01,
        }
    elif state_type in ('intersection', 'construction', 'turnel'):
        settings = {
            'minThreshold': 10,
            'maxThreshold': 200,
            'filterByArea': True,
            'minArea': 500,
            'filterByCircularity': True,
            'minCircularity': 0.1,
            'filterByConvexity': False,
            'minConvexity': 0.1,
            'filterByInertia': True,
            'minInertiaRatio': 0.01,
        }
    else:
        settings = {
            'minThreshold': 0,
            'maxThreshold': 256,
            'filterByArea': True,
            'minArea': 1000,
            'maxArea': 35000,
            'filterByCircularity': True,
            'minCircularity': 0.5,
            'filterByConvexity': True,
            'minConvexity': 0.1,
            'filterByInertia': True,
            'minInertiaRatio': 0.01,
        }

    # Apply the selected settings to a fresh parameter object.
    params = cv2.SimpleBlobDetector_Params()
    for attr, value in settings.items():
        setattr(params, attr, value)

    # OpenCV 2.x exposes the detector under a different constructor name.
    major_version = int(cv2.__version__.split('.')[0])
    if major_version < 3:
        blob_detector = cv2.SimpleBlobDetector(params)
    else:
        blob_detector = cv2.SimpleBlobDetector_create(params)

    return blob_detector
Exemple #7
0
    def __init__(self, port="COM4", baudrate=9600):
        """
        -Pupilometer object
        takes serial port and baudrate as parameters in constructor.
        -example specification of serial ports in Windows: COM1, COM2...
        -example specification of serial ports in Ubuntu: /dev/ttyUSB0, /dev/ttyUSB1...
        -example specification of serial ports in Debian(Raspberry Pi): /dev/ttyACM0, /dev/ttyACM1...
        -If the computer is Unix based Operating System; it can be checked from terminal 'ls /dev/tty*' during the
        plugging in the serial cable(mostly USB cables are machine end side)
        -Baudrate is related to the frequency of the sending and receiving messages from/to micro-controller or
        a machine. So it is mandatory to test it, it will affect the frame per seconds in the camera manner of the
        program. Baudrates to be tested are; 9600, 19200, 28800, 38400, 57600, 76800, 115200
        """
        self.port = port  # specify the serial bus
        self.baud = baudrate  # specify the baudrate
        # The window must exist before trackbars can be attached to it.
        cv2.namedWindow(
            'image'
        )  # for sliders to come up in same window with the camera feed
        cv2.createTrackbar(
            'thr_left', 'image', 0, 255,
            nothing)  # create slider for threshold value (left eye)
        cv2.createTrackbar(
            'thr_right', 'image', 0, 255,
            nothing)  # create slider for threshold value (right eye)
        # specify haarcascade classifiers
        # NOTE(review): cascade XML paths are relative — confirm they resolve
        # from the directory the program is run from.
        self.eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
        self.face_cascade = cv2.CascadeClassifier(
            'haarcascade_frontalface_default.xml')
        # set blob detector parameters
        # Only the area filter is enabled; maxArea=1500 rejects large blobs.
        self.detector_params = cv2.SimpleBlobDetector_Params()
        self.detector_params.filterByArea = True
        self.detector_params.maxArea = 1500
        self.detector = cv2.SimpleBlobDetector_create(self.detector_params)

        self.cap = cv2.VideoCapture(
            0)  # specify the camera that we're going to use and open it.
        time.sleep(1)  # warm up the camera a bit
        # To use the serial bus comment out this section.
        # TODO: set the baudrate closer to the camera fps frequincy. Values between 4800 and 115200 must be tested.
        # self.serial_dev = serial.Serial(self.port, self.baud, timeout=0.1)
        # TODO: add a microcontroller and comment out this section

        # Bunch of global variables inside of a class
        self.right_pupil_area = 0  # most recent measured right-pupil area
        self.left_pupil_area = 0  # most recent measured left-pupil area
        self.left = None  # left-eye image region
        self.right = None  # right-eye image region
        self.eyes = None  # eye detections from the cascade
        self.eye_center = 0
        self.frame = None  # current camera frame
        self.left_eye_keypoints = []  # blob keypoints found in the left eye
        self.left_eye_wp = 0
        self.left_eye_processed = None  # thresholded/processed left-eye image
        self.right_eye_keypoints = []  # blob keypoints found in the right eye
        self.right_eye_wp = 0
        self.right_eye_processed = None  # thresholded/processed right-eye image
        self.luminance = 0
        self.data = {
            "name": "",
            "surname": "",
            "phone_num": ""
        }  # initial dictionary
Exemple #8
0
    def __init__(self, img, feature):
        """Pre-process a gauge image and detect keypoints with the chosen feature.

        Sharpens the grayscale image, crops a central ROI, thresholds it
        (Otsu, plain and Gaussian-blurred), detects keypoints with the
        'ORB', 'FAST' or 'BLOB' detector, and draws crosshair/circle
        overlays onto a BGRA copy of the ROI (stored in ``self.reimg``).

        Parameters
        ----------
        img : np.ndarray
            BGR input image (as read by cv2).
        feature : str
            One of 'ORB', 'FAST' or 'BLOB'; any other value prints an
            error and exits the process with status 1.
        """
        # sharpening
        # Unsharp mask: gray*1.5 - blurred*0.5, bracketed by bilateral filters.
        gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        blurred = cv2.bilateralFilter(gray_image, 9, 75, 75)
        gray_image = cv2.addWeighted(gray_image, 1.5, blurred, -0.5, 0)
        gray_image = cv2.bilateralFilter(gray_image, 9, 75, 75)

        # Extract ROI
        # Crop the central fifth of the bounding rect in each dimension.
        x, y, w, h = cv2.boundingRect(gray_image)  # (x=0,y=0,w=1920,h=1080)
        #print("x,y,w,h: ", x,y,w,h)
        self.roi = img[y + int(h / 2.5):y + h - int(h / 2.5),
                       x + int(w / 2.5):x + w - int(w / 2.5)]

        # Draw a bounding of ROI
        self.img_with_bounding = img.copy()
        cv2.rectangle(self.img_with_bounding,
                      (x + int(w / 2.5), y + int(h / 2.5)),
                      (x + w - int(w / 2.5), y + h - int(h / 2.5)),
                      (0, 0, 255), 2)

        # Find Needle position
        # The needle is assumed to sit at the centre of the ROI.
        self.gray_roi = cv2.cvtColor(self.roi, cv2.COLOR_BGR2GRAY)
        x, y, w, h = cv2.boundingRect(self.gray_roi)
        #print("xr,yr,wr,hr: ", x,y,w,h)
        self.needle_pose = np.array([[w / 2, h / 2]])

        # Otsu's thresholding
        ret, self.th = cv2.threshold(self.gray_roi, 0, 255,
                                     cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # Otsu's thresholding after Gaussian filtering
        self.blur = cv2.GaussianBlur(self.gray_roi, (5, 5), 0)
        ret2, self.th2 = cv2.threshold(self.blur, 0, 255,
                                       cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # Morphological filtering
        kernel = np.ones((2, 2), np.uint8)
        self.dilation = cv2.dilate(self.th, kernel, iterations=1)
        #opening = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)

        if feature == 'ORB':
            # Initiate ORB object
            orb = cv2.ORB_create(nfeatures=5000,
                                 scaleFactor=1.1,
                                 nlevels=10,
                                 scoreType=cv2.ORB_FAST_SCORE,
                                 patchSize=100)

            # find the keypoints with ORB
            keypoints = orb.detect(self.gray_roi, None)

            # compute the descriptors with ORB
            keypoints, descriptors = orb.compute(self.gray_roi, keypoints)
            self.point2f = cv2.KeyPoint_convert(keypoints)

            # retval = cv2.ORB.getMaxFeatures(orb)
            # print('retval: ',retval)
            # print('number of Kp: ', len(keypoints))
        elif feature == 'FAST':
            # Initiate FAST object with default values
            fast = cv2.FastFeatureDetector_create(10, True, 2)

            # TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2

            # find and draw the keypoints
            # NOTE: FAST runs on the blurred Otsu image, not the raw gray ROI.
            keypoints = fast.detect(self.th2, None)
            self.point2f = cv2.KeyPoint_convert(keypoints)

        elif feature == 'BLOB':
            # Setup SimpleBlobDetector parameters.
            params = cv2.SimpleBlobDetector_Params()

            # Change thresholds
            params.minThreshold = 10
            params.maxThreshold = 200

            # Filter by Area.
            params.filterByArea = True
            params.minArea = 5

            # Filter by Circularity
            params.filterByCircularity = True
            params.minCircularity = 0.1

            # Filter by Convexity
            params.filterByConvexity = True
            params.minConvexity = 0.5

            # Filter by Inertia
            params.filterByInertia = True
            params.minInertiaRatio = 0.01

            # Create a detector with the parameters
            # OpenCV 2.x exposes the detector under a different constructor name.
            ver = (cv2.__version__).split('.')
            if int(ver[0]) < 3:
                detector = cv2.SimpleBlobDetector(params)
            else:
                detector = cv2.SimpleBlobDetector_create(params)

            # Detect blobs.
            keypoints = detector.detect(self.gray_roi)
            self.point2f = cv2.KeyPoint_convert(keypoints)

        else:
            print('Error in feature type')
            sys.exit(1)

        # draw only the location of the keypoints without size or orientation
        self.final_keypoints = cv2.drawKeypoints(self.roi,
                                                 keypoints,
                                                 None,
                                                 color=(0, 255, 0),
                                                 flags=0)

        # split channel
        b_channel, g_channel, r_channel = cv2.split(self.final_keypoints)
        alpha_channel = np.ones(
            b_channel.shape,
            dtype=b_channel.dtype) * 50  #creating a dummy alpha channel image.
        img_BGRA = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))

        # Create new layers
        # NOTE(review): layers are float64 while img_BGRA is uint8; the
        # masked assignments below cast implicitly.
        layer_1 = np.zeros((h, w, 4))
        layer_2 = np.zeros((h, w, 4))

        # Draw a blue line with thickness of 1 px on layer_1
        cv2.line(layer_1, (int(w / 2) - 20, int(h / 2)),
                 (int(w / 2) + 20, int(h / 2)), (255, 0, 0, 255), 1)
        cv2.line(layer_1, (int(w / 2), int(h / 2) - 20),
                 (int(w / 2), int(h / 2) + 20), (255, 0, 0, 255), 1)

        # cv2.line(layer_1,(int(w/2)-60,int(h/2)),(int(w/2)-20,int(h/2)),(255,0,0,255),1)
        # cv2.line(layer_1,(int(w/2)-40,int(h/2)-20),(int(w/2)-40,int(h/2)+20),(255,0,0,255),1)

        # Draw a red closed circle on layer_2
        cv2.circle(layer_2, (int(w / 2), int(h / 2)), 10, (0, 0, 255, 255), 1)

        # copy the first layer into the resulting image
        # NOTE(review): [:] on an ndarray is a view, not a copy — self.reimg
        # aliases img_BGRA (harmless here since img_BGRA is local).
        self.reimg = img_BGRA[:]

        #overlay each drawing parts
        # Wherever a layer's alpha is nonzero, its pixel wins.
        cnd = layer_1[:, :, 3] > 0
        self.reimg[cnd] = layer_1[cnd]
        cnd = layer_2[:, :, 3] > 0
        self.reimg[cnd] = layer_2[cnd]
Exemple #9
0
def extract_binary_masks_blob(A,
                              neuron_radius,
                              dims,
                              num_std_threshold=1,
                              minCircularity=0.5,
                              minInertiaRatio=0.2,
                              minConvexity=.8):
    """
    Extract binary masks from CNMF components and pre-select the good ones.

    Each spatial component is normalised to uint8, segmented with a
    watershed on its Sobel elevation map, reduced to its largest connected
    object, and then screened with a cv2.SimpleBlobDetector whose shape
    criteria are given by the keyword arguments.

    Parameters:
    ----------
    A: scipy.sparse matris
        contains the components as outputed from the CNMF algorithm

    neuron_radius: float
        neuronal radius employed in the CNMF settings (gSiz)

    num_std_threshold: int
        number of times above iqr/1.349 (std estimator) the median to be considered as threshold for the component

    minCircularity: float
        parameter from cv2.SimpleBlobDetector

    minInertiaRatio: float
        parameter from cv2.SimpleBlobDetector

    minConvexity: float
        parameter from cv2.SimpleBlobDetector

    Returns:
    --------
    masks: np.array

    pos_examples:

    neg_examples:

    """
    # Blob detector used to decide whether a mask looks neuron-like.
    blob_params = cv2.SimpleBlobDetector_Params()
    blob_params.minCircularity = minCircularity
    blob_params.minInertiaRatio = minInertiaRatio
    blob_params.minConvexity = minConvexity
    blob_params.blobColor = 255
    blob_params.minThreshold = 0
    blob_params.maxThreshold = 255
    blob_params.thresholdStep = 3
    # Reject anything smaller than ~75% of the expected neuron footprint.
    blob_params.minArea = np.pi * ((neuron_radius * .75)**2)
    blob_params.filterByColor = True
    blob_params.filterByArea = True
    blob_params.filterByCircularity = True
    blob_params.filterByConvexity = True
    blob_params.filterByInertia = True
    detector = cv2.SimpleBlobDetector_create(blob_params)

    masks_ws = []
    pos_examples = []
    neg_examples = []

    for count, comp in enumerate(A.tocsc()[:].T):
        print(count)
        dense_comp = np.array(comp.todense())
        gray_image = np.reshape(dense_comp, dims, order='F')
        lo, hi = np.min(gray_image), np.max(gray_image)
        gray_image = ((gray_image - lo) / (hi - lo) * 255).astype(np.uint8)

        # Watershed segmentation: marker levels derived from the median and
        # a robust (iqr-based) spread of the nonzero pixels.
        markers = np.zeros_like(gray_image)
        elevation_map = sobel(gray_image)
        nonzero = gray_image[gray_image > 0]
        thr_low = np.percentile(nonzero, 50)
        iqr = np.diff(np.percentile(nonzero, (25, 75)))
        thr_high = thr_low + num_std_threshold * iqr / 1.35
        markers[gray_image < thr_low] = 1
        markers[gray_image > thr_high] = 2
        edges = watershed(elevation_map, markers) - 1

        # Keep only the largest connected object and fill its holes.
        label_objects, _ = ndi.label(edges)
        sizes = np.bincount(label_objects.ravel())
        if len(sizes) > 1:
            largest = 1 + np.argmax(sizes[1:])
            edges = ndi.binary_fill_holes(label_objects == largest)
        else:
            print('empty component')
            edges = np.zeros_like(edges)

        masks_ws.append(edges)

        # Shape screening: any detected blob marks the component as good.
        if detector.detect((edges * 200.).astype(np.uint8)):
            pos_examples.append(count)
        else:
            neg_examples.append(count)

    return np.array(masks_ws), np.array(pos_examples), np.array(neg_examples)
Exemple #10
0
def localize_object_dominant_color(frame): ##IMG : String of file path
    """Locate the object on the table and return its dominant HSV color.

    The frame is cropped to the table region, differenced against an
    averaged empty-table background, binarised, and the resulting blob is
    located with cv2.SimpleBlobDetector.  A region around the first blob
    is then handed to get_dominant_color_in_HSV.

    Parameters
    ----------
    frame : np.ndarray
        BGR image of the table (as read by cv2).

    Returns
    -------
    The dominant HSV color reported by get_dominant_color_in_HSV.

    Raises
    ------
    IndexError
        If no blob is detected in the frame (unchanged behaviour).
    """
    ##crop values delimiting the table region of interest
    y1, y2 = 0, 470
    x1, x2 = 143, 530

    frame = frame[y1:y2, x1:x2] ##crop that bitch

    ##KEEP ORIGINAL COLOR COPY
    original = frame.copy()
    original = cv2.cvtColor(original, cv2.COLOR_BGR2HSV)  ##WE WANT TO FIND HSV DOMINANT COLOR

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    ##BACKGROUND IMAGES REQURED################
    # Average several empty-table shots to get a stable background model.
    background_crops = []
    for name in ('BACKGROUND/EmptyTable_6.jpg',
                 'BACKGROUND/EmptyTable_7.jpg',
                 'BACKGROUND/EmptyTable_8.jpg'):
        bgm = cv2.imread(name, 0)
        background_crops.append(bgm[y1:y2, x1:x2])
    ###########################################

    BgmImg = np.mean(np.array(background_crops), axis=0)
    BgmImg = cv2.GaussianBlur(BgmImg, (21, 21), 0)

    # BUGFIX: np.int was removed in NumPy 1.24; plain int is equivalent.
    highlight = np.abs(frame.astype(int) - BgmImg.astype(int))

    ##threshold
    # BUGFIX: the original pair of masked assignments left pixels exactly
    # equal to 20 untouched; np.where makes a clean two-way split.
    highlight = np.where(highlight >= 20, 255, 0)

    ##compatable with cv2 type
    highlight = highlight.astype(np.uint8)

    ##invert colors so the object shows up as a dark blob
    highlight = cv2.bitwise_not(highlight)

    ###BLOB DETECTOR
    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    params.minThreshold = 10
    params.maxThreshold = 200

    # Filter by Area.
    params.filterByArea = True
    params.minArea = 50

    # Filter by Circularity
    params.filterByCircularity = False

    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.87

    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.01

    # Create a detector with the parameters
    # OpenCV 2.x exposes the detector under a different constructor name.
    if int(cv2.__version__.split('.')[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)

    # Detect blobs.
    keypoints = detector.detect(highlight)

    # Kept for debugging (see commented block below).
    im_with_keypoints = cv2.drawKeypoints(original, keypoints, np.array([]), (0, 0, 255))

    # Raises IndexError when nothing is detected (original behaviour).
    keypoint = keypoints[0].pt

    '''
    #commented code here useful for debugging blob misstakes
    #Draw detected blobs as red circles.
    cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    cv2.imshow("t",im_with_keypoints )
    cv2.waitKey(0)
    time.sleep(30)
    '''

    frame = crop_blob(original, keypoint, 10) ##original already converted to HSV

    cv2.imwrite("im_with_keypoints.jpg", frame)

    return get_dominant_color_in_HSV(frame)
    def process(self, frame):
        """Detect and track blobs in a camera frame, broadcast them over OSC/ROS.

        Pipeline: undistort the frame, extract the foreground, run a
        SimpleBlobDetector tuned from the JSON config, feed the keypoints
        to the tracker, then publish the tracked poses as a ROS PoseArray
        and (optionally) as an OSC ``/blobs`` message, returning an
        annotated color image for display.
        """
        # Reload tuning from the config file on every frame so parameter
        # changes take effect without a restart.
        with open(self.inputs["config"]) as f:
            cfg = json.load(f)
            tuning = cfg["cvTuning"]

        # Camera matrix with the principal point at the image centre.
        mat = np.array([[1, 0, frame.shape[1] / 2], [0, 1, frame.shape[0] / 2],
                        [0, 0, 1]])
        dist = np.array(cfg["distortion"])
        undistorted = cv2.undistort(frame, mat, dist)
        out_image = self.inputs["bridge"].cv2_to_imgmsg(undistorted, "mono8")
        # out_image = self.inputs["bridge"].cv2_to_imgmsg(self.hot_mask, "mono8")
        self.inputs["debug"].publish(out_image)

        frame = undistorted

        # NOTE(review): blur is read here but only used by the commented-out
        # cv2.blur calls below.
        blur = tuning["blur"]
        if self.ping_mode:
            fg = self.get_ping_foreground(frame, tuning)
        else:
            fg = self.fgbg.apply(frame)

        # Let the background subtractor settle before emitting results.
        if self.frame_count < 10:
            return

        fg = fg.astype(np.uint8)

        fgcopy = fg.copy()
        in_color_copy = cv2.cvtColor(fgcopy, cv2.COLOR_GRAY2RGB)
        # return in_color_copy

        # kernel = np.ones((1, 1), np.uint8)
        # fg = cv2.erode(fg, kernel, iterations=1)

        # fg = cv2.blur(fg, (blur, blur))
        # fg = cv2.blur(fg, (blur, blur))

        # fg = cv2.blur(fg, (blur, blur))
        # fg = cv2.blur(fg, (blur, blur))
        if tuning["threshold"]:
            fg[fg > 0] = 255

        in_color = cv2.cvtColor(fg, cv2.COLOR_GRAY2RGB)

        # Area-only blob detection; all shape filters are disabled.
        params = cv2.SimpleBlobDetector_Params()
        params.filterByCircularity = False
        params.filterByConvexity = False
        params.minDistBetweenBlobs = tuning["minDistBetweenBlobs"]
        params.filterByArea = True
        params.minArea = tuning["minArea"]
        params.maxArea = tuning["maxArea"]
        params.filterByColor = False
        params.filterByInertia = False
        params.blobColor = 255

        detector = cv2.SimpleBlobDetector_create(params)
        with Timer("blobs"):
            keypoints = detector.detect(fg)

        ar = PoseArray()
        ar.header.frame_id = "/map"
        ar.header.stamp = rospy.Time.now()

        # timestamp in ms
        now = time.time()
        millis = int((now - self.start_time) * 1000)
        if millis > 0x7fffffff:
            # OSC integers are limited to 32 bits, so once every ~24 days
            # we have to reset the timestamp counter to zero.
            self.start_time = now
            millis = 0

        # OSC message layout: sourceId, millis, track count, then
        # (id, x, y, size) per track.
        oscmsg = OSC.OSCMessage()
        oscmsg.setAddress("/blobs")
        oscmsg.append(self.config["osc"]["sourceId"])
        oscmsg.append(millis)

        track_list = list()

        # im_with_keypoints = cv2.drawKeypoints(in_color, keypoints, np.array(
        # []), (255, 0, 0), cv2.DRAW_MATCHES_FLAGS_DEFAULT)

        # NOTE(review): under Python 3 map() returns a one-shot iterator, so
        # tracker.update() would exhaust it before draw_points sees it —
        # this snippet appears to assume Python 2's list-returning map.
        keypoints = map(lambda k: k.pt, keypoints)
        tracks, ids = self.tracker.update(keypoints)

        oscmsg.append(len(tracks))

        self.draw_points(in_color, keypoints, (255, 0, 0), solid=True)
        self.draw_points(in_color, tracks, (0, 0, 255), rad=4)

        for i, k in enumerate(tracks):
            # Convert photo pixel coordinates to world coordinates.
            x, y = self.photo_to_world(*k)
            size = 1  # TODO
            oscmsg.append(ids[i])
            oscmsg.append(x)
            oscmsg.append(y)
            oscmsg.append(size)

            pose = Pose()
            pose.position = Point(x, y, 0)
            ar.poses.append(pose)

            cv2.putText(
                in_color,
                "%s %d, %d" % (ids[i], x, y),
                (int(k[0] + 4), int(k[1] - 4)),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.32,
                (0, 0, 255),
            )

        # ntracks = self.tracker.update(np.array(track_list))
        # rospy.loginfo("TRACKER %d %d", len(keypoints), len(ntracks))

        osc_config = cfg.get('osc', {})
        if osc_config.get('send'):
            hosts = osc_config["hosts"]
            port = osc_config["port"]
            clients = list()
            # Lazily connect OSC clients once; reuse them on later frames.
            if not self.osc_clients:
                for host in hosts:
                    try:
                        client = OSC.OSCClient()
                        client.connect((host, port))
                    # NOTE(review): bare except swallows all errors, including
                    # KeyboardInterrupt — should be `except Exception`.
                    except:
                        sys.stderr.write('could not connect: %r\n' %
                                         osc_config)
                    else:
                        clients.append(client)
                self.osc_clients = clients
            if self.osc_clients:
                #sys.stderr.write('send %r\n' % oscmsg)
                for client in self.osc_clients:
                    try:
                        client.send(oscmsg)
                    except Exception as e:
                        client_address = client.address()
                        if client_address is None:
                            continue

                        addr_str = '{0}:{1}'.format(*client_address)

                        # Rate-limit the per-host error message to one per 10 s.
                        if addr_str in self.last_ip_err_time and monotonic(
                        ) - self.last_ip_err_time[addr_str] < 10:
                            continue

                        sys.stderr.write(
                            'could not send OSC packet to ({0}): {1}\n'.format(
                                addr_str, e))
                        self.last_ip_err_time[addr_str] = monotonic()
        else:
            self.osc_client = None

        if osc_config.get('record'):
            with open(osc_config['record'], 'a') as file:
                #sys.stderr.write('record %r\n' % oscmsg)
                file.write(json.dumps(list(oscmsg)) + '\n')

        self.inputs["pose_pub"].publish(ar)

        # ret, thresh = cv2.threshold(fg, 127, 255, 0)
        # im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        # for i, c in enumerate(contours):
        #     color = tuple(map(lambda x: x * 255, colorsys.hsv_to_rgb(float(i + 1) * 360.0 / len(contours), 1, 1)))
        #     rospy.loginfo("NEAT %s", str(color))
        #     cv2.drawContours(in_color, [c], 0, color, 1)

        return in_color
Exemple #12
0
    def detectBlocksInDepthImage(self):
        # #---------- hard coded color HSV range-------
        # green_lower = np.array([30, 15, 100])
        # green_upper = np.array([100, 100, 200])
        # green = [green_lower, green_upper]

        # black_lower = np.array([100, 0, 40])
        # black_upper = np.array([180, 80, 80])
        # black = [black_lower, black_upper]

        # red_lower = np.array([150, 160, 100])
        # red_upper = np.array([200, 255, 150])
        # red = [red_lower, red_upper]

        # orange_lower = np.array([0, 150, 180])
        # orange_upper = np.array([20, 255, 230])
        # orange = [orange_lower, orange_upper]

        # yellow_lower = np.array([21, 200, 231])
        # yellow_upper = np.array([40, 255, 255])
        # yellow = [yellow_lower, yellow_upper]

        # blue_lower = np.array([100, 90, 120])
        # blue_upper = np.array([130, 180, 200])
        # blue = [blue_lower, blue_upper]

        # purple_lower = np.array([121, 30, 130])
        # purple_upper = np.array([186, 88, 180])
        # purple = [purple_lower, purple_upper]

        # pink_lower = np.array([150, 150, 180]) # how to differentiate pinnk and orange
        # pint_upper = np.array([200, 209, 203])
        # pink = [pink_lower, pint_upper]

        # color_all = [green, black, red, orange, yellow, blue, purple, pink]
        # color_strings = [ "green", "black", "red", "orange", "yellow", "blue", "purple", "pink" ]
        """
        TODO:
        Implement a blob detector to find blocks
        in the depth image
        """
        rgb_frame = self.currentVideoFrame
        print('-----size of rgb_frame')
        print rgb_frame.shape
        hsv_frame = cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2HSV)
        self.captureDepthFrame()
        self.convertDepthFrame()
        depth_frame = self.DepthCM
        # depth_frame_crop = depth_frame[93:435, 135:475]
        params = cv2.SimpleBlobDetector_Params()

        # Change thresholds
        params.minThreshold = 10
        # params.minThreshold = 50;
        params.maxThreshold = 300

        # Filter by Area.
        params.filterByArea = True
        # params.minArea = 1000
        # params.maxArea = 40000
        params.minArea = 250
        params.maxArea = 1500

        # Filter by Circularity
        params.filterByCircularity = True
        params.minCircularity = 0.1
        # params.maxCircularity = 0.85

        # Filter by Convexity
        params.filterByConvexity = True
        params.minConvexity = 0.01
        # Filter by Inertia
        params.filterByInertia = True
        params.minInertiaRatio = 0.01

        detector = cv2.SimpleBlobDetector_create(params)
        # print('----detector good-----')
        keypoints = detector.detect(depth_frame)
        # print('----keypoints good-----')
        # cv2.imshow('depth_frame',depth_frame)
        # cv2.waitKey(0)
        im_with_keypoints = cv2.drawKeypoints(
            depth_frame, keypoints, np.array([]), (0, 0, 255),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        imgplot = plt.imshow(im_with_keypoints)
        plt.show()
        # cv2.imshow("Keypoints", im_with_keypoints)
        # cv2.waitKey(0)
        #####---------------range, this can be done only with h value, can relax the bound
        # orange, h: 7, [0, 10]
        # yellow, h: 30 [20, 35]
        # green, h: 57, [50, 80]
        # blue, h: 113 [110, 120]
        #####-----------if h > 125, need additional info to decide the color, use r value
        # pink, h: 168, [165, 172], r: [190,255]
        # red, h: 174, [173, 180], r: [110, 189]
        ###-------problems
        # purple, h: 144 [136, 145], r: [80, 109]
        # black, h: 135 [150, 170], r: [0, 79]
        orange_h_l = np.array(0)
        orange_h_u = np.array(10)
        orange = [orange_h_l, orange_h_u]

        yellow_h_l = np.array(20)
        yellow_h_u = np.array(35)
        yellow = [yellow_h_l, yellow_h_u]

        green_h_l = np.array(50)
        green_h_u = np.array(80)
        green = [green_h_l, green_h_u]

        blue_h_l = np.array(110)
        blue_h_u = np.array(130)
        blue = [blue_h_l, blue_h_u]
        color_all_h = [orange, yellow, green, blue]
        color_strings_h = ["orange", "yellow", "green", "blue"]

        pink_r_l = np.array(190)
        pink_r_u = np.array(255)
        pink = [pink_r_l, pink_r_u]

        # red_r_l = np.array(110)
        red_r_l = np.array(146)
        red_r_u = np.array(189)
        red = [red_r_l, red_r_u]

        # purple_r_l = np.array(80)
        purple_r_l = np.array(110)
        purple_r_u = np.array(145)
        # purple_r_u = np.array(109)
        purple = [purple_r_l, purple_r_u]

        black_r_l = np.array(0)
        black_r_u = np.array(109)
        # black_r_u = np.array(79)
        black = [black_r_l, black_r_u]
        color_all_r = [pink, red, purple, black]
        color_strings_r = ["pink", "red", "purple", "black"]

        depth_frame_1layer = self.currentDepthFrame  # 1 layer depth frame for depth value

        points = []
        keypoints_depth = []
        keypoints_color = []
        keypoints_orientation = []

        y_lower_bound = 90
        y_upper_bound = 470
        x_lower_bound = 90
        x_upper_bound = 470
        for keyPoint in keypoints:
            # ----- find the rgb value
            print('------keyPoint.pt-----')
            print keyPoint.pt
            # print np.matmul(self.depth2rgb_affine, np.append([keyPoint.pt],[1]))
            if np.round(keyPoint.pt[0])>=x_lower_bound and np.round(keyPoint.pt[0])<=x_upper_bound and np.round(keyPoint.pt[1])>=y_lower_bound and np.round(keyPoint.pt[1])<=y_upper_bound \
            and depth_frame_1layer[int(np.round(keyPoint.pt[1]))][int(np.round(keyPoint.pt[0]))]<=720: #add another depth value info to make sure
                keyPoint_hsv = hsv_frame[int(np.round(keyPoint.pt[1]))][int(
                    np.round(keyPoint.pt[0]))][0:]  # x
                keyPoint_rgb = rgb_frame[int(np.round(keyPoint.pt[1]))][int(
                    np.round(keyPoint.pt[0]))][0:]  # x

                keyPoint_d = depth_frame_1layer[int(np.round(
                    keyPoint.pt[1]))][int(np.round(keyPoint.pt[0]))]
                keypoints_depth.append(keyPoint_d)

                print('--keyPoint_LOCATION-----')
                print keyPoint.pt
                print('--keyPoint_hsv-----')
                print keyPoint_hsv
                print('--keyPoint_rgb-----')
                print keyPoint_rgb
                # ----- decide the color of each blob
                if keyPoint_hsv[
                        0] <= 130:  # can determine the color from h value
                    for ind_clr_h, colr_range_h in enumerate(color_all_h):
                        if (keyPoint_hsv[0] >= colr_range_h[0]) and (
                                keyPoint_hsv[0] <= colr_range_h[1]):
                            print('-----color name------')
                            print color_strings_h[ind_clr_h]
                            #---------save the color name--------
                            keypoints_color.append(color_strings_h[ind_clr_h])
                            break

                else:  # decide color from r value
                    for ind_clr_r, colr_range_r in enumerate(color_all_r):
                        if (keyPoint_rgb[0] >= colr_range_r[0]) and (
                                keyPoint_rgb[0] <= colr_range_r[1]):
                            print('-----color name------')
                            print color_strings_r[ind_clr_r]
                            #---------save the color name--------
                            keypoints_color.append(color_strings_r[ind_clr_r])
                            break

                #------now figure out the orientation
                keyPoint_hsv_lower = np.zeros((3, ), dtype=int)
                keyPoint_hsv_upper = np.zeros((3, ), dtype=int)
                thereshold = 25
                for i in range(3):  # Hue range is [0,179]
                    if keyPoint_hsv[i] <= thereshold:
                        keyPoint_hsv_lower[i] = 0
                    else:
                        keyPoint_hsv_lower[i] = keyPoint_hsv[i] - thereshold

                    # tmp = 255 - thereshold
                    if i == 0:
                        if keyPoint_hsv[i] >= 179 - thereshold:
                            keyPoint_hsv_upper[i] = 179
                        else:
                            keyPoint_hsv_upper[
                                i] = keyPoint_hsv[i] + thereshold
                    else:
                        if keyPoint_hsv[i] >= 255 - thereshold:
                            keyPoint_hsv_upper[i] = 255
                        else:
                            keyPoint_hsv_upper[
                                i] = keyPoint_hsv[i] + thereshold

                # keyPoint_hsv_lower = keyPoint_hsv - 20
                # print keyPoint_hsv_lower
                # print np.clip(keyPoint_hsv_lower, 0, 0)
                # keyPoint_hsv_upper = keyPoint_hsv + 20
                # print keyPoint_hsv_upper
                # print np.clip(keyPoint_hsv_upper, 0, 255)
                # Threshold the HSV image to get only current colors
                # problem: backgraound is black, first crop image to only board
                hsv_frame_crop = hsv_frame[int(np.round(keyPoint.pt[1])) -
                                           25:int(np.round(keyPoint.pt[1])) +
                                           25,
                                           int(np.round(keyPoint.pt[0])) -
                                           25:int(np.round(keyPoint.pt[0])) +
                                           25]
                # imgplot = plt.imshow(rgb_frame_crop)
                # plt.show()
                # imgplot = plt.imshow(hsv_frame_crop)
                # plt.show()

                mask = cv2.inRange(hsv_frame_crop, keyPoint_hsv_lower,
                                   keyPoint_hsv_upper)
                # imgplot = plt.imshow(mask)
                # plt.show()

                kernel = np.ones((4, 4), np.uint8)
                closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
                closingHSV = cv2.bitwise_and(hsv_frame_crop,
                                             hsv_frame_crop,
                                             mask=closing)

                # imgplot = plt.imshow(closing)
                # plt.show()
                # print('----closing_hsv-----')
                # print closingHSV
                # -----how to convert to grayscale

                closing_bgr = cv2.cvtColor(closingHSV, cv2.COLOR_HSV2BGR)
                # imgplot = plt.imshow(closing_bgr)
                # plt.show()
                closing_gray = cv2.cvtColor(closing_bgr, cv2.COLOR_BGR2GRAY)

                # cv2.imwrite("/home/student/Desktop/Untitled/gray.png",closing_gray)
                # imgplot = plt.imshow(closing_gray)
                # plt.show()
                ret, thresh = cv2.threshold(closing_gray, 50, 255,
                                            cv2.THRESH_BINARY)
                # print('----closing_bgr-----')
                # print closing_bgr
                # print('----closing_gray-----')
                # print closing_gray.shape
                # print('----thresh-----')
                # print thresh
                # plt.imshow(thresh, cmap="Greys")
                # plt.show()

                im2, contours, hierarchy = cv2.findContours(
                    thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
                len_contours = len(contours)
                list_contours = np.zeros((len_contours, 1), dtype=int)
                for i in range(len(contours)):
                    list_contours[i] = len(contours[i])
                ind_maxlen = np.argmax(list_contours)
                rect = cv2.minAreaRect(contours[ind_maxlen])
                print('---orientation----')
                print rect[2]

                #-------save the orientation
                keypoints_orientation.append(rect[2])
                #     cnt = contours[i]
                #     print('--contours[i]----')
                #     print cnt
                #     rect = cv2.minAreaRect(cnt)
                #     print rect[2]

                # img, contours, hierarchy = cv2.findContours(opening,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
                # for i in range(len(contours)):
                #     cnt = contours[i]
                #     rect = cv2.minAreaRect(cnt)
                #     # self.phi_img[self.centroids_cnt] = rect[2]
                #     print('---angle')
                #     print rect[2]
                points.append(np.array([keyPoint.pt[0], keyPoint.pt[1], 1]))

        return points, keypoints_depth, keypoints_color, keypoints_orientation
Exemple #13
0
#cv2.createTrackbar("Low Sat", "Banana", 45, 255, nothing)
#cv2.createTrackbar("Low Value", "Banana", 45, 255, nothing)
#cv2.createTrackbar("Up Hue", "Banana", 25, 255, nothing)
#cv2.createTrackbar("Up Sat", "Banana", 255, 255, nothing)
#cv2.createTrackbar("Up Value", "Banana", 255, 255, nothing)

#cv2.namedWindow("Plum")
#cv2.createTrackbar("Low Hue", "Plum", 130, 255, nothing)
#cv2.createTrackbar("Low Sat", "Plum", 15, 255, nothing)
#cv2.createTrackbar("Low Value", "Plum", 0, 255, nothing)
#cv2.createTrackbar("Up Hue", "Plum", 200, 255, nothing)
#cv2.createTrackbar("Up Sat", "Plum", 180, 255, nothing)
#cv2.createTrackbar("Up Value", "Plum", 190, 255, nothing)

#Creates detector with parameters
# Detector for roundish, light-coloured fruit: area, colour, convexity and
# inertia filters together reject small, dark, bumpy, or elongated blobs.
params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = 1000  #will depend on camera distance
params.filterByColor = True
params.blobColor = 255  #light colours
params.filterByConvexity = True
params.minConvexity = 0.4  #higher = more uniform round   lower = more bumpy   max: 1
params.filterByInertia = True
params.minInertiaRatio = 0.4  #higher = more uniform round   lower = more oval like  max:1

detector = cv2.SimpleBlobDetector_create(params)

#Bananas are annoying cause they're a different shape so we need to make different parameters for it
# NOTE(review): only area is configured for bananas here; the banana
# detector itself is presumably created further down -- confirm.
params_banana = cv2.SimpleBlobDetector_Params()
params_banana.filterByArea = True
params_banana.minArea = 650
Exemple #14
0
    def findBall(self):
        """Locate the ball in the current frame.

        Thresholds the HSV image for the reddish hue range (handling the
        OpenCV hue wrap-around at 180), refines with a BGR threshold,
        cleans and blurs the mask, restricts it to a central ROI, then
        runs a blob detector.

        Returns:
            The strongest blob's (x, y) pixel position as an int array,
            or None when no blob is found.
        """
        # Threshold image in HSV space for reddish colors.  Work on
        # copies: the original code assigned ballSide.BALL_MIN/BALL_MAX
        # by reference and mutated them in place (BALL_MAX[0] = 180,
        # then % 180 -> 0), corrupting the shared thresholds for every
        # subsequent call.
        HSVHighThreshMAX = np.array(ballSide.BALL_MAX)
        HSVHighThreshMAX[0] = 180
        imgHSVMaskHigh = cv2.inRange(self.imgHSV, ballSide.BALL_MIN,
                                     HSVHighThreshMAX)
        HSVLowThreshMIN = np.array(ballSide.BALL_MIN)
        HSVLowThreshMIN[0] = 0
        HSVLowThreshMAX = np.array(ballSide.BALL_MAX)
        HSVLowThreshMAX[0] = HSVLowThreshMAX[0] % 180  # wrap hue past 180
        imgHSVMaskLow = cv2.inRange(self.imgHSV, HSVLowThreshMIN,
                                    HSVLowThreshMAX)
        imgHSVMask = cv2.bitwise_or(imgHSVMaskLow, imgHSVMaskHigh)

        # Use HSV threshold to mask BGR image
        imgBGR = cv2.bitwise_and(self.img, self.img, mask=imgHSVMask)

        # Threshold BGR image
        img = cv2.inRange(imgBGR, ballSide.BALL_MIN_BGR, ballSide.BALL_MAX_BGR)

        # Morphological open (erode then dilate) to remove speckle noise.
        kClose = np.ones((3, 3), np.uint8)
        img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kClose)

        # Blur the image
        img = cv2.GaussianBlur(img, (5, 5), 0)

        # Region of interest: keep only the central 85% x 70% of the frame.
        ROIhorz = 0.85
        ROIvert = 0.7

        leftW = int(self.w * (1 - ROIhorz) / 2)
        rightW = int(self.w * (0.5 + ROIhorz / 2))
        topW = int(self.h * (1 - ROIvert) / 2)
        botW = int(self.h * (0.5 + ROIvert / 2))

        # Zero out everything outside the ROI.
        img[:, :leftW] = 0
        img[:, rightW:] = 0
        img[:topW, :] = 0
        img[botW:, :] = 0

        self.imgThreshBall = img

        # Setup SimpleBlobDetector parameters.
        params = cv2.SimpleBlobDetector_Params()

        params.filterByColor = False

        # Change thresholds
        params.minThreshold = 0
        params.maxThreshold = 255

        # Filter by Area.
        params.filterByArea = True
        params.minArea = 10
        params.maxArea = 90

        # Filter by Circularity
        params.filterByCircularity = True
        params.minCircularity = 0.3

        # Convexity and inertia filters are disabled; the values below
        # are kept only for easy re-enabling.
        params.filterByConvexity = False
        params.minConvexity = 0.15
        params.maxConvexity = 1

        params.filterByInertia = False
        params.minInertiaRatio = 0.4

        # OpenCV 2.x used a different factory name.
        ver = (cv2.__version__).split('.')
        if int(ver[0]) < 3:
            detector = cv2.SimpleBlobDetector(params)
        else:
            detector = cv2.SimpleBlobDetector_create(params)

        blobs = detector.detect(img)

        if blobs:
            # Return the highest-response blob.  (Bug fix: the original
            # selected the best index `ind` but then returned blobs[i],
            # the stale loop variable -- i.e. always the last blob.)
            best = max(blobs, key=lambda b: b.response)
            return np.int0(best.pt)
        else:
            return None
Exemple #15
0
ACTIVE_INTERSECT = 0  # NOTE(review): meaning not visible here -- confirm against callers

### FOR test_dilate.py ###
# Input mask video and where the blob-detection overlay video is written.
INPUT_MASK = os.path.join(DATA_PATH, 'outpy_blur2x2.mp4')
OUTPUT_MASK_BLOB_DETECTION = os.path.join(OUTPUT_PATH, 'output_blobs.mp4')

# Pre-processing kernel sizes and iteration counts.
BLUR_KERNEL = (5, 5)

EROSION_KERNEL = (2, 2)
EROSION_ITERATIONS = 1

DILATION_KERNEL = (5, 5)
DILATION_ITERATIONS = 5

# params for blob detector: very permissive (minArea 1) so even tiny
# blobs survive; inertia filter discards near-degenerate shapes.
BLOB_DET_PARAMS = cv2.SimpleBlobDetector_Params()

BLOB_DET_PARAMS.minThreshold = 10
BLOB_DET_PARAMS.maxThreshold = 200
BLOB_DET_PARAMS.filterByArea = True
BLOB_DET_PARAMS.minArea = 1
#adding a filter by inertia
BLOB_DET_PARAMS.filterByInertia = True
BLOB_DET_PARAMS.minInertiaRatio = 0.001

### DIFF VIDEOS
DIFF_VIDEO_1 = os.path.join(DATA_PATH, 'outpy_blur2x2.mp4')
DIFF_VIDEO_2 = os.path.join(DATA_PATH,
                            'output_mask_blur_2x2_new_intersect.mp4')
# NOTE(review): written under DATA_PATH, unlike OUTPUT_MASK_* above
# which use OUTPUT_PATH -- confirm this is intended.
DIFF_OUTPUT = os.path.join(DATA_PATH, 'diff_output.mp4')
def blob_detection():
    '''
    Demo: isolate the low-blue (red/green) content of a test image and
    run OpenCV's SimpleBlobDetector over it, displaying the keypoints.

    https://www.learnopencv.com/blob-detection-using-opencv-python-c/
    '''

    # get image
    img_path = r'C:\Partition\Bren\Pictures\dorritoMan.jpg'
    #img_path = r'C:\Partition\Bren\Projects\dev\python\standalone_tools\examples\BlobTest.jpg'
    #img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img = cv2.imread(img_path)
    # cv2.imread signals failure by returning None rather than raising;
    # fail early with a clear message instead of a TypeError below.
    if img is None:
        raise IOError('could not read image: %s' % img_path)
    img = img / 255.0

    # get bgr channels (not rgb!)
    b_channel = img[:, :, 0]
    g_channel = img[:, :, 1]
    r_channel = img[:, :, 2]

    # threshold using blue channel
    ret, mask = cv2.threshold(b_channel, 0.2, 1.0, cv2.THRESH_BINARY)
    mask_inv = 1.0 - mask

    # multiply red and green channels, keeping only low-blue pixels
    img = r_channel * g_channel * mask_inv

    # normalise to [0, 1]
    img = img - img.min()
    img = img * (1.0 / img.max())

    # convert to 8-bit and adjust gamma (gamma=1.0 is a no-op placeholder)
    img_8bit = numpy.uint8(img * 255)
    img_8bit = adjust_gamma(img_8bit, gamma=1.0)

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    params.minThreshold = 10
    params.maxThreshold = 200

    # Filter by Area.
    params.filterByArea = True
    params.minArea = 10

    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.1

    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.87

    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.01

    # OpenCV 2.x used a different factory name.
    ver = (cv2.__version__).split('.')
    print(ver)
    if int(ver[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)

    # Detect blobs.
    keypoints = detector.detect(img_8bit)
    print(keypoints)

    # Draw detected blobs as red circles; DRAW_RICH_KEYPOINTS scales each
    # circle with the size of its blob.
    im_with_keypoints = cv2.drawKeypoints(
        img_8bit, keypoints, numpy.array([]), (0, 0, 255),
        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Show keypoints
    cv2.imshow("Keypoints", im_with_keypoints)
    cv2.waitKey(0)
Exemple #17
0
def blob_detect(
    image,  #-- frame to search (cv standard)
    hsv_min,  #-- lower HSV threshold [h_min, s_min, v_min]
    hsv_max,  #-- upper HSV threshold [h_max, s_max, v_max]
    blur=0,  #-- blur kernel size; 0 disables blurring
    blob_params=None,  #-- cv2.SimpleBlobDetector_Params, or None for defaults
    search_window=None,  #-- [x_min, y_min, x_max, y_max] as fractions (0.0-1.0) from the top-left corner
    imshow=False):
    """Detect blobs whose colour falls inside an HSV range.

    Returns a tuple (keypoints, reversemask), where reversemask is the
    inverted binary mask actually fed to the detector (blobs black on
    white, as SimpleBlobDetector expects by default).
    """

    def _show(title, frame, wait=True):
        # Debug display, gated on the imshow flag.
        if imshow:
            cv2.imshow(title, frame)
            if wait:
                cv2.waitKey(0)

    # Optional denoising blur before any thresholding.
    if blur > 0:
        image = cv2.blur(image, (blur, blur))
        _show("Blur", image)

    # Default search window covers the whole frame.
    if search_window is None:
        search_window = [0.0, 0.0, 1.0, 1.0]

    # BGR -> HSV, then threshold to a binary mask.
    hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_img, hsv_min, hsv_max)
    _show("HSV Mask", mask, wait=False)

    # Dilate then erode to consolidate in-range regions.
    mask = cv2.dilate(mask, None, iterations=2)
    _show("Dilate Mask", mask)

    mask = cv2.erode(mask, None, iterations=2)
    _show("Erode Mask", mask)

    # Restrict detection to the requested sub-window.
    mask = apply_search_window(mask, search_window)
    _show("Searching Mask", mask)

    # Build default detector parameters unless the caller supplied some.
    if blob_params is None:
        params = cv2.SimpleBlobDetector_Params()

        params.minThreshold = 0
        params.maxThreshold = 100

        params.filterByArea = True
        params.minArea = 30
        params.maxArea = 20000

        params.filterByCircularity = True
        params.minCircularity = 0.1

        params.filterByConvexity = True
        params.minConvexity = 0.5

        params.filterByInertia = True
        params.minInertiaRatio = 0.5
    else:
        params = blob_params

    detector = cv2.SimpleBlobDetector_create(params)

    # The detector looks for dark blobs, so invert the mask.
    reversemask = 255 - mask
    _show("Reverse Mask", reversemask)

    keypoints = detector.detect(reversemask)

    return keypoints, reversemask
def detect(paper, m, sheet):
    """Inspect one paper image for defects and record the results.

    Detects holes (blob detection on the grayscale image) and folds or
    stripes (probabilistic Hough lines on the edge images), writes the
    annotated images under ./result/, and fills row m+1 of *sheet*:
    columns 3/4 = fold count and coordinates, 5/6 = hole count and
    coordinates, 7/8 = stripe count and coordinates, 9 = overall defect
    flag ('有' = defect found, '无' = none).

    Returns the (possibly annotated) image used for the final decision.
    """
    defect = "无"  # overall defect flag; flipped to '有' on any finding
    paper1 = paper  # alias of the input frame (paper itself may be reassigned below)
    time0 = time.time()
    paper_gray = cv2.cvtColor(paper, cv2.COLOR_BGR2GRAY)
    gaus = cv2.GaussianBlur(paper_gray, (5, 5), 0)
    # NOTE(review): cv2.blur's third positional argument is not a sigma;
    # passing 0 here looks unintended -- confirm against the OpenCV docs.
    blur = cv2.blur(paper_gray, (5, 5), 0)
    edge = cv2.Canny(gaus, 15, 30)
    edge1 = cv2.Canny(blur, 15, 30)

    # ---- hole detection: blob detector on the grayscale image ----
    params = cv2.SimpleBlobDetector_Params()

    params.minThreshold = 3
    params.maxThreshold = 200

    params.filterByArea = True
    params.minArea = 50

    params.filterByCircularity = True
    params.minCircularity = 0.1

    params.filterByConvexity = True
    params.minConvexity = 0.87

    params.filterByInertia = True
    params.minInertiaRatio = 0.01

    # OpenCV 2.x used a different factory name.
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)

    keypoints = detector.detect(paper_gray)

    if len(keypoints):
        defect = '有'
        paper = cv2.drawKeypoints(paper_gray, keypoints, np.array([]), (0, 0, 255),
                                  cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        # Centre of every detected hole (was a manual index-counter loop).
        hole_pos = [[int(kp.pt[0]), int(kp.pt[1])] for kp in keypoints]

        cv2.imwrite("./result/hole/" + str(m) + ".jpg", paper)
        sheet.cell(row=m + 1, column=5, value=len(hole_pos))
        sheet.cell(row=m + 1, column=6, value=str(hole_pos))
    else:
        sheet.cell(row=m + 1, column=5, value=0)
        sheet.cell(row=m + 1, column=6, value='NONE')

    # ---- fold / stripe detection: close the edges, then Hough lines ----
    dilated = cv2.dilate(edge, None, iterations=2)  # dilate
    eroded = cv2.erode(dilated, None, iterations=1)  # erode

    def _near_border(x1, y1, x2, y2, size):
        # True when the whole segment hugs one image border (within 5% of
        # the width/height): those are paper edges, not defects.
        return ((x1 < size[1] * 0.05 and x2 < size[1] * 0.05)
                or (x1 > size[1] * 0.95 and x2 > size[1] * 0.95)
                or (y1 < size[0] * 0.05 and y2 < size[0] * 0.05)
                or (y1 > size[0] * 0.95 and y2 > size[0] * 0.95))

    lines = cv2.HoughLinesP(eroded, 1, np.pi / 180, 100, minLineLength=80, maxLineGap=20)
    if lines is None:
        # No folds and no stripes.
        sheet.cell(row=m + 1, column=3, value=0)
        sheet.cell(row=m + 1, column=4, value='NONE')
        sheet.cell(row=m + 1, column=7, value=0)
        sheet.cell(row=m + 1, column=8, value='NONE')
    else:
        defect = '有'
        lines1 = lines[:, 0, :]  # flatten to a 2-D (N, 4) array
        x = lines1.shape[0]
        size = paper.shape

        foldxy = []
        if x > 200:
            # Very many lines: classify as stripes and re-detect with a
            # much more permissive Hough transform on the blurred edges.
            lines = cv2.HoughLinesP(edge1, 1, np.pi / 180, 20, minLineLength=2, maxLineGap=5)
            lines1 = lines[:, 0, :]  # flatten to a 2-D (N, 4) array
            for x1, y1, x2, y2 in lines1[:]:
                if not _near_border(x1, y1, x2, y2, size):
                    foldxy.append([x1, y1, x2, y2])
            for x1, y1, x2, y2 in foldxy[:]:
                cv2.line(paper1, (x1, y1), (x2, y2), (255, 255, 0), 1)
                cv2.line(paper, (x1, y1), (x2, y2), (255, 255, 0), 1)
            cv2.imwrite("./result/stripe/" + str(m) + ".jpg", paper1)
            sheet.cell(row=m + 1, column=3, value=0)
            sheet.cell(row=m + 1, column=4, value='NONE')
            sheet.cell(row=m + 1, column=7, value=len(foldxy))
            sheet.cell(row=m + 1, column=8, value=str(foldxy))
        else:
            # A moderate number of lines: classify them as folds.
            for x1, y1, x2, y2 in lines1[:]:
                if not _near_border(x1, y1, x2, y2, size):
                    foldxy.append([x1, y1, x2, y2])

            for x1, y1, x2, y2 in foldxy[:]:
                cv2.line(paper1, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.line(paper, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.imwrite("./result/fold/" + str(m) + ".jpg", paper1)
            sheet.cell(row=m + 1, column=3, value=len(foldxy))
            sheet.cell(row=m + 1, column=4, value=str(foldxy))
            sheet.cell(row=m + 1, column=7, value=0)
            sheet.cell(row=m + 1, column=8, value='NONE')

    time1 = time.time()
    print(str(m) + '.time: %.3f' % (time1 - time0))
    sheet.cell(row=m + 1, column=9, value=defect)
    if defect == '有':
        cv2.imwrite("./result/defect_yes/" + str(m) + ".jpg", paper)
    else:
        cv2.imwrite("./result/defect_no/" + str(m) + ".jpg", paper)
    return paper
Exemple #19
0
def process_image(im):
    """Find the largest blob in *im* and its offset from the image centre.

    Returns (largest, offset, ignore): the best cv2.KeyPoint, its
    (x, y) offset from the centre as fractions in [-0.5, 0.5], and an
    ignore flag, or (None, None, False) when no blob is detected.

    Tracks the previously accepted detection in the module globals
    `previous_measurement` / `false_positives`: a jump larger than
    `false_positive_offset` (in x, y, or size) marks the detection as a
    false positive; after more than 5 consecutive false positives the
    tracker resets and accepts the new position.
    """
    global previous_measurement, false_positives

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    params.minThreshold = 5
    params.maxThreshold = 200

    # Area is the only active filter; the shape filters below are
    # disabled and kept only for easy re-enabling.
    params.filterByArea = True
    params.minArea = 100
    params.maxArea = 1000000000

    params.filterByCircularity = False
    params.minCircularity = 0.1

    params.filterByConvexity = False
    params.minConvexity = 0.87

    params.filterByInertia = False
    params.minInertiaRatio = 0.01

    # Create a detector with the parameters
    detector = cv2.SimpleBlobDetector_create(params)

    # Detect blobs.
    keypoints = detector.detect(im)

    if not keypoints:
        return None, None, False

    # Largest keypoint wins (first one on ties, matching the original loop).
    largest = max(keypoints, key=lambda kp: kp.size)

    # Offset of the blob centre from the image centre, as fractions.
    x, y = largest.pt
    x_size, y_size = len(im[0]), len(im)
    offset = (x / x_size) - 0.5, (y / y_size) - 0.5

    # A jump of more than false_positive_offset (in x, y, or size) from
    # the previous accepted measurement counts as a false positive.
    ignore = False
    if previous_measurement is not None:
        if abs(previous_measurement.pt[0] - x) > false_positive_offset or \
                abs(previous_measurement.pt[1] - y) > false_positive_offset or \
                abs(previous_measurement.size - largest.size) > false_positive_offset:
            ignore = True
            false_positives.append(largest)
        else:
            previous_measurement = largest
    else:
        previous_measurement = largest

    # After several consecutive false positives, assume the scene really
    # changed: reset and accept the new measurement.
    if ignore and len(false_positives) > 5:
        print("Resetting false positives")
        false_positives = []
        previous_measurement = largest

    return largest, offset, ignore
    mins: np.ndarray
    maxes: np.ndarray
    mins, maxes = adjusted_mins_maxes()
    if not calibrating:
        ranges[active_user, 0] = mins
        ranges[active_user, 1] = maxes
    else:
        for n in range(3):
            if mins[n] < ranges[active_user, 0, n]:
                ranges[active_user, 0, n] = mins[n]
            if maxes[n] > ranges[active_user, 1, n]:
                ranges[active_user, 1, n] = maxes[n]
    calibrating = True


# Blob detector for the colour mask: area is the only active filter
# (colour and convexity checks are explicitly disabled).
p = cv.SimpleBlobDetector_Params()
p.filterByColor = False
p.filterByConvexity = False
p.filterByArea = True
p.minArea = 100
detector = cv.SimpleBlobDetector_create(p)

while True:
    _, large_frame = cap.read()
    resized_unflipped = cv.resize(large_frame, (0, 0), fx=0.5, fy=0.5)
    resized_flipped = cv.flip(resized_unflipped, 1)
    resized_flipped_hsv = cv.cvtColor(resized_flipped, cv.COLOR_BGR2HSV)
    fs = resized_flipped_hsv.shape
    center_row = fs[0] // 2
    center_column = fs[1] // 2
    cv.rectangle(resized_flipped, (center_column - 5, center_row - 5), (center_column + 5, center_row + 5), (255, 255, 255), 1)
Exemple #21
0
    def __init__(self, parent):
        """Build the headlight-control GUI, configure the blob detector
        used to find bright blobs in captured frames, and start the
        capture thread.
        """

        # Status bitmaps swapped into the static-bitmap widgets at runtime.
        self._BITMAP_NIGHT = wx.Bitmap(u"night.jpg", wx.BITMAP_TYPE_ANY)
        self._BITMAP_DAY = wx.Bitmap(u"day.jpg", wx.BITMAP_TYPE_ANY)
        self._BITMAP_DIM = wx.Bitmap(u"dim.jpg", wx.BITMAP_TYPE_ANY)
        self._BITMAP_BRIGHT = wx.Bitmap(u"bright.jpg", wx.BITMAP_TYPE_ANY)
        self._BITMAP_OFF = wx.Bitmap(u"off.jpg", wx.BITMAP_TYPE_ANY)
        self._BITMAP_NONE = wx.Bitmap(u"AHC.jpg", wx.BITMAP_TYPE_ANY)

        ##################################################################
        # GUI layout
        ##################################################################

        wx.Frame.__init__(self,
                          parent,
                          id=wx.ID_ANY,
                          title=wx.EmptyString,
                          pos=wx.DefaultPosition,
                          size=wx.Size(840, 490),
                          style=0 | wx.TAB_TRAVERSAL)

        self.SetSizeHintsSz(wx.DefaultSize, wx.DefaultSize)

        bsH1 = wx.BoxSizer(wx.HORIZONTAL)

        # Left: live 640x480 capture display.
        self._bitmapCapture = wx.StaticBitmap(self, wx.ID_ANY,
                                              self._BITMAP_NONE,
                                              wx.DefaultPosition,
                                              wx.Size(640, 480), 0)
        bsH1.Add(self._bitmapCapture, 0, wx.ALL, 5)

        # Right column: light state, override controls, day/night mode.
        bsV11 = wx.BoxSizer(wx.VERTICAL)

        self._bitmapLightState = wx.StaticBitmap(self, wx.ID_ANY,
                                                 self._BITMAP_BRIGHT,
                                                 wx.DefaultPosition,
                                                 wx.DefaultSize, 0)
        bsV11.Add(self._bitmapLightState, 0,
                  wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)

        self._cbOverrideAutoDim = wx.CheckBox(self, wx.ID_ANY, u"Override",
                                              wx.DefaultPosition,
                                              wx.DefaultSize, 0)
        bsV11.Add(self._cbOverrideAutoDim, 0,
                  wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)

        # Manual DIM/BRIGHT choice; starts disabled (enabled by override).
        _rboxDimChoices = [u"DIM", u"BRIGHT"]
        self._rboxDim = wx.RadioBox(self, wx.ID_ANY, wx.EmptyString,
                                    wx.DefaultPosition, wx.DefaultSize,
                                    _rboxDimChoices, 1, wx.RA_SPECIFY_COLS)
        self._rboxDim.SetSelection(0)
        self._rboxDim.Enable(False)
        bsV11.Add(self._rboxDim, 0, wx.ALL | wx.EXPAND, 5)

        self._bitmapDayNightMode = wx.StaticBitmap(self, wx.ID_ANY,
                                                   self._BITMAP_NIGHT,
                                                   wx.DefaultPosition,
                                                   wx.DefaultSize, 0)
        bsV11.Add(self._bitmapDayNightMode, 0,
                  wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)

        self._cbOverrideAutoMode = wx.CheckBox(self, wx.ID_ANY, u"Override",
                                               wx.DefaultPosition,
                                               wx.DefaultSize, 0)
        bsV11.Add(self._cbOverrideAutoMode, 0,
                  wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)

        self._buttonClose = wx.Button(self, wx.ID_ANY, u"Close",
                                      wx.DefaultPosition, wx.DefaultSize, 0)
        bsV11.Add(self._buttonClose, 0, wx.ALL | wx.EXPAND, 5)

        bsH1.Add(bsV11, 1, wx.EXPAND, 5)

        self.SetSizer(bsH1)
        self.Layout()

        self.Centre(wx.BOTH)

        # Connect Events
        self._cbOverrideAutoDim.Bind(wx.EVT_CHECKBOX, self.OverrideAutoDim)
        self._rboxDim.Bind(wx.EVT_RADIOBOX, self.ManualDimAndBright)
        self._cbOverrideAutoMode.Bind(wx.EVT_CHECKBOX, self.OverrideAutoMode)
        self._buttonClose.Bind(wx.EVT_BUTTON, self.onClose)

        ##################################################################
        # Blob detector: finds bright blobs in the captured frame
        ##################################################################

        thresholdStep = 8.0
        minThreshold = 191.0  # only near-white pixels qualify as blobs
        maxThreshold = 255.0
        minRepeatability = 2
        # Distances/areas expressed as proportions of the image size,
        # converted to pixels below.
        minDistBetweenBlobsProportional = 0.02
        minBlobAreaProportional = 0.0001
        maxBlobAreaProportional = 0.2
        minBlobCircularity = 0.2
        imageSize = (640, 480)
        # Region of interest: x in [256, 640), y in [240, 432).
        self._roi_minX = int(0.4 * imageSize[0])
        self._roi_maxX = int(1.0 * imageSize[0])
        self._roi_minY = int(0.5 * imageSize[1])
        self._roi_maxY = int(0.9 * imageSize[1])

        minDistBetweenBlobs = min(imageSize) * minDistBetweenBlobsProportional
        area = imageSize[0] * imageSize[1]
        minBlobArea = area * minBlobAreaProportional
        maxBlobArea = area * maxBlobAreaProportional

        detectorParams = cv2.SimpleBlobDetector_Params()

        detectorParams.minDistBetweenBlobs = minDistBetweenBlobs

        detectorParams.thresholdStep = thresholdStep
        detectorParams.minThreshold = minThreshold
        detectorParams.maxThreshold = maxThreshold

        detectorParams.minRepeatability = minRepeatability

        detectorParams.filterByArea = True
        detectorParams.minArea = minBlobArea
        detectorParams.maxArea = maxBlobArea

        # Keep only bright (white) blobs.
        detectorParams.filterByColor = True
        detectorParams.blobColor = 255

        detectorParams.filterByCircularity = True
        detectorParams.minCircularity = minBlobCircularity

        detectorParams.filterByInertia = False

        detectorParams.filterByConvexity = False

        self._detector = cv2.SimpleBlobDetector_create(detectorParams)

        ##################################################################
        # Runtime state and capture thread
        ##################################################################

        self._running = True
        self._pause = False  # For pausing Capture Loop
        self._lightStates = [1] * 5  # 0: Not Detected & 1: Detected
        self._nightMode = True
        self._modeOverride = False
        self._currentBitmapLightState = 1  # 0 : OFF, 1 : DIM, 2 : BRIGHT
        self._currentBitmapDayNightMode = 0  # 0 : Night, 1 : Day

        # GPIO interface (presumably drives the headlight relays -- confirm).
        self._pi = pigpio.pi()

        # Start grabbing frames from the default camera on a worker thread.
        self._capture = cv2.VideoCapture(0)
        self._bitmapCapture.SetBitmap(self._BITMAP_NONE)
        self._captureThread = threading.Thread(target=self._runCaptureLoop)
        self._captureThread.start()
Exemple #22
0
import cv2 as cv
import numpy as np
if __name__ == '__main__':

    # Load the test image and derive a grayscale copy plus a Canny edge map.
    image = cv.imread('Imagem.jpg')
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

    edges = cv.Canny(gray, 100, 200)

    # Blob-detector configuration (the detector itself is not created here).
    blob_params = cv.SimpleBlobDetector_Params()

    # Intensity threshold sweep
    blob_params.minThreshold = 50
    blob_params.maxThreshold = 250

    # Reject blobs smaller than 10 px
    blob_params.filterByArea = True
    blob_params.minArea = 10

    # Require at least slight circularity
    blob_params.filterByCircularity = True
    blob_params.minCircularity = 0.1

    # Require moderate convexity
    blob_params.filterByConvexity = True
    blob_params.minConvexity = 0.50

    # Require a minimal inertia ratio (elongation filter)
    blob_params.filterByInertia = True
    blob_params.minInertiaRatio = 0.01
Exemple #23
0
    def callback(self, image_msg):
        """Process one camera frame: HSV-filter for the currently tracked
        traffic-light color, blob-detect candidate lights inside a fixed
        region of interest, and publish ``Traffic_light`` messages.

        image_msg: ROS image message; decoded as compressed or raw
        depending on ``self.selecting_sub_image``.
        """
        # Stop processing entirely once the traffic-light sequence ended.
        # (Fix: original had `returnr`, which raised NameError here.)
        if self.state == "end":
            return

        if self.mode == 'tuning':
            # Interactive tuning: expose the HSV bounds as trackbars.
            cv2.namedWindow('ColorFilter')

            ilowH, ihighH, ilowS, ihighS, ilowV, ihighV = self.filter_tuning

            # create trackbars for color change
            # NOTE(review): `callback` resolves as a global name here, not this
            # bound method — confirm a module-level `callback` exists.
            cv2.createTrackbar('lowH','ColorFilter',ilowH,255,callback)
            cv2.createTrackbar('highH','ColorFilter',ihighH,255,callback)

            cv2.createTrackbar('lowS','ColorFilter',ilowS,255,callback)
            cv2.createTrackbar('highS','ColorFilter',ihighS,255,callback)

            cv2.createTrackbar('lowV','ColorFilter',ilowV,255,callback)
            #cv2.createTrackbar('highV','image',ihighV,255,callback)

            # get trackbar positions
            ilowH = cv2.getTrackbarPos('lowH', 'ColorFilter')
            ihighH = cv2.getTrackbarPos('highH', 'ColorFilter')
            ilowS = cv2.getTrackbarPos('lowS', 'ColorFilter')
            ihighS = cv2.getTrackbarPos('highS', 'ColorFilter')
            ilowV = cv2.getTrackbarPos('lowV', 'ColorFilter')
            #ihighV = cv2.getTrackbarPos('highV', 'ColorFilter')

            self.filter_tuning = ilowH, ihighH, ilowS, ihighS, ilowV, ihighV

        else:
            # Fixed per-color HSV bounds.
            ilowH, ihighH, ilowS, ihighS, ilowV, ihighV = self.filter_color[self.detecting_color]

        # Decode the incoming frame.
        if self.selecting_sub_image == "compressed":
            # NOTE(review): np.fromstring is deprecated; np.frombuffer is the
            # modern equivalent.
            np_arr = np.fromstring(image_msg.data, np.uint8)
            frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        else:
            frame = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        # HSV mask of the target color range.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_hsv = np.array([ilowH, ilowS, ilowV])
        higher_hsv = np.array([ihighH, ihighS, ihighV])
        mask = cv2.inRange(hsv, lower_hsv, higher_hsv)
        ColorFilter = frame

        ColorFilter = cv2.bitwise_and(ColorFilter, ColorFilter, mask=mask)

        # Inverted binary image: color-matching pixels become dark blobs.
        cv_image_gray = cv2.cvtColor(ColorFilter, cv2.COLOR_RGB2GRAY)
        ret,cv_image_binary = cv2.threshold(cv_image_gray,1,255,cv2.THRESH_BINARY_INV)
        # cv2.imshow('image', cv_image_binary), cv2.waitKey(1)

        # Blob detector tuned for traffic-light sized, roughly round blobs.
        params=cv2.SimpleBlobDetector_Params()
        # Change thresholds
        params.minThreshold = 0
        params.maxThreshold = 255

        # Filter by Area.
        params.filterByArea = True
        params.minArea = 100
        params.maxArea = 800

        # Filter by Circularity
        params.filterByCircularity = True
        params.minCircularity = 0.6

        # Filter by Convexity
        params.filterByConvexity = True
        params.minConvexity = 0.6

        det=cv2.SimpleBlobDetector_create(params)
        keypts=det.detect(cv_image_binary)
        frame=cv2.drawKeypoints(frame,keypts,np.array([]),(0,255,255),cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        # Region of interest (drawn as a rectangle for visual debugging).
        col1 = 300
        col2 = 640

        low1 = 0
        low2 = 200
        frame = cv2.line(frame, (col1, low1), (col1, low2), (255, 255, 0), 5)
        frame = cv2.line(frame, (col1, low2), (col2, low2), (255, 255, 0), 5)
        frame = cv2.line(frame, (col2, low2), (col2, low1), (255, 255, 0), 5)
        frame = cv2.line(frame, (col2, low1), (col1, low1), (255, 255, 0), 5)

        # if detected more than 1 red light
        for i in range(len(keypts)):
            point_col = int(keypts[i].pt[0])
            point_low = int(keypts[i].pt[1])
            print('detected')

            # Only react to keypoints inside the region of interest.
            if point_col > col1 and point_low < low2:
                print('yes')
                if self.detecting_color == 'green':
                    if self.red_count > 0:
                        # Green seen while a red is still remembered: publish
                        # "fast" and terminate the detection state machine.
                        self.red_count = 0
                        self.state = "end"
                        message = Traffic_light()
                        message.color = "fast"
                        message.position_x = point_col
                        message.position_y = point_low
                        self._pub.publish(message)
                    else:
                        self.red_count = 0
                        message = Traffic_light()
                        message.color = "green"
                        message.position_x = point_col
                        message.position_y = point_low
                        self._pub.publish(message)

                if self.detecting_color == 'red':
                    # Remember the red light for roughly the next 30 frames.
                    self.red_count = 30
                    message = Traffic_light()
                    message.color = self.detecting_color
                    message.position_x = point_col
                    message.position_y = point_low
                    self._pub.publish(message)

        # Red memory about to expire without a fresh detection: report green.
        if self.red_count == 1:
            message = Traffic_light()
            message.color = "green"
            message.position_x = 100
            message.position_y = 100
            self._pub.publish(message)

        if self.red_count > 0:
            self.red_count -= 1

        # Alternate the color searched for on every frame.
        if self.detecting_color == 'red':
            self.detecting_color = 'green'
        elif self.detecting_color == 'green':
            self.detecting_color = 'red'

        # showing image
        if self.showing_image == 'yes':
            cv2.imshow("ColorFilter", ColorFilter), cv2.waitKey(1)
            cv2.imshow("detecting stop bar", frame), cv2.waitKey(1)
    def image_callback(self, data):
        """Blob-detect bright sonar/image features in *data*, transform them
        into the vehicle's global frame, and publish them as a PointCloud
        (frame_id 'base_link') plus an annotated debug image.

        data: ROS image message convertible via cv_bridge to "bgr8".
        """
        try:
            image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            # NOTE(review): on conversion failure `image` stays unbound and
            # the code below will raise — consider an early return here.
            print(e)

        # Setup SimpleBlobDetector parameters.
        params = cv2.SimpleBlobDetector_Params()

        # Filter by Area only; all other filters left at their defaults.
        params.filterByArea = True
        params.minArea = 1

        # Create a detector with the parameters (constructor API changed in
        # OpenCV 3, hence the version check).
        ver = (cv2.__version__).split('.')
        if int(ver[0]) < 3:
            detector = cv2.SimpleBlobDetector(params)
        else:
            detector = cv2.SimpleBlobDetector_create(params)
        # Binarize so only bright returns remain before blob detection.
        image = cv2.threshold(image, 150, 255, cv2.THRESH_BINARY)[1]

        keypoints = detector.detect(image)
        pts = [[float(p.pt[0]), float(p.pt[1])] for p in keypoints]

        # Transform feature points from image frame to cartesian camera frame.
        # Fix: initialize polar_points unconditionally so the loop further
        # down does not raise NameError when self.max_fov is unset/zero.
        polar_points = []
        if (self.max_fov):
            for point in pts:
                r = (self.height -
                     point[1]) * self.max_range / self.height  # in metres
                theta = (point[0] - self.origin_col) * self.max_fov / (
                    self.origin_col)  # in degrees
                polar_point = self.cartesian_transform([r, theta])
                polar_points.append(polar_point)

        my_awesome_pointcloud = PointCloud()

        # Transform points to the global frame: rotate by yaw, then translate
        # by the vehicle's navigation position.
        final_points = []
        yaw = self.nav_yaw
        translation_vector = np.array([self.nav_x, self.nav_y])
        # Rotation matrix is loop-invariant, so build it once.
        rotation_matrix = np.array([[math.cos(yaw), -math.sin(yaw)],
                                    [math.sin(yaw),
                                     math.cos(yaw)]])
        for point in polar_points:
            point_vector = np.array([point[0], point[1]])
            rotated_point = np.matmul(rotation_matrix, point_vector)
            translated_point = np.add(rotated_point, translation_vector)
            # Append z = 0 to make a 3D point.
            translated_point = np.append(translated_point, [0.0])
            translated = translated_point.tolist()
            my_awesome_pointcloud.points.append(Point32(*translated))
            final_points.append(translated)

        # Header: stamp with the current time, points expressed in base_link.
        header = std_msgs.msg.Header()
        header.stamp = rospy.Time.now()
        header.frame_id = 'base_link'

        my_awesome_pointcloud.header = header
        self.pcl_pub.publish(my_awesome_pointcloud)
        rospy.loginfo("happily publishing sample pointcloud.. !")

        # Draw the detected keypoints for visual debugging.
        marked_image = cv2.drawKeypoints(image,
                                         keypoints,
                                         None,
                                         color=(0, 255, 0),
                                         flags=0)

        # show image
        cv2.imshow("Image window", marked_image)
        cv2.waitKey(3)

        try:
            self.image_pub.publish(
                self.bridge.cv2_to_imgmsg(marked_image, "bgr8"))
        except CvBridgeError as e:
            print(e)
Exemple #25
0
def circleposition(cap,transform,newcameramtx,mtx,dist,mask,maskcorners):
    """Generator that tracks a dark circular blob in frames from *cap*.

    Per iteration: read a frame, undistort it, convert to grayscale,
    inverse-Otsu threshold, blob-detect, then map the newest blob centre
    through the perspective *transform* and yield ``[xo, yo]``.

    cap           : capture object with .read() -> (ret, frame)
    transform     : 3x3 homography passed to cv2.perspectiveTransform
    newcameramtx, mtx, dist : calibration inputs for cv2.undistort
    mask          : ROI mask (currently unused; masking code is commented out)
    maskcorners   : corner list; only corners [0] and [2] are read here
    """
    # NOTE(review): time.clock() was removed in Python 3.8 — replace with
    # time.perf_counter() when targeting modern Python.
    thresh = 0
    maxValue = 255
    #print "CAP type" + str(type(cap))
    [oldx,oldy] = [0,0]
    #print "maskcorners %s" % str(maskcorners)
    params = cv2.SimpleBlobDetector_Params()
    
    # Change thresholds
    #params.minThreshold = 10;
    #params.maxThreshold = 200;
     
    # Filter by Area.
    params.filterByArea = True
    params.minArea = 75
     
    # Filter by Circularity
    #params.filterByCircularity = True
    #params.minCircularity = 0.8
     
    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.87
     
    # Filter by Inertia
    #params.filterByInertia = True
    #params.minInertiaRatio = 0.01     
    # Prime one frame to learn the image dimensions.
    ret, frame = cap.read()
    detector = cv2.SimpleBlobDetector_create(params)
    h,  w = frame.shape[:2]
    done = False
    # Rolling history of the last three blob positions; l[2] is the newest.
    l=[[1,1],[1,1],[1,1]]
    while True:
        time1 = time.clock()
        ret, frame = cap.read()
        if frame is None:
            # Capture exhausted: end the generator.
            return
            
        #time1 = time.clock()
        frame = cv2.undistort(frame, mtx, dist, None, newcameramtx)
        #time2 = time.clock()
        #print 'undistort %0.3f' % (time2-time1)
        #cv2.imshow('frame',frame)
        #cv2.waitKey(3000)
        ffframe=frame
        #fframe=cv2.flip(frame,0)
        #ffframe=cv2.flip(fframe,1)
        
        # ROI corners (crop itself is commented out below).
        ulx = maskcorners[0][0] #-60 ## make sure we don't go outside the image
        uly = maskcorners[0][1] #-60
        brx = maskcorners[2][0] #+ 60
        bry = maskcorners[2][1] #+ 60
        
        #ffframe = ffframe[uly:bry,ulx:brx]
        #cv2.imshow('cropped',ffframe)
        #cv2.waitKey(3000)
               
        #time1 = time.clock()
        gray = cv2.cvtColor(ffframe, cv2.COLOR_BGR2GRAY)
        #time2 = time.clock()
        #print 'cvtColur %0.3f' % (time2-time1)
        #cv2.imshow('gray',gray)
        #cv2.waitKey(10)
        #time1 = time.clock()
        # Otsu picks the threshold automatically; `thresh` is ignored then.
        th, dst = cv2.threshold(gray, thresh, maxValue, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
        #time2 = time.clock()
        #print 'treshold %0.3f' % (time2-time1)
        #cv2.imshow('dst',dst)
        #cv2.waitKey(10)
        out=dst
        #out = np.zeros_like(dst)
        #out[mask] = dst[mask]
        #cv2.imshow('mask',out)
        #cv2.waitKey(10)

        # Detect blobs.
        dtime1 = time.clock()
        keypoints = detector.detect(out)
        dtime2 = time.clock()
        print('detector %0.6f' % (dtime2-dtime1))
        #print "keypoints" + str(keypoints)
        # Use only the last detected keypoint, if any.
        if len(keypoints) > 0: 
            keypoint = [keypoints[-1]]
        else:
            keypoint = None
            
        #print "keypoint" + str(keypoint)
        if keypoint is not None:
            #print keypoint[0].pt
            circle = keypoint[0].pt
            #print "im alive"
            im_with_keypoints = cv2.drawKeypoints(ffframe, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            cv2.imshow("im_with_keypoints", im_with_keypoints)
            #cv2.imshow("out", out)
            
            #cv2.waitKey(1)
            # Distance from the previous tracked position (debug output).
            print("point diff")
            print(np.absolute(circle[0]-l[2][0])) 
            print(np.absolute(circle[1]-l[2][1]))
            #if (np.absolute(c) > 12 or np.absolute(circle[1]-l[2][1]) > 12): ## and (np.absolute(circle[0]-l[2][0]) < 100 or np.absolute(circle[1]-l[2][1]) < 100):
            # Shift the new position into the rolling history.
            l.append([int(circle[0]),int(circle[1])])
            l=l[1:]
            #else:
            #    raise Exception("hej")
        else:
            # No blob this frame: skip without yielding.
            print("else")
            continue
        
        adpoint = l[2]
        #adpoint[0]=adpoint[0]+ulx
        #adpoint[1]=adpoint[1]+uly
        point = np.array([adpoint],dtype='float32')
        point = np.array([point])
        #time1 = time.clock()
        #pointUndist = cv2.undistortPoints(point, mtx, dist, None, newcameramtx)
        #time2= time.clock()
        #print 'undistortPoints %0.3f' % (time2-time1)
        [[[pux,puy]]] = point #Undist
        #pux=w-pux
        #puy=h-puy
        pointUndist = np.array([[[pux,puy]]],dtype='float32')
        # Map the pixel position through the supplied homography.
        pointOut = cv2.perspectiveTransform(pointUndist, transform)   

        [[[x,y]]]=point
        #[[[xu,yu]]]=pointUndist
        [[[xo,yo]]]=pointOut
        #yield [[x,y],[xo,yo],[xu,yu]]
        
        time2 = time.clock()
        print('findcircle clocktime %0.6f' % (time2-time1))

        yield [xo,yo]
Exemple #26
0
def analyze():
    """Worker loop: wait for new frames published through the module-level
    frame/frame_cnt globals, HSV-threshold and morphologically clean each
    one, blob-detect objects, and write the annotated result into the
    global `analysis` image.

    Runs until the global `exit` flag becomes truthy. Synchronization with
    the producer uses the `frame_cnt_cond` condition variable.
    """
    global frame, frame_cnt, frame_cnt_cond
    global analysis, analysis_cnt
    global analysis_time, analysis_delta
    global align, finish, exit
    global ana_obj, ana_pos, ana_pas
    global ana_idx, ana_seq

    global thr_val

    # Local copy of the latest frame and the count at which it was taken.
    this = None
    this_cnt = 0

    # Region of interest within the frame.
    rx0, ry0, rx1, ry1 = ana_roi
    # hsv_grid = [np.array([0,0,30]), np.array([200,50,160])]
    # hsv_high = [np.array([0,40,0]), np.array([72,255,255])]
    # Small kernel to remove speckle, larger one to close holes.
    kern_open = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kern_close = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

    # Blob detector: area filter only; other filters explicitly disabled.
    par = cv2.SimpleBlobDetector_Params()
    par.minThreshold = 50
    # NOTE(review): 400 exceeds the 8-bit pixel range (255) — presumably
    # intentional/clamped, but verify.
    par.maxThreshold = 400

    par.filterByArea = True
    par.minArea = 1000
    par.maxArea = 70000

    par.filterByColor = False

    par.filterByCircularity = False
    par.minCircularity = 0.2

    par.filterByConvexity = False
    par.minConvexity = 0.8

    par.filterByInertia = False
    par.minInertiaRatio = 0.5

    detector = cv2.SimpleBlobDetector_create(par)

    while not exit:
        # Block until the producer publishes a frame newer than ours.
        with frame_cnt_cond:
            while frame_cnt == this_cnt:
                frame_cnt_cond.wait()
            this = frame.copy()
            this_cnt = frame_cnt

        mark = time()

        # Work only on the configured region of interest, in HSV space.
        roi = this[ry0:ry1, rx0:rx1]
        hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        # msk_grid = cv2.inRange(hsv, hsv_grid[0], hsv_grid[1])
        # msk_high = cv2.inRange(hsv, hsv_high[0], hsv_high[1])
        # msk = cv2.bitwise_not(cv2.bitwise_or(msk_grid, msk_high))
        # res = cv2.bitwise_and(roi, roi, mask=msk)
        # h, s, v = hsv[:, :, 0], hsv[:, :, 1], hsv[:, :, 2]

        h, s, v = cv2.split(hsv)

        # Per-channel thresholds driven by the tunable global thr_val.
        ret, ht = cv2.threshold(h, thr_val[0], 255, cv2.THRESH_BINARY)
        # ret, ht = cv2.threshold(ht, thr_val[1], 0, cv2.THRESH_BINARY_INV)
        ret, st = cv2.threshold(s, thr_val[2], 255, cv2.THRESH_BINARY_INV)
        ret, vt = cv2.threshold(v, thr_val[3], 255, cv2.THRESH_BINARY_INV)

        # thr = cv2.merge((ht, st, vt))
        thr = cv2.merge((ht, st, vt))
        # hsv1 = cv2.cvtColor(thr, cv2.COLOR_BGR2HSV)
        # h1, s1, v1 = cv2.split(hsv1)
        # thr = cv2.threshold(v1, 200, 255, cv2.THRESH_BINARY)
        # thr = cv2.cvtColor(v1, cv2.COLOR_GRAY2BGR)
        # cv2.bitwise_and(thr, thr, ht, mask = ht)
        # cv2.bitwise_and(thr, thr, st, mask = st)

        # AND all three channel masks together (as 3-channel images).
        thr = cv2.bitwise_and(thr, cv2.cvtColor(vt, cv2.COLOR_GRAY2BGR))
        thr = cv2.bitwise_and(thr, cv2.cvtColor(st, cv2.COLOR_GRAY2BGR))
        thr = cv2.bitwise_and(thr, cv2.cvtColor(ht, cv2.COLOR_GRAY2BGR))

        # cv2.bitwise_and(, thr, mask = st)
        # cv2.bitwise_and(thr, thr, mask = ht)

        # ret, thr = cv2.threshold(s, 80, 250, cv2.THRESH_TOZERO)
        # gry = cv2.cvtColor(thr, cv2.COLOR_BGR2GRAY)

        # Open removes noise, close fills small holes.
        mor = cv2.morphologyEx(thr, cv2.MORPH_OPEN, kern_open)
        img = cv2.morphologyEx(mor, cv2.MORPH_CLOSE, kern_close)
        # img = cv2.cvtColor(mor, cv2.COLOR_GRAY2RGB)
        # img = thr

        kpt = detector.detect(img)
        img = cv2.drawKeypoints(img, kpt, np.array([]), \
                                (255, 255, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        # filter channel display options
        # (analyze_filter_id is a module-level setting read here.)
        if analyze_filter_id == 0:
            img = h
        elif analyze_filter_id == 1:
            img = s
        elif analyze_filter_id == 2:
            img = v
        elif analyze_filter_id == 3:
            img = ht
        elif analyze_filter_id == 4:
            img = st
        elif analyze_filter_id == 5:
            img = vt

        # split = align - rx0
        # limit = finish - rx0
        # purge = ignore - rx0

        pos = [0] * len(kpt)

        # Annotate each keypoint; small blobs blue, accepted blobs red.
        for idx, kp in enumerate(kpt):
            xf, yf = kp.pt
            x, y = int(round(xf)), int(round(yf))
            # interesting = False

            pos[idx] = (x, y)

            # print(pos[idx])
            radius = 20
            if kp.size < thr_val[4]:
                # Too small: mark with a thin blue crosshair.
                ovtext(img, "%3d" % kp.size, (x - 30, y - 15), (255, 0, 0))
                cv2.line(img, (x - radius, y), (x + radius, y), (255, 0, 0), 2)
                cv2.line(img, (x, y - radius), (x, y + radius), (255, 0, 0), 2)
            else:
                if 150 < pos[idx][0] < 570 and 150 < pos[idx][1] < 570:
                    # Accepted blob inside the central window: record it.
                    ovtext(img, "%3d" % kp.size, (x - 30, y - 15), (0, 0, 255))
                    # crosshair
                    cv2.line(img, (x - radius, y), (x + radius, y), (0, 0, 255), 7)
                    cv2.line(img, (x, y - radius), (x, y + radius), (0, 0, 255), 7)
                    ana_pos = pos[idx]
                else:
                    ovtext(img, "%3d" % kp.size, (x - 30, y - 15), (255, 255, 255))

        # Publish the annotated image and timing stats to the globals.
        analysis = img
        analysis_cnt += 1

        # NOTE(review): `prev` is assigned but never used.
        prev = analysis_time
        analysis_time = time()
        analysis_delta = (analysis_time - mark) * 1000
Exemple #27
0
def detect(camera):
    """Capture one frame from *camera* (a PiCamera) and detect red, blue
    and green LED blobs.

    Returns a list of [x, y, color_id] entries where color_id is
    0 = red, 1 = blue, 2 = green.
    """
    rawCapture = PiRGBArray(camera)

    # Takes picture
    camera.capture(rawCapture, format="bgr")
    img = rawCapture.array

    # Converting to HSV
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Red hue wraps around 0 in HSV, so combine an upper and a lower band.
    lower_red_u = np.array([152, 15, 114])
    upper_red_u = np.array([175, 120, 255])
    mask_red_u = cv2.inRange(hsv, lower_red_u, upper_red_u)

    lower_red_l = np.array([0, 10, 114])
    upper_red_l = np.array([17, 120, 255])
    mask_red_l = cv2.inRange(hsv, lower_red_l, upper_red_l)

    mask_red = cv2.bitwise_or(mask_red_l, mask_red_u)

    lower_blue = np.array([96, 10, 114])
    upper_blue = np.array([125, 120, 255])
    mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)

    lower_green = np.array([50, 10, 114])
    upper_green = np.array([80, 120, 255])
    mask_green = cv2.inRange(hsv, lower_green, upper_green)

    # Dilate to merge fragmented LED pixels into solid blobs.
    kernal = np.ones((5, 5), np.uint8)
    mask_red = cv2.dilate(mask_red, kernal, iterations=2)
    mask_blue = cv2.dilate(mask_blue, kernal, iterations=2)
    mask_green = cv2.dilate(mask_green, kernal, iterations=2)

    # Blob detector configuration: only thresholds active; every shape
    # filter is explicitly disabled.
    params = cv2.SimpleBlobDetector_Params()

    params.minThreshold = 20
    params.maxThreshold = 200

    params.filterByArea = False
    params.minArea = .1

    params.filterByColor = False
    params.blobColor = 200

    params.filterByCircularity = False
    params.minCircularity = 0.1

    params.filterByConvexity = False
    params.minConvexity = 0.5

    params.filterByInertia = False
    params.minInertiaRatio = 0.01

    # Fix: cv2.SimpleBlobDetector(params) only constructs on OpenCV 2; on
    # OpenCV >= 3 the factory function must be used (same version check the
    # rest of this file uses).
    if int(cv2.__version__.split('.')[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)
    keypoints_red = detector.detect(mask_red)
    keypoints_blue = detector.detect(mask_blue)
    keypoints_green = detector.detect(mask_green)
    keypoints = []

    # Collect [x, y, color_id] per detected LED; best-effort per color.
    try:
        for key in keypoints_red:
            keypoints.append([key.pt[0], key.pt[1], 0])
    except Exception:
        print("Red LED not found")
    try:
        for key in keypoints_blue:
            keypoints.append([key.pt[0], key.pt[1], 1])
    except Exception:
        print('Blue LED not found')
    try:
        for key in keypoints_green:
            keypoints.append([key.pt[0], key.pt[1], 2])
    except Exception:
        print('Green LED not found')

    return keypoints
Exemple #28
0
def main():
    """ROS node: threshold the camera stream by HSV range, blob-detect the
    result, and publish the first blob's position on 'blob_location'.

    All tuning values (HSV bounds and blob-filter parameters) are
    module-level globals updated by the trackbar callbacks registered
    below, so the detector is rebuilt every frame from their current
    values.
    """
    global new_img_available, low_hue, low_sat, low_val, high_hue, high_sat, high_val, color, min_area, max_area, min_circularity, max_circularity, min_inertia_ratio, max_inertia_ratio, min_convexity, max_convexity, min_dist_between_blobs

    rospy.init_node("go_to_green_square")

    # Create a subscriber that starts listening for incoming messages
    # of the type Image on the usb_cam/image_raw topic
    rospy.Subscriber("usb_cam/image_raw", Image, get_image)
    blob_position_pub = rospy.Publisher("blob_location", Point, queue_size=1)

    bridge = cv_bridge.core.CvBridge()

    cv2.namedWindow("Thresholded image", cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('blob_detector/go_to_green_square')
    cv2.namedWindow("Blob parameters", cv2.WINDOW_AUTOSIZE)

    # Here we create the sliders that regulate the range of colour we wish to filter
    cv2.createTrackbar("low_hue", "Thresholded image", low_hue, 179,
                       update_low_hue)
    cv2.createTrackbar("high_hue", "Thresholded image", high_hue, 179,
                       update_high_hue)
    cv2.createTrackbar("low_sat", "Thresholded image", low_sat, 255,
                       update_low_sat)
    cv2.createTrackbar("high_sat", "Thresholded image", high_sat, 255,
                       update_high_sat)
    cv2.createTrackbar("low_val", "Thresholded image", low_val, 255,
                       update_low_val)
    cv2.createTrackbar("high_val", "Thresholded image", high_val, 255,
                       update_high_val)

    # Sliders for the blob-detector parameters.
    cv2.createTrackbar("color", "Blob parameters", color, 255, update_color)
    cv2.createTrackbar("min_area", "Blob parameters", min_area, 307200,
                       update_min_area)
    cv2.createTrackbar("max_area", "Blob parameters", max_area, 307200,
                       update_max_area)
    cv2.createTrackbar("min_circularity", "Blob parameters", min_circularity,
                       100, update_min_circularity)
    cv2.createTrackbar("max_circularity", "Blob parameters", max_circularity,
                       100, update_max_circularity)
    cv2.createTrackbar("min_inertia_ratio", "Blob parameters",
                       min_inertia_ratio, 100, update_min_inertia_ratio)
    cv2.createTrackbar("max_inertia_ratio", "Blob parameters",
                       max_inertia_ratio, 100, update_max_inertia_ratio)
    cv2.createTrackbar("min_convexity", "Blob parameters", min_convexity, 100,
                       update_min_convexity)
    cv2.createTrackbar("max_convexity", "Blob parameters", max_convexity, 100,
                       update_max_convexity)
    cv2.createTrackbar("min_dist_between_blobs", "Blob parameters",
                       min_dist_between_blobs, 640,
                       update_min_dist_between_blobs)

    blob_parameters = cv2.SimpleBlobDetector_Params()

    # Set all filters either on or off
    blob_parameters.filterByColor = True
    blob_parameters.filterByArea = True
    blob_parameters.filterByCircularity = True
    blob_parameters.filterByInertia = True
    blob_parameters.filterByConvexity = True

    while not rospy.is_shutdown():
        # Only show a fresh image when we have one from the camera
        # This is a way of optimising performance
        if new_img_available:
            # Convert the image from Image message to OpenCV image format
            cv_image = bridge.imgmsg_to_cv2(new_img_msg,
                                            desired_encoding='bgr8')

            cv2.imshow('blob_detector/go_to_green_square', cv_image)
            # Show the image for 1 ms. Without this line, the program will not work.
            cv2.waitKey(1)
            # Set the boolean to False, indicating that we have used up the camera image
            new_img_available = False
            hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

            # Create arrays from lower and upper hue, saturation and value limits
            # because the thresholding function cv2.inRange() takes numpy arrays as input

            lower_limits = np.array([low_hue, low_sat, low_val])
            upper_limits = np.array([high_hue, high_sat, high_val])

            # Threshold the image using the defined value ranges that are changable with sliders
            thresholded_image = cv2.inRange(hsv_image, lower_limits,
                                            upper_limits)
            thresholded_image = cv2.bitwise_and(cv_image,
                                                cv_image,
                                                mask=thresholded_image)

            # Show the thresholded image in the same frame where we placed the sliders
            cv2.imshow("Thresholded image", thresholded_image)

            blob_parameters.blobColor = color  # Detect either black or white objects

            # Filter by size
            blob_parameters.minArea = min_area
            blob_parameters.maxArea = max_area

            # Filter by shape
            # NOTE(review): sliders are 0-100, scaled here to 0.0-1.0; under
            # Python 2 `/ 100` would be integer division — assumes Python 3.
            blob_parameters.minCircularity = min_circularity / 100
            blob_parameters.maxCircularity = max_circularity / 100

            blob_parameters.minInertiaRatio = min_inertia_ratio / 100
            blob_parameters.maxInertiaRatio = max_inertia_ratio / 100

            blob_parameters.minConvexity = min_convexity / 100
            blob_parameters.maxConvexity = max_convexity / 100

            # Set the minimum distance that needs to be between two blobs
            # in order for them both to be detected
            blob_parameters.minDistBetweenBlobs = min_dist_between_blobs

            # Define the blob detector
            detector = cv2.SimpleBlobDetector_create(blob_parameters)

            # Use the blob detector to detect shapes on the thresholded image
            keypoints = detector.detect(thresholded_image)

            # Only attempt to access attributes of the detected shapes if any are found
            if len(keypoints) > 0:
                first_blob = keypoints[0]

                x_coord = int(first_blob.pt[0])
                y_coord = int(first_blob.pt[1])
                blob_size = int(first_blob.size)

                print(x_coord, y_coord, blob_size)

                # Publish only x/y; z is left at its default.
                blob_position = Point()
                blob_position.x = x_coord
                blob_position.y = y_coord

                blob_position_pub.publish(blob_position)

    # If the letter Q is pressed on the keyboard, end the while-loop
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Exemple #29
0
    im = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)

    mask = np.zeros_like(im)
    mask[:, :, 0] = cv2.inRange(im, green_filter[0], green_filter[1])
    mask[:, :, 0] = cv2.dilate(mask[:, :, 0], kernel)

    mask[:, :, 1] = cv2.inRange(im, red_filter[0], red_filter[1])
    mask[:, :, 1] = cv2.dilate(mask[:, :, 1], kernel)

    mask[:, :, 2] = cv2.inRange(im, purple_filter[0], purple_filter[1])
    mask[:, :, 2] = cv2.dilate(mask[:, :, 2], kernel)

    return mask


# Module-level blob-detector configuration shared by the code below:
# threshold sweep 0-11, area filter 400-1500 px, circularity 0.5-1.0.
params = cv2.SimpleBlobDetector_Params()

for _name, _value in (
    ('minThreshold', 0),
    ('maxThreshold', 11),
    ('filterByArea', True),
    ('minArea', 400),
    ('maxArea', 1500),
    ('filterByCircularity', True),
    ('minCircularity', 0.5),
    ('maxCircularity', 1.0),
):
    setattr(params, _name, _value)
del _name, _value  # keep the module namespace clean
Exemple #30
0
def getKeypoints(img):
    """Morphologically close *img* to fill small holes, then run a
    default-configured SimpleBlobDetector on it.

    Returns the list of detected cv2.KeyPoint objects.
    """
    closing_kernel = np.ones((5, 5), np.uint8)
    closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, closing_kernel)
    blob_detector = cv2.SimpleBlobDetector_create(cv2.SimpleBlobDetector_Params())
    return blob_detector.detect(closed)