Example #1
    def _filter_hue(self, mid, include_distance=5):
        """Return bitmask of image where hue is within include_distance of mid.

        Args:

            mid - Hue to target, in OpenCV's halved scale (real hue / 2),
            e.g. (30 / 2) for orange.

            include_distance - Hue distance from mid that should be included in
            bitmask.


        """

        if mid - include_distance < 0:
            # We're calculating a value too near the 0 end of the hue circle.
            # Some values near 180 need to be included in this mask.
            low_side = cv2.inRange(self.im_hue, np.array(0), np.array(mid + include_distance))
            high_side = cv2.inRange(self.im_hue, np.array(180 + (mid - include_distance)), np.array(180))
            return cv2.bitwise_or(low_side, high_side)
        elif mid + include_distance >= 180:
            # We're calculating a value too near the 180 end of the hue circle.
            # Some values near 0 need to be included in this mask.
            # the band wraps past 180, so the low side runs from 0 up to the overflow
            low_side = cv2.inRange(self.im_hue, np.array(0), np.array(mid + include_distance - 180))
            high_side = cv2.inRange(self.im_hue, np.array(mid - include_distance), np.array(180))
            return cv2.bitwise_or(low_side, high_side)
        return cv2.inRange(self.im_hue, np.array(mid - include_distance), np.array(mid + include_distance))
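In OpenCV, hue spans 0-179, so a target near 0 (red) needs two inRange masks joined with bitwise_or. A minimal standalone sketch of the same wrap-around handling, assuming a hypothetical input file "frame.jpg":

import cv2
import numpy as np

img = cv2.imread("frame.jpg")  # hypothetical input image
hue = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))[0]

mid, dist = 2, 5  # red sits near 0 on the hue circle, so mid - dist < 0
low_side = cv2.inRange(hue, np.array(0), np.array(mid + dist))
high_side = cv2.inRange(hue, np.array(180 + (mid - dist)), np.array(180))
mask = cv2.bitwise_or(low_side, high_side)  # pixels within 5 hue steps of red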
Example #2
    def getLabelMask(self):
        # returns a mask of the coloured (non-white, non-black) pixels
        imgHSV = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

        #white (note: with H and S pinned to 0, this range only matches pure greys)
        lowerWhite = np.array([0,0,0], dtype=np.uint8)
        upperWhite = np.array([0,0,255], dtype=np.uint8)
        maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)

        #black
        lowerBlack = np.array([0, 0, 0], dtype=np.uint8)
        upperBlack = np.array([180, 255, 30], dtype=np.uint8)
        maskBlack = cv2.inRange(imgHSV, lowerBlack, upperBlack)

        mask = cv2.bitwise_or(maskWhite, maskBlack)
        mask = cv2.bitwise_not(mask)

        imgGRAY = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        lowerWhite = np.array([250], dtype=np.uint8)
        upperWhite = np.array([255], dtype=np.uint8)
        maskWhite = cv2.inRange(imgGRAY, lowerWhite, upperWhite)

        lowerBlack = np.array([0], dtype=np.uint8)
        upperBlack = np.array([5], dtype=np.uint8)
        maskBlack = cv2.inRange(imgGRAY, lowerBlack, upperBlack)

        mask1 = cv2.bitwise_or(maskWhite, maskBlack)
        mask1 = cv2.bitwise_not(mask1)

        mask = cv2.bitwise_and(mask, mask1)

        return mask
Example #3
 def threshold(self, frame):
     '''Returns a binary image with pixels falling into the appropriate hsv range.'''
     mask = cv2.inRange(frame, self.hsv_values[:, 0], self.hsv_values[:, 1])
     if self.hsv_values2 is not None:
         mask2 = cv2.inRange(frame, self.hsv_values2[:, 0], self.hsv_values2[:, 1])
         cv2.bitwise_or(mask, mask2, dst=mask)
     return mask
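The hsv_values arrays here are presumably shaped (3, 2), with column 0 holding the lower HSV bound and column 1 the upper; a hedged sketch of building one (the yellow-ish values are illustrative, not from the source):

import numpy as np

# one row per channel (H, S, V); column 0 = lower bound, column 1 = upper bound
hsv_values = np.array([[20, 35],    # hue
                       [60, 255],   # saturation
                       [60, 255]],  # value
                      dtype=np.uint8)
lower, upper = hsv_values[:, 0], hsv_values[:, 1]  # what threshold() passes to inRange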
Example #4
def mask_thinning(img):
    """
    returns the skeleton (thinned image) of a mask.
    This uses `thinning.guo_hall_thinning` if available and otherwise falls back
    to a slow python implementation taken from 
    http://opencvpython.blogspot.com/2012/05/skeletonization-using-opencv-python.html
    """
    try:
        import thinning
    except ImportError:
        # thinning module was not available and we use a python implementation
        size = np.size(img)
        skel = np.zeros(img.shape, np.uint8)
         
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        while True:
            eroded = cv2.erode(img, kernel)
            temp = cv2.dilate(eroded, kernel)
            cv2.subtract(img, temp, temp)
            cv2.bitwise_or(skel, temp, skel)
            img = eroded
         
            zeros = size - cv2.countNonZero(img)
            if zeros==size:
                break
    else:
        # use the imported thinning algorithm
        skel = thinning.guo_hall_thinning(img)
        
    return skel
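A quick usage sketch for mask_thinning, assuming a binary uint8 mask where non-zero pixels form the shape:

import cv2
import numpy as np

mask = np.zeros((100, 100), np.uint8)
cv2.rectangle(mask, (20, 40), (80, 60), 255, -1)  # a thick bar to thin
skeleton = mask_thinning(mask)  # thinned skeleton, same shape as mask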
Example #5
 def combine_masks(masks):
     if len(masks) == 1:
         return masks[0]
     resulting_mask = Image.from_attributes(masks[0].width, masks[0].height, ColorMode.BLACK_AND_WHITE)
     # accumulate into the result; ORing masks[i - 1] with masks[i] overwrote earlier masks
     cv2.bitwise_or(masks[0].cv_image, masks[1].cv_image, resulting_mask.cv_image)
     for i in range(2, len(masks)):
         cv2.bitwise_or(resulting_mask.cv_image, masks[i].cv_image, resulting_mask.cv_image)
     return resulting_mask
Example #6
def morphological_skeleton(img, maxIter=1024):
    """Compute the morphological skeleton of a binary image.

    Args:
        img (numpy.ndarray): Binary uint8 input image.
        maxIter (int, optional): Upper bound on the number of erosion iterations.

    Returns:
        tuple: (skeleton image, number of iterations performed)
    """
    # url: http://felix.abecassis.me/2011/09/opencv-morphological-skeleton/
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

    height, width = img.shape[:2]
    skel = np.zeros((height, width, 1), np.uint8)
    temp = np.zeros((height, width, 1), np.uint8)

    done = False
    nbIteration = 0
    while not done:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        cv2.subtract(img, temp, temp)
        cv2.bitwise_or(skel, temp, skel)
        img = eroded
        nbIteration += 1
        done = (cv2.countNonZero(img) == 0) or (nbIteration >= maxIter)

    return skel, nbIteration
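Unlike the other skeleton helpers in this collection, morphological_skeleton returns a tuple, so callers unpack both values; a minimal sketch:

import cv2
import numpy as np

binary = np.zeros((60, 60), np.uint8)
cv2.rectangle(binary, (10, 25), (50, 35), 255, -1)
skel, n_iter = morphological_skeleton(binary)  # skeleton image plus iteration count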
Example #7
def blob_process(cv_image):
  hsv_cv_image = convert_bgr2hsv(cv_image)

  yellow_mask_hsv = cv2.inRange(hsv_cv_image, YELLOW_MIN_HSV, YELLOW_MAX_HSV)
  red_mask_hsv = cv2.inRange(hsv_cv_image, RED_MIN_HSV, RED_MAX_HSV)
  green_mask_hsv = cv2.inRange(hsv_cv_image, GREEN_MIN_HSV, GREEN_MAX_HSV)
  blue_mask_hsv = cv2.inRange(hsv_cv_image, BLUE_MIN_HSV, BLUE_MAX_HSV)

  yellow_masked_img = cv2.bitwise_and(cv_image, cv_image, mask=yellow_mask_hsv)
  red_masked_img = cv2.bitwise_and(cv_image, cv_image, mask=red_mask_hsv)
  green_masked_img = cv2.bitwise_and(cv_image, cv_image, mask=green_mask_hsv)
  blue_masked_img = cv2.bitwise_and(cv_image, cv_image, mask=blue_mask_hsv)

  yr_or_img = cv2.bitwise_or(yellow_masked_img, red_masked_img)
  bg_or_img = cv2.bitwise_or(green_masked_img, blue_masked_img)
  final_or_img = cv2.bitwise_or(yr_or_img, bg_or_img)
  #final_or_img = cv2.bitwise_or(yr_or_img, green_masked_img)

  #kernel = np.ones((5,5),np.uint8)
  kernel = np.array([[0,1,1,1,0],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[0,1,1,1,0]],np.uint8)

  eroded_masked_img = cv2.erode(final_or_img, kernel, iterations = 1)
  dilatated_masked_img = cv2.dilate(eroded_masked_img,kernel,iterations = 2)
  #ilatated_masked_img = cv2.dilate(cv_image,kernel,iterations = 1)
  #bilateral = cv2.bilateralFilter(cv_image, 2,25,25)
  return dilatated_masked_img
  #return bilateral
  # NOTE: the lines below were unreachable (they follow the return) and reference
  # an undefined processed_cv_image, so they are kept only as a commented-out sketch
  #bouy_detector = cv2.SimpleBlobDetector()
  #keypoints = bouy_detector.detect(processed_cv_image)
  #cv2.drawKeypoints(processed_cv_image, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
  #return processed_cv_image
Example #8
def f2f(bg, curr, bgMask):
    sift = cv2.SIFT(nOctaveLayers=3, contrastThreshold=0.05, edgeThreshold=10)
    prev = bg.toImg()
    N, M, _ = prev.shape
    kp1, des1 = sift.detectAndCompute(prev, bgMask)
    kp2, des2 = sift.detectAndCompute(curr, None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
    if len(good) > 10:
        src_pts = np.float32(
                [kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32(
                [kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 7.0)
        
    # NOTE: H is only defined when len(good) > 10; fewer matches raise a NameError here
    print H
    print len(kp1), len(kp2), len(good)
    
    img4 = cv2.warpPerspective(curr, H, (M, N), flags=cv2.WARP_INVERSE_MAP)
    mask4 = cv2.warpPerspective(np.zeros((720,1280), dtype=np.uint8), H, (M, N),
                                flags=cv2.WARP_INVERSE_MAP,  borderValue=255)
    mask4 = cv2.bitwise_not(mask4)
    bg.add(img4, mask4)
    cv2.bitwise_or(bgMask, mask4, dst=bgMask)
    return H
Example #9
    def find_appropriate_mask(self, img):
        # Let's build a simple mask to separate pieces from background
        # just by checking if pixel is in range of some colours
        m = 0
        res = None

        # Here we calculate mask to separate background of the scanner
        scanner_bg = self.replace_scanner_background(img)

        # And here we are trying to check different ranges for different
        # background to find the winner.
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        cv2.imwrite("foobar.jpg", hsv)

        for bg in self.backgrounds:
            mask = np.zeros(img.shape[:2], np.uint8)

            for rng in bg:
                # As each pre-defined background is described by a list of
                # color ranges we are summing up their masks
                mask = cv2.bitwise_or(mask, cv2.inRange(hsv, rng[0], rng[1]))

            hist = cv2.calcHist([mask], [0], None, [2], [0, 256])
            # And here we are searching for a biggest possible mask across all
            # possible predefined backgrounds
            if hist[1] > m:
                m = hist[1]
                res = mask

        # then we remove scanner background
        res = cv2.bitwise_or(scanner_bg, res)
        res = cv2.morphologyEx(res, cv2.MORPH_OPEN, np.ones((3, 7), np.uint8))

        return cv2.bitwise_not(res)
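With two bins over [0, 256), hist[1] from calcHist is just the count of white (255) pixels in the binary mask; a hedged equivalence check using countNonZero:

import cv2
import numpy as np

mask = np.zeros((4, 4), np.uint8)
mask[1:3, 1:3] = 255
hist = cv2.calcHist([mask], [0], None, [2], [0, 256])
assert int(hist[1][0]) == cv2.countNonZero(mask)  # both count the 255 pixels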
Example #10
File: OCR.py Project: xancros/TFG
def redAreaDetection(image, name, show=False):
    img = image.copy()
    test3 = getBinaryInvMask(img)
    pp = cv2.bitwise_and(img, img, mask=test3)
    # convert colour space RGB -> HSV
    img = image.copy()
    b, g, r = cv2.split(img)
    r = preProcessImage(r)
    g = preProcessImage(g)
    b = preProcessImage(b)
    rgb = [b, g, r]
    prc = cv2.merge(rgb)
    prueba = cv2.cvtColor(prc, cv2.COLOR_BGR2HSV)
    imag2 = cv2.cvtColor(pp, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(imag2)
    # s += 50
    s = cv2.equalizeHist(s)
    v = cv2.equalizeHist(v)
    chs = [h, s, v]
    imgRes = cv2.merge(chs)
    # cv2.imshow("source",img)
    # cv2.imshow("imag2",imag2)
    # imgRes = imag2.copy()
    test = cv2.cvtColor(imgRes, cv2.COLOR_HSV2BGR)
    im = cv2.inRange(imgRes, (0, 100, 30), (15, 255, 255))
    im2 = cv2.inRange(imgRes, (160, 100, 30), (180, 255, 255))
    #### imgF = im + im2
    imgF = cv2.bitwise_or(im, im2)
    imP = cv2.inRange(prueba, (0, 100, 30), (15, 255, 255))
    imP2 = cv2.inRange(prueba, (160, 100, 30), (180, 255, 255))
    ##### imgFP = imP + imP2
    imgFP = cv2.bitwise_or(imP, imP2)
    # cv2.imshow("PRUEBA", test)
    #
    # printHSV_Values(imgRes)
    # printHSV_Values(prueba)

    ##### final = imgF + imgFP
    final = cv2.bitwise_or(imgF, imgFP)
    # cv2.imshow("PRUEBA2", test3)
    # cv2.imshow("pr",imP)
    # cv2.imshow("ORIGINAL", imP2)
    # cv2.imshow("imgF",imgF)
    # cv2.imshow("imFP",imgFP)
    # cv2.imshow("final",final)
    # cv2.waitKey(800)
    # cv2.destroyAllWindows()

    if (show):
        cv2.imshow("image", image)
        cv2.imshow("win1", im)
        cv2.imshow("win2", im2)
        cv2.imshow("win3", imgF)
        cv2.waitKey()
        cv2.destroyWindow("image")
        cv2.destroyWindow("win1")
        cv2.destroyWindow("win2")
        cv2.destroyWindow("win3")
    return final
Example #11
def combine_images():
    im1 = cv2.imread("/Users/fogleman/Workspace/maptiles/zoom18t.png")
    im2 = cv2.imread("/Users/fogleman/Workspace/maptiles/zoom19t.png")
    im3 = cv2.imread("/Users/fogleman/Workspace/maptiles/zoom20s.png")
    im1 = isolate_buildings(im1)
    im2 = isolate_buildings(im2)
    im3 = isolate_buildings(im3)
    im = cv2.bitwise_or(cv2.bitwise_or(im1, im2), im3)
    return im
Example #12
def splitNmerge(mask, image):
	splitted = cv2.split(image)
	blue = splitted[0]
	green = splitted[1]
	red = splitted[2]

	blue = cv2.bitwise_or(blue, mask)
	green = cv2.bitwise_or(green, mask)
	red = cv2.bitwise_or(red, mask)

	return cv2.merge([blue,green,red])
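ORing a binary mask into each channel forces masked pixels to white while leaving the rest untouched. A hedged equivalent that skips the split/merge round trip by expanding the mask to three channels first:

import cv2
import numpy as np

image = np.zeros((100, 100, 3), np.uint8)  # stand-in BGR image
mask = np.zeros((100, 100), np.uint8)
cv2.circle(mask, (50, 50), 20, 255, -1)

# same result as splitNmerge(mask, image): masked pixels become white
whitened = cv2.bitwise_or(image, cv2.merge([mask, mask, mask]))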
Example #13
def make_mask(background, foreground, hsv):
  withoutback = cv2.subtract(foreground, background)
  channels = cv2.split(withoutback)
  satchannel = cv2.split(hsv)[1]
  retval, blue = cv2.threshold(channels[0], 10, 255, cv2.THRESH_BINARY)
  retval, green = cv2.threshold(channels[1], 10, 255, cv2.THRESH_BINARY)
  retval, red = cv2.threshold(channels[2], 10, 255, cv2.THRESH_BINARY)
  retval, sat = cv2.threshold(satchannel, 20, 255, cv2.THRESH_BINARY)
  rgb = cv2.bitwise_or(cv2.bitwise_or(blue, green), red)
  base_mask = cv2.bitwise_and(rgb, sat)
  return cv2.erode(base_mask, disk)  # `disk` is assumed to be a module-level structuring element
Example #14
 def get_binary_img(self, check_puck=False):
     if check_puck:
         mask_dark = cv2.inRange(self.image, self.green_lower_dark_bgr, self.green_upper_dark_bgr)
         mask = cv2.inRange(self.image, self.green_lower_bgr, self.green_upper_bgr)
         mask_light = cv2.inRange(self.image, self.green_lower_light_bgr, self.green_upper_light_bgr)
         mask_lighter = cv2.inRange(self.image, self.green_lower_superlight_bgr, self.green_upper_superlight_bgr)
         return cv2.bitwise_or(cv2.bitwise_or(mask_dark, mask), cv2.bitwise_or(mask_light, mask_lighter))
     else:
         mask_dark = cv2.inRange(self.image, self.blue_dark_lower_bgr, self.blue_dark_upper_bgr)
         # NOTE: this reuses the dark-blue bounds, so mask equals mask_dark; a separate
         # light-blue range was presumably intended here
         mask = cv2.inRange(self.image, self.blue_dark_lower_bgr, self.blue_dark_upper_bgr)
         return cv2.bitwise_or(mask, mask_dark)
Example #15
def otsuThree(img, img_file, man_img, mask=None):

    blur = cv2.GaussianBlur(img, (5,5),0)

    thresholds = threeThresholdOtsu(blur,mask)
    th1 = thresholds[0]
    th2 = thresholds[1]
    th3 = thresholds[2]


    if mask is None:
        ret, thresh1 = cv2.threshold(blur,th1,150,cv2.THRESH_BINARY)
        ret, thresh2 = cv2.threshold(blur,th2,150,cv2.THRESH_BINARY_INV)
        lower_img = cv2.bitwise_and(thresh1, thresh2, mask=None)
        ret, thresh2 = cv2.threshold(blur, th2, 200, cv2.THRESH_BINARY)
        ret, thresh3 = cv2.threshold(blur, th3, 200, cv2.THRESH_BINARY_INV)
        middle_img = cv2.bitwise_and(thresh2, thresh3, mask=None)
        ret, upper_img = cv2.threshold(blur, th3, 255, cv2.THRESH_BINARY)
    else:
        combined_img = cv2.bitwise_and(blur, blur, mask=mask)
        ret, thresh1 = cv2.threshold(combined_img,th1,150,cv2.THRESH_BINARY)
        ret, thresh2 = cv2.threshold(combined_img,th2,150,cv2.THRESH_BINARY_INV)
        lower_img = cv2.bitwise_and(thresh1, thresh2, mask=mask)
        ret, thresh2 = cv2.threshold(blur, th2, 200, cv2.THRESH_BINARY)
        ret, thresh3 = cv2.threshold(blur, th3, 200, cv2.THRESH_BINARY_INV)
        middle_img = cv2.bitwise_and(thresh2, thresh3, mask=mask)
        ret, upper_img = cv2.threshold(blur, th3, 255, cv2.THRESH_BINARY)


    out_img_one = cv2.bitwise_or(lower_img, middle_img, mask=mask)
    out_img_o = cv2.bitwise_or(out_img_one, upper_img, mask=mask)

    out_info_o = "_otsu_%d-%d-%d" % (th1, th2, th3)
    out_str_o = out_info_o + '.png'
    out_file_o = re.sub(r'\.jpg', out_str_o, img_file)
    cv2.imwrite(out_file_o, out_img_o)
    t = evaluation.findTotals(out_img_o, man_img)
    f = open('o3_all.txt', 'a')
    f.write(img_file + " " + str(t[0]) + " " + str(t[1]) + " " + str(t[2]) + " " + str(t[3]) + "\n")
    f.close()
    t = evaluation.findTotalsOtsu3(out_img_o, man_img, 150)
    f = open('o3_t1-t2_all.txt', 'a')
    f.write(img_file + " " + str(t[0]) + " " + str(t[1]) + " " + str(t[2]) + " " + str(t[3]) + "\n")
    f.close()
    t = evaluation.findTotalsOtsu3(out_img_o, man_img, 200)
    f = open('o3_t2-t3_all.txt', 'a')
    f.write(img_file + " " + str(t[0]) + " " + str(t[1]) + " " + str(t[2]) + " " + str(t[3]) + "\n")
    f.close()
    t = evaluation.findTotalsOtsu3(out_img_o, man_img, 255)
    f = open('o3_t3-max_all.txt', 'a')
    f.write(img_file + " " + str(t[0]) + " " + str(t[1]) + " " + str(t[2]) + " " + str(t[3]) + "\n")
    f.close()
Example #16
def mask_thinning(img, method='auto'):
    """
    returns the skeleton (thinned image) of a mask.
    This uses `thinning.guo_hall_thinning` if available and otherwise falls back
    to a slow python implementation taken from 
    http://opencvpython.blogspot.com/2012/05/skeletonization-using-opencv-python.html
    Note that this implementation is not equivalent to guo_hall implementation
    """
    # try importing the thinning module
    try:
        import thinning
    except ImportError:
        thinning = None
    
    # determine the method to use if automatic method is requested
    if method == 'auto':
        if thinning is None:
            method = 'python'
        else:
            method = 'guo-hall'
    
    # do the thinning with the requested method
    if method == 'guo-hall':
        if thinning is None:
            raise ImportError('Using the `guo-hall` method for thinning '
                              'requires the `thinning` module, which could not '
                              'be imported.')
        skel = thinning.guo_hall_thinning(img)
    
    elif method == 'python':
        # thinning module was not available and we use a python implementation
        size = np.size(img)
        skel = np.zeros(img.shape, np.uint8)
         
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        while True:
            eroded = cv2.erode(img, kernel)
            temp = cv2.dilate(eroded, kernel)
            cv2.subtract(img, temp, temp)
            cv2.bitwise_or(skel, temp, skel)
            img = eroded
         
            zeros = size - cv2.countNonZero(img)
            if zeros==size:
                break
            
    else:
        raise ValueError('Unknown thinning method `%s`' % method)
        
    return skel
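A usage sketch that exercises each path of this refactored helper, assuming a binary uint8 mask; the guo-hall branch only works when the optional thinning module is installed:

import cv2
import numpy as np

mask = np.zeros((64, 64), np.uint8)
cv2.circle(mask, (32, 32), 20, 255, -1)
skel = mask_thinning(mask, method='python')  # pure-Python fallback, always available
# mask_thinning(mask, method='guo-hall')     # raises ImportError without `thinning`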
Example #17
 def camCallback(self, rosimg):
     rospy.loginfo("Inside cam")
     cvImg = Utils.rosimg2cv(rosimg)
     board = Img(cvImg, b_lowThresh, b_hiThresh)
     self.drawBoard(board.maskImg,board.mask_bgr)
     peg = Img(cvImg, p_lowThresh, p_hiThresh)
     slot = Img(cvImg, s_lowThresh, s_hiThresh)
     slot.maskImg = static.morph(slot.maskImg)
     slot.mask_bgr = cv2.cvtColor(slot.maskImg, cv2.COLOR_GRAY2BGR)
     total_img = cv2.bitwise_or(slot.mask_bgr, peg.mask_bgr)
     total_img = cv2.bitwise_or(total_img, board.mask_bgr)
     self.drawSlot(slot.maskImg, total_img)
     self.drawPeg(peg.maskImg, total_img)
     self.publishImg(board, total_img)
Example #18
def fillMaskAreaWithLightDark(img, imgMask, flag_backgroundIsLight):
    """Fill masked out area with light or dark."""
    # ATTN: unfinished
    imgMaskInverted = 255 - imgMask
    # imgMaskInverted = cv2.bitwise_not(imgMask)
    # dicerfuncs.cvImgShow("INVERTEST MASK",imgMaskInverted)
    # return img
    # print imgMask
    outImg = img.copy()
    # outImg = dicerfuncs.makeBinaryImageMaskForImg(img)
    if (flag_backgroundIsLight):
        cv2.bitwise_or(img, 255, outImg, mask=imgMaskInverted)
    else:
        cv2.bitwise_and(img, 0, outImg, mask=imgMaskInverted)
    return outImg
Example #19
def get_skel(img):
    _, img = cv2.threshold(img, 187, 255, cv2.THRESH_BINARY)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    skel = np.zeros(img.shape, img.dtype)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False
    while not done:
        temp = cv2.morphologyEx(img, cv2.MORPH_OPEN, element)
        # bitwise_or(temp, temp) was a no-op; the standard algorithm inverts the opening here
        temp = cv2.bitwise_not(temp)
        temp = cv2.bitwise_and(img, temp, temp)
        skel = cv2.bitwise_or(skel, temp, skel)
        img = cv2.erode(img, element)
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(img)
        done = (maxVal == 0)
    return skel
Example #20
def preprocess_hsv(hsv):
    beige = cv2.inRange(hsv, np.array([75, 55, 80], np.uint8), np.array([135, 222, 255], np.uint8))
    orange = cv2.inRange(hsv, np.array([105, 120, 70], np.uint8), np.array([128, 255, 255], np.uint8))
    blue = cv2.inRange(hsv, np.array([0, 116, 90], np.uint8), np.array([32, 255, 241], np.uint8))
    black = cv2.inRange(hsv, np.array([0, 0, 5], np.uint8), np.array([175, 160, 75], np.uint8))
    colored = beige
    colored = cv2.bitwise_or(colored, orange)
    colored = cv2.bitwise_or(colored, blue)
    colored = cv2.bitwise_or(colored, black)
    for i in range(15):
        colored = cv2.morphologyEx(colored, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_CROSS, (9, 9)))
        colored = cv2.morphologyEx(colored, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7)))
        colored = cv2.morphologyEx(colored, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_CROSS, (15, 15)))
        colored = cv2.blur(colored, (3, 3))
    return colored
Example #21
def skeletonization(img):
    '''
    http://opencvpython.blogspot.ru/2012/05/skeletonization-using-opencv-python.html
    '''
    img = img.copy()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    size = np.size(img)
    skel = np.zeros(img.shape, np.uint8)

    # ret, img = cv2.threshold(img, 127, 255, 0)
    img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 7, 2)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

    while True:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img = eroded.copy()

        zeros = size - cv2.countNonZero(img)
        if zeros == size:
            break

    cv2.imwrite("skel.png", skel)
    return skel
Example #22
def play():
    cap = cv2.VideoCapture("sep/25_68351.avi")
    ms = pickle.load(open("sep/25_68351.ms", "rb"))
    nms = rollingMean(ms)
    # vis(ms, nms)
    fn = 0
    ret, iframe = cap.read()
    H, W, _ = iframe.shape
    tpl = template()
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        fn += 1
        res, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(ms[fn])
        # print mtxR
        # print mtxQ
        import ipdb

        ipdb.set_trace()
        crotMean = cv2.warpPerspective(tpl, nms[fn], (W, H))
        frame[..., 1] = cv2.bitwise_or(crotMean, frame[..., 1])
        cv2.imshow("frame", frame)
        key = cv2.waitKey(5) & 0xFF
        if key == ord("q"):
            return
Example #23
    def _colorFilter(self, color):
        # threshold colors in HSV space
        if color == 'white':
            bw = cv2.inRange(self.hsv, self.hsv_white1, self.hsv_white2)
        elif color == 'yellow':
            bw = cv2.inRange(self.hsv, self.hsv_yellow1, self.hsv_yellow2)
        elif color == 'red':
            bw1 = cv2.inRange(self.hsv, self.hsv_red1, self.hsv_red2)
            bw2 = cv2.inRange(self.hsv, self.hsv_red3, self.hsv_red4)
            bw = cv2.bitwise_or(bw1, bw2)
        elif color == 'green':
            bw = cv2.inRange(self.hsv, self.hsv_green1, self.hsv_green2)
        elif color == 'blue':
            bw = cv2.inRange(self.hsv, self.hsv_blue1, self.hsv_blue2)
        elif color == 'no':
            bw = cv2.inRange(self.hsv, self.hsv_no1, self.hsv_no2)
        else:
            raise Exception('Error: Undefined color strings...')

        # binary dilation
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.dilation_kernel_size, self.dilation_kernel_size))
        bw = cv2.dilate(bw, kernel)
        
        # refine edge for certain color
        edge_color = cv2.bitwise_and(bw, self.edges)

        return bw, edge_color
Example #24
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    # determine the area (i.e. total number of pixels in the image),
    # initialize the output skeletonized image, and construct the
    # morphological structuring element
    area = image.shape[0] * image.shape[1]
    skeleton = np.zeros(image.shape, dtype="uint8")
    elem = cv2.getStructuringElement(structuring, size)

    # keep looping until the erosions remove all pixels from the
    # image
    while True:
        # erode and dilate the image using the structuring element
        eroded = cv2.erode(image, elem)
        temp = cv2.dilate(eroded, elem)

        # subtract the temporary image from the original, eroded
        # image, then take the bitwise 'or' between the skeleton
        # and the temporary image
        temp = cv2.subtract(image, temp)
        skeleton = cv2.bitwise_or(skeleton, temp)
        image = eroded.copy()

        # if there are no more 'white' pixels in the image, then
        # break from the loop
        if area == area - cv2.countNonZero(image):
            break

    # return the skeletonized image
    return skeleton
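A short usage sketch for skeletonize, assuming a pre-thresholded binary image:

import cv2
import numpy as np

img = np.zeros((80, 200), dtype="uint8")
cv2.rectangle(img, (10, 30), (190, 50), 255, -1)  # a filled bar to reduce
skel = skeletonize(img, size=(3, 3))  # structuring element size; rectangular by default
cv2.imwrite("skeleton.png", skel)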
Example #25
def get_card_properties_v2(card, debug=False):

    # convert to HSV colorspace and get binary representation of image
    bin_sat_val, hue, sat, val = get_binary_from_hsv(card)
    binary = cv2.bitwise_or(bin_sat_val, get_canny(card))

    # get contours from binary image
    contours, contour_areas, contour_boxes, contour_centers = get_contour_info(binary, True)

    # crop image so we're only looking at the bounding rectangle
    x, y, w, h = contour_boxes[0]
    hue_crop = hue[y : y + h, x : x + w]

    # no more than 3 shapes per card
    prop_num_init = get_dropoff([b[2] * b[3] for b in contour_boxes], maxratio=1.2)
    prop_num = prop_num_init if prop_num_init < 3 else 3
    prop_col = get_color_from_hue(hue_crop)
    prop_shp = get_shape_from_contour(contours[0], contour_boxes[0])
    prop_tex = get_texture_from_hue_val(hue, val, contour_boxes[0])

    if debug:
        pretty_print_properties([(prop_num, prop_col, prop_shp, prop_tex)])
        util.show(card)

    return (prop_num, prop_col, prop_shp, prop_tex)
Example #26
def objectIdentification(imageFile) :
   image = imageRead(imageFile) 
   accumEdged = np.zeros(image.shape[:2], dtype="uint8")
   # loop over the blue, green, and red channels, respectively
   for chan in cv2.split(image):
        # blur the channel, extract edges from it, and accumulate the set
        # of edges for the image
        chan = cv2.medianBlur(chan, 11)
        edged = cv2.Canny(chan, 50, 200)
        accumEdged = cv2.bitwise_or(accumEdged, edged)
   # show the accumulated edge map
   # cv2.imshow("Edge Map", accumEdged)
   # find contours in the accumulated image, keeping only the largest
   # ones
   (cnts, _) = cv2.findContours(accumEdged.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
   orig = image.copy()

   print len(cnts)
   my_list = []
   for c in cnts :
      area = cv2.contourArea(c)
      print "Area",area
      my_list.append(area)

   mi = max(my_list)
   index = my_list.index(mi)
   newC = cnts[index]
   orig = draw_contour(orig, newC, 1)

   # show the original, unsorted contour image
   # cv2.imshow("Unsorted", orig)
   return newC
Example #27
 def find_green_buoy(img):
     h1 = 51
     s1 = 0
     v1 = 0
     h2 = 83
     s2 = 255
     v2 = 255
     lower_value = np.array([h1, s1, v1], np.uint8)
     upper_value = np.array([h2, s2, v2], np.uint8)
     #endthresholding
     temp = np.array(0)
     cv2.rectangle(img,(0,0),(960,200),(0,0,0),-1)        #kernel = np.ones((150,150),np.uint8)
     mask = cv2.inRange(img, lower_value, upper_value)
     bImg = cv2.bitwise_or(mask, temp)
     #cv2.rectangle(thresh,(0,0),(960,200),(0,0,0),-1)        #kernel = np.ones((150,150),np.uint8)
     #bImg = cv2.morphologyEx(bImg, cv2.MORPH_CLOSE, kernel)#messed something up was included in a tutorial not entirely required here
     ret, thresh = cv2.threshold(bImg, 127, 255, 0)
     #end thresholding and building heat mask
     contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
     contours = contour_area_quick_sort(contours)
     if len(contours)>0:
         moment0 = cv2.moments(contours[0])
         if moment0['m00']==0: return 0
         cx0 = int(moment0['m10']/moment0['m00'])
         cy0 = int(moment0['m01']/moment0['m00'])
         return [cx0, cy0, 0]
     else: return None
Example #28
def esquelet (image):

    img=np.uint8(image)
    mask = np.zeros(img.shape, dtype=np.uint8)
    img1 = np.uint8(img)
    size = np.size(img1)
    skel = np.zeros(img1.shape,np.uint8)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
    done=False

    #skeletonization        

    while(not done):

        eroded = cv2.erode(img1,element)
        temp = cv2.dilate(eroded,element)
        temp = cv2.subtract(img1,temp)
        skel = cv2.bitwise_or(skel,temp)
        img1 = eroded.copy()

        zeros = size - cv2.countNonZero(img1)

        if zeros==size:

            done = True

    return [skel,img]
Example #29
    def calibrate_hsv(self, img):        
        element = self.element
        result = np.zeros((img.shape[0], img.shape[1]), np.uint8)
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h,s,v = cv2.split(img_hsv)
        d = cv2.inRange(h, np.array([self.current_conf[0]],np.uint8), 
                           np.array([self.current_conf[1]],np.uint8))
        d2 = cv2.inRange(h, np.array([self.current_conf[2]],np.uint8), 
                            np.array([self.current_conf[3]],np.uint8))
        d = cv2.bitwise_or(d, d2)
        d = cv2.erode(d, element)
        d = cv2.dilate(d, element)
        d = cv2.dilate(d, element)
        d = cv2.dilate(d, element)
        d = cv2.dilate(d, element)
        result = d

        res = result.copy()
        contours, hier = cv2.findContours(res, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        self.current_rect_count = len(contours)
        self.rects = self.choose_contour(contours)
        if len(self.rects) > 0:
            self.current_biggest_rect_area = area(self.rects[0])
        else:
            self.current_biggest_rect_area = 0
        middle_roi = get_roi(result, self.middle_rect)
        self.current_middle_non_zero = cv2.countNonZero(middle_roi)

        self.feedback()
Example #30
def get_binary_from_hsv(card):
    # convert from BGR colorspace to HSV colorspace
    hsv = cv2.cvtColor(card, cv2.COLOR_BGR2HSV)

    # separate hue, saturation, and value into three images
    hue, sat, val = [np.array([[col[i] for col in row] for row in hsv]) for i in xrange(3)]

    # get binary representation of saturation image
    # higher threshold = less white
    _, bin_sat = cv2.threshold(np.array(sat), thresh=55, maxval=255, type=cv2.THRESH_BINARY)
    # bin_sat = cv2.GaussianBlur(bin_sat, ksize=(5, 5), sigmaX=0)

    # get binary representation of value image
    # higher threshold = more white
    _, bin_val = cv2.threshold(np.array(val), thresh=140, maxval=255, type=cv2.THRESH_BINARY_INV)

    bin_sat_val = cv2.bitwise_or(bin_sat, bin_val)

    # erosion followed by morphological opening to erase noise and fill gaps
    # in shapes
    kernel_e = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
    kernel_d = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
    bin_sat_val = cv2.erode(bin_sat_val, kernel_e)
    bin_sat_val = cv2.morphologyEx(bin_sat_val, cv2.MORPH_CLOSE, kernel_d)

    return bin_sat_val, hue, sat, val
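The nested list comprehension above rebuilds each plane pixel by pixel; cv2.split produces the same three arrays without the Python-level loops. A hedged drop-in:

import cv2
import numpy as np

hsv = cv2.cvtColor(np.zeros((10, 10, 3), np.uint8), cv2.COLOR_BGR2HSV)
hue, sat, val = cv2.split(hsv)  # same planes as the comprehension, much faster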
Example #31
import numpy as np
import cv2
import time

cap = cv2.VideoCapture(0)
while (1):
    rval, img = cap.read()
    if rval == True:
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask1 = cv2.inRange(img_hsv, (0, 100, 100), (10, 255, 255))  #CALIB
        mask2 = cv2.inRange(img_hsv, (160, 100, 100), (179, 255, 255))  #CALIB
        mask = cv2.bitwise_or(mask1, mask2)
        croped = cv2.bitwise_and(img, img, mask=mask)

        cv2.imshow("Output", croped)

        key = cv2.waitKey(10)
        if key == 27:  # exit on ESC
            break
    elif rval == False:
        break
        # end = time.time()  # unreachable: it followed the break

cap.release()
cv2.destroyAllWindows()
Example #32
        final_eroded = cv2.erode(final_masked, kernel, iterations=1)
        final_eroded, contours, hierarchy = cv2.findContours(
            final_eroded, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        final_masked = cv2.drawContours(final_eroded, contours, -1, 0, 3)

        final_waste = cv2.morphologyEx(final_masked,
                                       cv2.MORPH_TOPHAT,
                                       kernel,
                                       iterations=2)
        final_waste = cv2.bitwise_not(final_waste)
        final_masked = cv2.bitwise_and(final_waste, final_masked)

        #MADE A LINE ON THE LEFT-BOTTOM OF THE PAGE
        final_masked = cv2.line(final_masked, (40, height), (400, height), 255,
                                100)
        #final_masked = cv2.line(final_masked,(width-300,height),(width,height),255,70)

        #USED FLOOD-FILL TO FILL IN THE SMALL BLACK LANES
        final_flood = final_masked.copy()
        h, w = final_masked.shape[:2]
        mask = np.zeros((h + 2, w + 2), np.uint8)
        cv2.floodFill(final_flood, mask, (0, 0), 255)
        final_flood = cv2.bitwise_not(final_flood)
        final_filled = cv2.bitwise_or(final_masked, final_flood)

        gt = cv2.imread(cur_gt)

        cv2.imwrite(
            os.path.join(pred_path, str(prefix + '_00000' + str(i) + '.png')),
            final_filled)
Example #33
def colorness(image, color_name, threshold=0, verbose=False):
    """ Colorness as defined in submission to ICCV
        blue-ness = #blue pixels / # pixels

        Use threshold = 0 for quantization of hue ranges
    """
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # show color histograms for validation
    if verbose:
        h, s, v = hsv_histograms(image)
        plt.figure()
        plt.plot(h)
        plt.figure()
        plt.plot(s)
        plt.figure()
        plt.plot(v)
    # quantization of hue ranges
    # if threshold not 0, color name is changed into hue window
    if threshold == 0:
        hue_min, hue_max = quantize_hue_ranges(image, color_name)
        if verbose:
            print('hue min, hue max: ', hue_min, hue_max)
    else:
        h_point = color_picker(color_name)
        hue_min = round_hue(h_point[0][0][0] - threshold)
        hue_max = round_hue(h_point[0][0][0] + threshold)
        if verbose:
            print('hue min, hue max: ', hue_min, hue_max)
    if (hue_min == hue_max == 0) or (hue_min == 0 and hue_max == 255):
        #it is either black or white
        if color_name == 'black':
            low_c = np.array([0, 0, 0])
            upp_c = np.array([hue_max, 100, 100])
        if color_name == 'white':
            low_c = np.array([0, 0, 190])
            upp_c = np.array([hue_max, 50, 255])
        if verbose:
            print('low_c', low_c, 'upp_c', upp_c)
        mask = cv2.inRange(image, low_c, upp_c)
    elif hue_min > hue_max:
        low_c = np.array([0, 50, 77])
        upp_c = np.array([hue_max, 255, 255])
        mask1 = cv2.inRange(image, low_c, upp_c)

        low_c = np.array([hue_min, 50, 77])
        upp_c = np.array([180, 255, 255])
        mask2 = cv2.inRange(image, low_c, upp_c)
        # combine the two wrap-around masks (the original ORed mask1 with itself into mask2)
        mask = cv2.bitwise_or(mask1, mask2)
    else:
        low_c = np.array([hue_min, 50, 77])
        upp_c = np.array([hue_max, 255, 255])
        if verbose:
            print('low_c', low_c, 'upp_c', upp_c)
        mask = cv2.inRange(image, low_c, upp_c)
    if verbose:
        print(mask)
    res = cv2.bitwise_and(image, image, mask=mask)
    if verbose:
        plt.figure()
        plt.imshow(mask, cmap='Greys')
        plt.colorbar()
        plt.figure()
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_HSV2RGB))
        plt.figure()
        plt.imshow(cv2.cvtColor(res, cv2.COLOR_HSV2RGB))
    x, y, z = image.shape
    if verbose:
        print(np.sum(mask == 255) / (float(x) * float(y)))
    return float(np.sum(mask == 255)) / (float(x) * float(y))
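A hedged usage sketch for colorness, assuming its helper functions are importable; the file name is hypothetical and the returned value is the fraction of matching pixels in [0, 1]:

import cv2

img = cv2.imread("sample.jpg")  # hypothetical input file
blueness = colorness(img, "blue", threshold=10)
print("blue-ness: %.3f" % blueness)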
Example #34
import cv2
import numpy as np

image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
mask = np.zeros(image.shape, dtype=np.uint8)

cnts = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

cv2.fillPoly(mask, cnts, [255, 255, 255])
mask = 255 - mask
result = cv2.bitwise_or(image, mask)

cv2.imshow('mask', mask)
cv2.imshow('result', result)
cv2.waitKey(0)
Example #35
    def image_callback(self, ros_image):
        # Time this loop to get cycles per second
        start = time.time()

        # Convert the ROS Image to OpenCV format using a cv_bridge helper
        # function
        self.frame = self.convert_image(ros_image)

        # Some webcams invert the image
        if self.flip_image:
            self.frame = cv2.flip(self.frame, 0)

        # Store the frame width and height in a pair of global variables
        if self.frame_width is None:
            self.frame_size = (self.frame.shape[1], self.frame.shape[0])
            self.frame_width, self.frame_height = self.frame_size

        # Create the marker image we will use for display purposes
        if self.marker_image is None:
            self.marker_image = np.zeros_like(self.frame)

        # Reset the marker image if we're not displaying the history
        if not self.keep_marker_history:
            self.marker_image = np.zeros_like(self.marker_image)

        # Process the image to detect and track objects or features
        processed_image = self.process_image(self.frame)

        # Make a global copy
        self.processed_image = processed_image.copy()

        # Display the user-selection rectangle or point
        self.display_selection()

        # Night mode: only display the markers
        if self.night_mode:
            self.processed_image = np.zeros_like(self.processed_image)

        # Merge the processed image and the marker image
        self.display_image = cv2.bitwise_or(self.processed_image,
                                            self.marker_image)

        # If we have a track box, then display it. The track box can be either a regular
        # cvRect (x,y,w,h) or a rotated Rect (center, size, angle).
        if self.show_boxes:
            if self.track_box is not None and self.is_rect_nonzero(
                    self.track_box):
                if len(self.track_box) == 4:
                    x, y, w, h = self.track_box
                    size = (w, h)
                    center = (x + w / 2, y + h / 2)
                    angle = 0
                    self.track_box = (center, size, angle)
                else:
                    (center, size, angle) = self.track_box

                # For face tracking, an upright rectangle looks best
                if self.face_tracking:
                    pt1 = (int(center[0] - size[0] / 2),
                           int(center[1] - size[1] / 2))
                    pt2 = (int(center[0] + size[0] / 2),
                           int(center[1] + size[1] / 2))
                    cv2.rectangle(self.display_image, pt1, pt2, (50, 255, 50),
                                  self.feature_size, 8, 0)
                else:
                    # Otherwise, display a rotated rectangle
                    vertices = np.int0(cv2.boxPoints(self.track_box))
                    cv2.drawContours(self.display_image, [vertices], 0,
                                     (50, 255, 50), self.feature_size)

            # If we don't yet have a track box, display the detect box if
            # present
            elif self.detect_box is not None and self.is_rect_nonzero(
                    self.detect_box):
                (pt1_x, pt1_y, w, h) = self.detect_box
                if self.show_boxes:
                    cv2.rectangle(self.display_image, (pt1_x, pt1_y),
                                  (pt1_x + w, pt1_y + h), (50, 255, 50),
                                  self.feature_size, 8, 0)

        # Publish the ROI
        self.publish_roi()

        # Compute the time for this loop and estimate CPS as a running average
        end = time.time()
        duration = end - start
        fps = int(1.0 / duration)
        self.cps_values.append(fps)
        if len(self.cps_values) > self.cps_n_values:
            self.cps_values.pop(0)
        self.cps = int(sum(self.cps_values) / len(self.cps_values))

        # Display CPS and image resolution if asked to
        if self.show_text:
            font_face = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 0.5

            # Print cycles per second (CPS) and resolution (RES) at top of the image
            if self.frame_size[0] >= 640:
                vstart = 25
                voffset = int(50 + self.frame_size[1] / 120.)
            elif self.frame_size[0] == 320:
                vstart = 15
                voffset = int(35 + self.frame_size[1] / 120.)
            else:
                vstart = 10
                voffset = int(20 + self.frame_size[1] / 120.)

            cv2.putText(self.display_image, "CPS: " + str(self.cps),
                        (10, vstart), font_face, font_scale, (255, 255, 0))
            cv2.putText(
                self.display_image, "RES: " + str(self.frame_size[0]) + "X" +
                str(self.frame_size[1]), (10, voffset), font_face, font_scale,
                (255, 255, 0))
Example #36
import cv2
import numpy as np

# Making a square and an ellipse (the square is needed by the bitwise ops below)
square = np.zeros((300, 300), np.uint8)
cv2.rectangle(square, (50, 50), (250, 250), 255, -1)

ellipse = np.zeros((300, 300), np.uint8)
cv2.ellipse(ellipse, (150, 150), (150, 150), 30, 0, 180, 255, -1)
cv2.imshow("Ellipse", ellipse)
cv2.waitKey(0)

cv2.destroyAllWindows()

# Shows only where they intersect
And = cv2.bitwise_and(square, ellipse)
cv2.imshow("AND", And)
cv2.waitKey(0)

# Shows where either square or ellipse is
bitwiseOr = cv2.bitwise_or(square, ellipse)
cv2.imshow("OR", bitwiseOr)
cv2.waitKey(0)

# Shows where either exist by itself
bitwiseXor = cv2.bitwise_xor(square, ellipse)
cv2.imshow("XOR", bitwiseXor)
cv2.waitKey(0)

# Shows everything that isn't part of the square
bitwiseNot_sq = cv2.bitwise_not(square)
cv2.imshow("NOT - square", bitwiseNot_sq)
cv2.waitKey(0)

### Notice that the last operation inverts the image completely
Example #37
    def image_callback(self, data):
        # Store the image header in a global variable
        self.image_header = data.header

        # Time this loop to get cycles per second
        start = time.time()

        # Convert the ROS image to OpenCV format using a cv_bridge helper function
        frame = self.convert_image(data)

        # Some webcams invert the image
        if self.flip_image:
            frame = cv2.flip(frame, 0)

        # Store the frame width and height in a pair of global variables
        if self.frame_width is None:
            self.frame_size = (frame.shape[1], frame.shape[0])
            self.frame_width, self.frame_height = self.frame_size

        # Create the marker image we will use for display purposes
        if self.marker_image is None:
            self.marker_image = np.zeros_like(frame)

        # Copy the current frame to the global image in case we need it elsewhere
        self.frame = frame.copy()

        # Reset the marker image if we're not displaying the history
        if not self.keep_marker_history:
            self.marker_image = np.zeros_like(self.marker_image)

        # Process the image to detect and track objects or features
        processed_image = self.process_image(frame)

        # If the result is a greyscale image, convert to 3-channel for display purposes
        #if processed_image.channels == 1:
        #cv.CvtColor(processed_image, self.processed_image, cv.CV_GRAY2BGR)
        #else:

        # Make a global copy
        self.processed_image = processed_image.copy()

        # Display the user-selection rectangle or point
        self.display_selection()

        # Night mode: only display the markers
        if self.night_mode:
            self.processed_image = np.zeros_like(self.processed_image)

        # Merge the processed image and the marker image
        self.display_image = cv2.bitwise_or(self.processed_image,
                                            self.marker_image)

        # If we have a track box, then display it.  The track box can be either a regular
        # cvRect (x,y,w,h) or a rotated Rect (center, size, angle).
        if self.show_boxes:
            if self.track_box is not None and self.is_rect_nonzero(
                    self.track_box):
                if len(self.track_box) == 4:
                    x, y, w, h = self.track_box
                    size = (w, h)
                    center = (x + w / 2, y + h / 2)
                    angle = 0
                    self.track_box = (center, size, angle)
                else:
                    (center, size, angle) = self.track_box

                # For face tracking, an upright rectangle looks best
                if self.face_tracking:
                    pt1 = (int(center[0] - size[0] / 2),
                           int(center[1] - size[1] / 2))
                    pt2 = (int(center[0] + size[0] / 2),
                           int(center[1] + size[1] / 2))
                    cv2.rectangle(self.display_image, pt1, pt2, (50, 255, 50),
                                  self.feature_size, 8, 0)
                else:
                    # Otherwise, display a rotated rectangle
                    vertices = np.int0(cv2.boxPoints(self.track_box))
                    cv2.drawContours(self.display_image, [vertices], 0,
                                     (50, 255, 50), self.feature_size)

            # If we don't yet have a track box, display the detect box if present
            elif self.detect_box is not None and self.is_rect_nonzero(
                    self.detect_box):
                (pt1_x, pt1_y, w, h) = self.detect_box
                if self.show_boxes:
                    cv2.rectangle(self.display_image, (pt1_x, pt1_y),
                                  (pt1_x + w, pt1_y + h), (50, 255, 50),
                                  self.feature_size, 8, 0)

        # Publish the ROI
        self.publish_roi()

        # Compute the time for this loop and estimate CPS as a running average
        end = time.time()
        duration = end - start
        fps = int(1.0 / duration)
        self.cps_values.append(fps)
        if len(self.cps_values) > self.cps_n_values:
            self.cps_values.pop(0)
        self.cps = int(sum(self.cps_values) / len(self.cps_values))

        # Display CPS and image resolution if asked to
        if self.show_text:
            font_face = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 0.5
            """ Print cycles per second (CPS) and resolution (RES) at top of the image """
            if self.frame_size[0] >= 640:
                vstart = 25
                voffset = int(50 + self.frame_size[1] / 120.)
            elif self.frame_size[0] == 320:
                vstart = 15
                voffset = int(35 + self.frame_size[1] / 120.)
            else:
                vstart = 10
                voffset = int(20 + self.frame_size[1] / 120.)
            cv2.putText(self.display_image, "CPS: " + str(self.cps),
                        (10, vstart), font_face, font_scale, (255, 255, 0))
            cv2.putText(
                self.display_image, "RES: " + str(self.frame_size[0]) + "X" +
                str(self.frame_size[1]), (10, voffset), font_face, font_scale,
                (255, 255, 0))

        # Update the image display
        cv2.imshow(self.node_name, self.display_image)
        cv2.setMouseCallback(self.node_name, self.on_mouse_click, None)
        # Process any keyboard commands
        self.keystroke = cv2.waitKey(5)
        if self.keystroke is not None and self.keystroke != -1:
            try:
                cc = chr(self.keystroke & 255).lower()
                if cc == 'n':
                    self.night_mode = not self.night_mode
                elif cc == 'f':
                    self.show_features = not self.show_features
                elif cc == 'b':
                    self.show_boxes = not self.show_boxes
                elif cc == 't':
                    self.show_text = not self.show_text
                elif cc == 'q':
                    # The user has pressed the q key, so exit
                    rospy.signal_shutdown("User hit q key to quit.")
            except:
                pass
Example #38
rectangle = np.zeros((300, 300), dtype="uint8")
cv2.rectangle(rectangle, (25, 25), (275, 275), 255, -1)
cv2.imshow("Rectangle", rectangle)
cv2.waitKey(0)
circle = np.zeros((300, 300), dtype="uint8")
cv2.circle(circle, (150, 150), 150, 255, -1)
cv2.imshow("Circle", circle)
cv2.waitKey(0)
cv2.destroyAllWindows()

bitwiseAnd = cv2.bitwise_and(rectangle, circle)
cv2.imshow("AND", bitwiseAnd)
cv2.waitKey(0)
cv2.destroyAllWindows()

bitwiseOr = cv2.bitwise_or(rectangle, circle)
cv2.imshow("OR", bitwiseOr)
cv2.waitKey(0)
cv2.destroyAllWindows()

bitwiseXor = cv2.bitwise_xor(rectangle, circle)
cv2.imshow("XOR", bitwiseXor)
cv2.waitKey(0)
cv2.destroyAllWindows()

bitwiseNot = cv2.bitwise_not(circle)
cv2.imshow("NOT", bitwiseNot)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #39
    def DetectColor(self, image, color, returnType="segmented", process=False):

        numRows, numCols, channels = image.shape
        lower2 = upper2 = array([0, 0, 0])

        #definitions for upper and lower hsv values for each color
        if (color == 'orange'):  #0,50,170,10,254,255
            # for when lighting is dim
            #s_min = 50
            #s_max = 254
            #for when lighting is bright
            """
            s_min = 94
            s_max = 255
            hsv_boundaries = [( [0, s_min, 170],[10, s_max, 255] )]
            hsv_boundaries2 = [([174, s_min, 180],[180, s_max, 255])]
            """
            hsv_boundaries = [([0, 30, 40], [25, 255, 160])]
            hsv_boundaries2 = [([160, 30, 40], [180, 255, 160])]
            #hsv_boundaries2 = [([174, 61, 120],[180, 255, 255])]
            lower = array(hsv_boundaries[0][0], dtype="uint8")
            upper = array(hsv_boundaries[0][1], dtype="uint8")
            lower2 = array(hsv_boundaries2[0][0], dtype="uint8")
            upper2 = array(hsv_boundaries2[0][1], dtype="uint8")

        elif (color == 'front orange'):
            #0 50 0 25, 254 255
            hsv_boundaries = [([0, 55, 10], [60, 255, 255])]
            #168 50 0,180 254 255
            #lower  hsv boundary #170 140 150,179 255 255
            hsv_boundaries2 = [([168, 70, 10], [180, 254, 255])]
            lower = array(hsv_boundaries[0][0], dtype="uint8")
            upper = array(hsv_boundaries[0][1], dtype="uint8")
            lower2 = array(hsv_boundaries2[0][0], dtype="uint8")
            upper2 = array(hsv_boundaries2[0][1], dtype="uint8")

        elif (color == 'blue'):
            hsv_boundaries = [([100, 50, 70], [140, 255, 255])]
            lower = array(hsv_boundaries[0][0], dtype="uint8")
            upper = array(hsv_boundaries[0][1], dtype="uint8")
        elif (color == 'darkblue'):
            hsv_boundaries = [([90, 25, 70], [135, 255, 250])]
            lower = array(hsv_boundaries[0][0], dtype="uint8")
            upper = array(hsv_boundaries[0][1], dtype="uint8")
        elif (color == 'green'):
            hsv_boundaries = [([40, 70, 10], [70, 190, 254])]
            lower = array(hsv_boundaries[0][0], dtype="uint8")
            upper = array(hsv_boundaries[0][1], dtype="uint8")

        elif (color == 'white'):
            #duct tape
            #hsv_boundaries = [ ([161, 68, 127],[171, 209, 255])]
            # printed
            hsv_boundaries = [([10, 10, 10], [160, 160, 160])]
            lower = array(hsv_boundaries[0][0], dtype="uint8")
            upper = array(hsv_boundaries[0][1], dtype="uint8")
        elif (color == 'pink'):
            #duct tape
            #hsv_boundaries = [ ([161, 68, 127],[171, 209, 255])]
            # printed
            hsv_boundaries = [([161, 68, 127], [172, 255, 255])]
            lower = array(hsv_boundaries[0][0], dtype="uint8")
            upper = array(hsv_boundaries[0][1], dtype="uint8")
        elif (color == 'yellow'):
            hsv_boundaries = [([17, 10, 100], [32, 188, 255])]
            lower = array(hsv_boundaries[0][0], dtype="uint8")
            upper = array(hsv_boundaries[0][1], dtype="uint8")
        elif (color == 'test'):
            hsv_boundaries = [([1, 90, 120], [45, 255, 255])]
            #hsv_boundaries2 = [([178, 75, 120],[180, 255, 255])]
            #hsv_boundaries2 = [([174, 61, 120],[180, 255, 255])]
            lower = array(hsv_boundaries[0][0], dtype="uint8")
            upper = array(hsv_boundaries[0][1], dtype="uint8")
            #lower2=array(hsv_boundaries2[0][0], dtype = "uint8")
            #upper2=array(hsv_boundaries2[0][1], dtype = "uint8")

        else:
            raise Exception("Color not recognized")

        # convert BGR to HSV image for color segmentation
        if color == 'white':
            hsv_image = image
        else:
            hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        #find colors within the boundaries for each pixel,returns binary image
        mask = mask1 = cv2.inRange(hsv_image, lower, upper)

        # if a second bound exists, we combine the two masks
        if (lower2.any() or upper2.any()):
            mask2 = cv2.inRange(hsv_image, lower2, upper2)
            #perform logical or to combine the masks
            mask = cv2.bitwise_or(mask1, mask2, mask=None)

        # this sets any pixel in mask != 0 to its HSV color value from the unsegmented image
        hsv_output = cv2.bitwise_and(hsv_image, hsv_image, mask=mask)
        #this converts the image from hsv to bgr
        if color == 'white':
            segmentedImage = hsv_output
        else:
            segmentedImage = cv2.cvtColor(hsv_output, cv2.COLOR_HSV2BGR)
        #we put a circle in the center of the image
        #cv2.circle(segmentedImage,(numcols/2,numrows/2),4,150,1)

        #rospy.logwarn(hsv_image[numRows/2][numCols/2])
        #rospy.logwarn("seg: " + str(hsv_output[numRows/2][numCols/2]))
        #segmentedImage is bgr, and mask is a binary image with values within color range
        if (returnType == "segmented"):
            return segmentedImage
        elif returnType == "hsv":
            return hsv_output
        elif returnType == "binary":
            return mask
        elif returnType == "all":
            return segmentedImage, hsv_output, mask
Example #40
def frame_diff(img_vold, img_old, new_img):
    img_diff0 = cv2.absdiff(new_img, img_old)
    img_diff1 = cv2.absdiff(img_old, img_vold)
    return cv2.bitwise_or(img_diff0, img_diff1)
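A hedged sketch of driving frame_diff with three consecutive grayscale frames from a capture device:

import cv2

cap = cv2.VideoCapture(0)
frames = []
while len(frames) < 3:
    ret, frame = cap.read()
    if not ret:
        break
    frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
if len(frames) == 3:
    # oldest, middle, newest: bright wherever either consecutive pair differs
    motion = frame_diff(frames[0], frames[1], frames[2])
cap.release()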
Example #41
# -*- coding: utf-8 -*-
import cv2
import numpy as np

image1 = np.zeros((300, 300), np.uint8)
image2 = np.zeros((300, 300), np.uint8)

square = cv2.rectangle(image1, (100, 150), (200, 250), (255, 255, 255), -1)
circle = cv2.circle(image2, (150, 150), 100, (255, 255, 255), -1)

cv2.imshow('image1', image1)
cv2.imshow('image2', image2)

#bitwise operations

And = cv2.bitwise_and(image1, image2)
OR = cv2.bitwise_or(image1, image2)
XOR = cv2.bitwise_xor(image1, image2)
NOT = cv2.bitwise_not(image1)

cv2.imshow('bitwise_and', And)
cv2.imshow('bitwise_or', OR)
cv2.imshow('bitwise_xor', XOR)
cv2.imshow('bitwise_not', NOT)

cv2.waitKey()
cv2.destroyAllWindows()
Example #42
def readFrame(frame, isintersect):

	rho_out = []
	theta_outL = []
	theta_outR = []
	theta_out = []
	intersectDetectRow = 142 # need to test this constant

	imgSmall = cv2.resize(frame,(150,150),interpolation=cv2.INTER_AREA) # keyword needed: resize's third positional arg is dst
	img=cv2.cvtColor(imgSmall,cv2.COLOR_BGR2HSV)
	
	# lowboundsYellow = np.array([10,59,67])
	# upboundsYellow = np.array([39,229,219])

	lowboundsYellow = np.array([15,51,34])
	upboundsYellow = np.array([34,248,220])

	lbPurp = np.array([141,18,97])
	ubPurp = np.array([170,250,255])

	maskY = cv2.inRange(img, lowboundsYellow, upboundsYellow)
	maskP = cv2.inRange(img, lbPurp, ubPurp)
	maskTot = cv2.bitwise_or(maskY,maskP)
	maskB = cv2.inRange(img,np.array([7,61,58]),np.array([115,222,255])) # re-enabled: the intersection check below reads maskB
	TopLine = maskTot[0,0:75]
	if np.any(TopLine==255):
		distance = np.max(np.where(TopLine==255))
	else:
		distance = 75

	res = cv2.bitwise_and(imgSmall,imgSmall, mask=maskTot)

	gray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)

	edges = cv2.Canny(gray,50,150,apertureSize = 3)

	lines = cv2.HoughLines(edges,0.5,np.pi/180,35)
	# Left Line searches the left half of the frames (all rows, columns from 0 up to 70)
	leftLines = cv2.HoughLines(edges[:,0:70],0.5,np.pi/180,20) #Colin - looking at top and bottom of screen or left and right???
	# Colin: looking at the left half of the screen

	# Right Line searches the right half of the frame (all rows, columns from 80 up to 149.)
	# Indexing starts at 0. A frame of width 150 starts at 0 and ends its index at 149

	rightLines = cv2.HoughLines(edges[:,80:149],0.5,np.pi/180,20)
	# centerline = cv2.HoughLines(edges[:,]) #Tasha did this for the center of intersection to start left turn
	# See my updates in readFrameLeft (Colin)

	if np.any(leftLines != None):
		for line in leftLines:
			line = line[0]
			rho = line[0]
			theta = line[1]
			rho_out.append(rho)
			theta_outL.append(theta)
	if rightLines is not None:
		for line in rightLines:
			line = line[0]
			rho = line[0]
			theta = line[1]
			rho_out.append(rho)
			theta_outR.append(theta)
	if lines is not None:
		for line in lines:
			line = line[0]
			rho = line[0]
			theta = line[1]
			rho_out.append(rho)
			theta_out.append(theta)

	# Intersection Detection

	# interLine = maskY[intersectDetectRow]
	# findLine = np.amax(np.where(np.equal(interLine,255)))
	# if findLine < 100:
	# 	isintersect = True
	# elif findLine > 100 and isintersect:
	# 	isintersect = False
	# l1 = interDetect(maskB,intersectDetectRow-1,isintersect)
	# l2 = interDetect(maskB,intersectDetectRow,isintersect)
	# l3 = interDetect(maskB,intersectDetectRow+1,isintersect)

	# New Intersection Detection
	ar1 = np.where(maskB[130,:]==255)
	if ar1[0].size:
		l1 = np.max(ar1)
		if l1<75:
			checkTop = True
		else:
			checkTop = False
	else:
		checkTop = False

	# guard against rows with no mask pixels, as done for checkTop above
	ar2 = np.where(maskTot[137,:]==255)
	if ar2[0].size and np.max(ar2) < 75:
		checkMid = True
	else:
		checkMid = False
	ar3 = np.where(maskTot[145,:]==255)
	if ar3[0].size and np.max(ar3) < 75:
		checkBot = True
	else:
		checkBot = False

	if sum([checkTop,checkMid,checkBot]) > 1:
		isintersect = True
	else:
		isintersect = False

	# print "Intersection Detection: ", l1,l2,l3
	# if sum([l1,l2,l3]) > 1:
	# 	isintersect = True
	# # elif sum([l1,l2,l3]) < 1 and isintersect:
	# # 	isintersect = False
	# else:
	# 	isintersect = False

	# print "Left Lane Angles:", theta_outL
	# print "Right Lane Angles:", theta_outR
	return rho_out, theta_out, theta_outL, theta_outR, distance, isintersect
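A minimal sketch of how readFrame might be driven in a loop; the capture source and the initial isintersect state are assumptions, not shown in the original example.

import cv2

cap = cv2.VideoCapture(0)  # assumed camera source
isintersect = False
while True:
    ret, frame = cap.read()
    if not ret:
        break
    rho, theta, thetaL, thetaR, distance, isintersect = readFrame(frame, isintersect)
    if isintersect:
        print("intersection detected; rightmost top-line mask pixel at column", distance)
cap.release()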
Example #43
0
import cv2
import numpy as np

square = np.zeros((300, 300), np.uint8)
cv2.rectangle(square, (50, 50), (250, 250), 255, -1)
cv2.imshow("square", square)
cv2.waitKey(0)

ellipse = np.zeros((300, 300), np.uint8)
cv2.ellipse(ellipse, (150, 150), (150, 150), 30, 0, 180, 255, -1)
cv2.imshow("Ellipse", ellipse)
cv2.waitKey()

And = cv2.bitwise_and(square, ellipse)
cv2.imshow("And", And)
cv2.waitKey(0)

Or = cv2.bitwise_or(square, ellipse)
cv2.imshow("OR", Or)
cv2.waitKey(0)

Not = cv2.bitwise_not(square)
cv2.imshow("Not", Not)
cv2.waitKey(0)

Xor = cv2.bitwise_xor(square, ellipse)
cv2.imshow("Xor", Xor)
cv2.waitKey(0)
Example #44
0
import cv2
import numpy as np
import matplotlib.pyplot as plt

img = cv2.imread('messi5.jpg', cv2.IMREAD_GRAYSCALE)

lap = cv2.Laplacian(img, cv2.CV_64F, ksize = 3)
lap = np.uint8(np.absolute(lap)) # gives abs value of laplacian image transformation

sobelX = cv2.Sobel(img, cv2.CV_64F, 1, 0)
sobelY = cv2.Sobel(img, cv2.CV_64F, 0, 1)

sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))

sobelCombine = cv2.bitwise_or(sobelX, sobelY)


titles = ['image', 'laplacian', 'sobelX', 'sobelY', 'sobelCombine']

images = [img, lap, sobelX, sobelY, sobelCombine]

for i in range(5):
    plt.subplot(2,3,i+1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])

plt.show()
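Combining the two Sobel responses with bitwise_or is a cheap per-pixel union of the edge maps. Where a true edge strength is wanted, the Euclidean gradient magnitude is the usual alternative; a short sketch using the same input image:

import cv2
import numpy as np

img = cv2.imread('messi5.jpg', cv2.IMREAD_GRAYSCALE)
gx = cv2.Sobel(img, cv2.CV_64F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_64F, 0, 1)

# Euclidean gradient magnitude, rescaled into uint8 for display
mag = cv2.magnitude(gx, gy)
mag = np.uint8(255 * mag / (mag.max() + 1e-9))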
Example #45
0
import cv2
import numpy as np

def main_loop_mono(num, newpath, camera_interface, autopilot_interface):
    img = camera_interface.capture_frame()

    # Once we have the original image, we need to take the red and nir channels to operate with them

    b = np.array(img[:, :, 0]).astype(float) + 0.00000000001
    r = np.array(img[:, :, 2]).astype(float) + 0.00000000001

    # we want to delete shadows from the original image, as they are introducing distortions
    lower_limit = np.array([3, 3, 3])
    upper_limit = np.array([255, 255, 255])
    shadows = cv2.inRange(img, lower_limit, upper_limit)

    # using the blue filter, red channel is NIR band and blue channel is visible light

    kernel = np.ones((1, 1), np.uint8)
    dilation = cv2.dilate(r, kernel, iterations=10)
    nir = dilation
    red = b

    np.seterr(divide='ignore', invalid='ignore')

    # we compute the ndvi
    ndvi = ((nir - red) / (nir + red)).astype(float)

    # once we have the ndvi in (-1, 1) scale, we convert it to 0-255 scale to operate with opencv

    ndvi_contrasted = contrast_stretch(ndvi).astype(np.uint8)

    # we delete the shadows from the ndvi re-scaled image
    ndvi_new = cv2.bitwise_or(ndvi_contrasted, ndvi_contrasted, mask=shadows)

    median = cv2.bilateralFilter(ndvi_new, 10, 75, 75)
    ndvi_new = median

    # we apply some morphological operations to enhance vegetation

    kernel = np.ones((1, 1), np.uint8)
    erosion = cv2.erode(ndvi_new, kernel, iterations=1)

    kernel = np.ones((2, 2), np.uint8)
    dilation = cv2.dilate(erosion, kernel, iterations=1)

    ndvi_new = dilation
    ndvi_values = np.count_nonzero(ndvi_new > 126)

    # once we have the final image with vegetation, we remove everything that is under 0.14 (163) NDVI value
    ndvi_new[ndvi_new < 163] = 0
    values_ndvi = np.count_nonzero(ndvi_new > 0)

    total_values = ndvi_new.shape[0] * ndvi_new.shape[1]

    percent = round(((ndvi_values / total_values) * 100), 2)

    if percent >= 5:

        name = newpath + '/' + 'raw_images' + '/' + str(num) + '.jpeg'
        name_ndvi = newpath + '/' + 'ndvi_images' + '/' + str(num) + '.jpeg'

        # name = path + '/' + 'ndvi_results' + '/' + 'image' + 'ndvi' + str(percent) + '.jpeg'

        # we save the raw image
        cv2.imwrite(name, img)

        # median = cv2.bilateralFilter(ndvi_final, 10, 75, 75)
        # ndvi_final = median
        # to create the final output, we want to add what is vegetation to the raw image

        mask_vegetation = cv2.inRange(ndvi_new, 163, 255)
        res = cv2.bitwise_and(img, img, mask=cv2.bitwise_not(mask_vegetation))

        ndvi_final = cv2.cvtColor(ndvi_new, cv2.COLOR_GRAY2BGR)
        ndvi_result = cv2.bitwise_and(ndvi_final,
                                      ndvi_final,
                                      mask=mask_vegetation)

        # fusion is the final output, containing vegetation and original image
        fusion = res + ndvi_result

        # we want to tag each corner of the image with its real geographical coordinates

        tag_images = autopilot_interface.image_coordinates()
        vertex_coordinates = get_coordinates(tag_images[0], tag_images[1],
                                             tag_images[2], tag_images[3],
                                             tag_images[4])

        img = fusion

        fusion = camera_interface.tag_image(img, vertex_coordinates)

        cv2.imwrite(name_ndvi, fusion)

        # once we have saved the final output, we save interesting data on the json file

        data_drone = autopilot_interface.set_data_drone()

        image_settings = camera_interface.camera_settings()

        path_json = '/results/photos/' + str(
            timestamp) + '/' + 'ndvi_images' + '/' + str(num) + '.jpeg'
        flight_info = camera_interface.write_json(timestamp, num, percent,
                                                  data_drone, image_settings,
                                                  path_json)

        print('@@@ image processed @@@')
        return flight_info

    else:

        name = newpath + '/' + 'raw_images' + '/' + str(num) + '.jpeg'

        # name = path + '/' + 'ndvi_results' + '/' + 'image' + 'ndvi' + str(percent) + '.jpeg'

        cv2.imwrite(name, img)
        return None
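contrast_stretch is called above but not defined in this snippet. A minimal sketch of what it plausibly does, assuming a percentile-based linear stretch onto the 0-255 range (the percentiles and the exact formula are assumptions):

import numpy as np

def contrast_stretch(im):
    # hypothetical helper: linearly map the 5th-95th percentile of the
    # input onto 0-255; the percentile choice is an assumption
    in_min = np.percentile(im, 5)
    in_max = np.percentile(im, 95)
    out = (im - in_min) * (255.0 / (in_max - in_min + 1e-9))
    return np.clip(out, 0, 255)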
Example #46
0
import cv2
import numpy as np

# the capture source and the HSV bounds (ll, ul) are not shown in this
# snippet; these stand-ins are assumptions so the loop below can run
cam = cv2.VideoCapture(0)
ll = np.array([100, 150, 50])   # assumed lower HSV bound
ul = np.array([140, 255, 255])  # assumed upper HSV bound

kernel = np.ones((3,3))

draw = False
lift = 1

while True:
    _, frame = cam.read()
    frame = cv2.flip(frame, 1)
    blur = cv2.GaussianBlur(frame, (15,15) , 0)
    frame = cv2.morphologyEx(frame, cv2.MORPH_OPEN, kernel)
    
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    mask = cv2.inRange(hsv, ll, ul)
    res = cv2.bitwise_or(frame,frame, mask=mask)

    im2, contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

    maxarea = 0

    try:
        c = max(contours, key = cv2.contourArea)
        if cv2.contourArea(c) > 150:
            
            (xcirc , ycirc) , radius = cv2.minEnclosingCircle(c)
            center = (int(xcirc) , int(ycirc))
            radius = int(radius)

            xdraw = int(xcirc)
            ydraw = int(ycirc)
Example #47
0
import cv2
import numpy as np

def coneDetect(frame):
    frame = cv2.resize(frame, (416, 416))
    img_HSV = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
    img_thresh_low = cv2.inRange(
        img_HSV, np.array([0, 135, 135]),
        np.array([15, 255,
                  255]))  # everything that falls in the low end of the red hue range

    img_thresh_high = cv2.inRange(
        img_HSV, np.array([159, 135, 135]),
        np.array([179, 255,
                  255]))  # everything that falls in the high end of the red hue range

    img_thresh_mid = cv2.inRange(
        img_HSV, np.array([100, 150, 0]),
        np.array([140, 255,
                  255]))  # everything that falls in the blue hue range

    img_thresh = cv2.bitwise_or(img_thresh_low,
                                img_thresh_mid)  # combine the resulting image
    img_thresh = cv2.bitwise_or(img_thresh, img_thresh_high)
    kernel = np.ones((5, 5))
    img_thresh_opened = cv2.morphologyEx(img_thresh, cv2.MORPH_OPEN, kernel)
    img_thresh_blurred = cv2.medianBlur(img_thresh_opened, 5)
    img_edges = cv2.Canny(img_thresh_blurred, 80, 160)
    contours, _ = cv2.findContours(np.array(img_edges), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    img_contours = np.zeros_like(img_edges)
    cv2.drawContours(img_contours, contours, -1, (255, 255, 255), 2)
    approx_contours = []

    for c in contours:
        approx = cv2.approxPolyDP(c, 10, closed=True)
        approx_contours.append(approx)
    img_approx_contours = np.zeros_like(img_edges)
    cv2.drawContours(img_approx_contours, approx_contours, -1, (255, 255, 255),
                     1)
    all_convex_hulls = []
    for ac in approx_contours:
        all_convex_hulls.append(cv2.convexHull(ac))
    img_all_convex_hulls = np.zeros_like(img_edges)
    cv2.drawContours(img_all_convex_hulls, all_convex_hulls, -1,
                     (255, 255, 255), 2)
    convex_hulls_3to10 = []
    for ch in all_convex_hulls:
        if 3 <= len(ch) <= 10:
            convex_hulls_3to10.append(cv2.convexHull(ch))
    img_convex_hulls_3to10 = np.zeros_like(img_edges)
    cv2.drawContours(img_convex_hulls_3to10, convex_hulls_3to10, -1,
                     (255, 255, 255), 2)

    def convex_hull_pointing_up(ch):
        '''Determines whether the convex hull points upward.
        If so, the contour is likely a cone.'''

        # contour points above center and below

        points_above_center, points_below_center = [], []

        x, y, w, h = cv2.boundingRect(
            ch
        )  # upper-left corner of the bounding rectangle, plus its width and height
        aspect_ratio = w / h  # ratio of rectangle width to height

        # if the rectangle is narrow, continue the check. If not, the contour is rejected
        if aspect_ratio < 0.8:
            # We classify each point of the contour as lying above or below the center
            vertical_center = y + h / 2

            for point in ch:
                if point[0][1] < vertical_center:
                    # y coordinate above the center: add to the list of points above the center
                    points_above_center.append(point)
                else:
                    points_below_center.append(point)

            # determine the x coordinates of the extreme points below the center
            left_x = points_below_center[0][0][0]
            right_x = points_below_center[0][0][0]
            for point in points_below_center:
                if point[0][0] < left_x:
                    left_x = point[0][0]
                if point[0][0] > right_x:
                    right_x = point[0][0]

            # check if the upper points of the contour lie outside the "base". If so, the contour is rejected
            for point in points_above_center:
                if (point[0][0] < left_x) or (point[0][0] > right_x):
                    return False
        else:
            return False

        return True

    cones = []
    bounding_rects = []
    for ch in convex_hulls_3to10:
        if convex_hull_pointing_up(ch):
            cones.append(ch)
            rect = cv2.boundingRect(ch)
            bounding_rects.append(rect)
    img_res = frame.copy()
    cv2.drawContours(img_res, cones, -1, (255, 255, 255), 2)
    transf = np.zeros([450, 600, 3])

    for rect in bounding_rects:
        #print('previous', rect[0], rect[1], rect[2], rect[3])
        cv2.rectangle(img_res, (rect[0], rect[1]),
                      (rect[0] + rect[2], rect[1] + rect[3]), (1, 255, 1), 6)
        cv2.circle(img_res, (rect[0], rect[1]), 5, (0, 200, 255), -1)
        cv2.circle(img_res, (rect[0] + rect[2], rect[1] + rect[3]), 5,
                   (0, 200, 255), -1)
        cv2.circle(img_res, (rect[0] + rect[2] // 2, rect[1] + rect[3]), 5,
                   (255, 255, 255), -1)

    return bounding_rects, img_res
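A minimal sketch of driving coneDetect on a single image; the filename is a placeholder:

import cv2

img = cv2.imread('cones.jpg')  # hypothetical input image
rects, annotated = coneDetect(img)
print('cones found:', len(rects))
cv2.imshow('cones', annotated)
cv2.waitKey(0)
cv2.destroyAllWindows()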
Example #48
0
import cv2

def combine_hsl_isolated_with_original(img, hsl_yellow, hsl_white):
    hsl_mask = cv2.bitwise_or(hsl_yellow, hsl_white)
    return cv2.bitwise_and(img, img, mask=hsl_mask)
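The two masks are produced elsewhere in the original project. A hedged sketch of how they might be built with HLS thresholds (the bounds and the filename are assumptions):

import cv2
import numpy as np

img = cv2.imread('road.jpg')  # hypothetical input frame
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)

# assumed threshold bounds for yellow and white lane markings
hsl_yellow = cv2.inRange(hls, np.array([10, 0, 100]), np.array([40, 255, 255]))
hsl_white = cv2.inRange(hls, np.array([0, 200, 0]), np.array([180, 255, 255]))

lanes = combine_hsl_isolated_with_original(img, hsl_yellow, hsl_white)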