Example No. 1
    def __test_safety_zone_pic(pic):
        hsv = cv2.cvtColor(pic.img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, Tester.__LOWER, Tester.__UPPER)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.GaussianBlur(mask, (1, 1), 0)

        contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)[1:3]

        i = -1
        for c in contours:
            i += 1
            perimeter = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.03 * perimeter, True)
            if hierarchy[0][i][3] != -1 or cv2.contourArea(
                    c) < 200 or not cv2.isContourConvex(approx):
                continue
            if len(approx) == 4:
                children = Tester.__get_children(hierarchy, i, contours)
                if len(children) > 0:
                    bigchild = Tester.__biggest_child(children)
                    cv2.drawContours(pic.img, [bigchild], -1, (0, 255, 0), 10)
                    blurred = cv2.GaussianBlur(hsv, (5, 5), 0)
                    centerpt = Tester.__center_of_contour(bigchild)
                    if Tester.__is_point_red(centerpt, blurred):
                        perimeterchild = cv2.arcLength(bigchild, True)
                        approxchild = cv2.approxPolyDP(bigchild, 0.01 * perimeterchild, True)
                        if len(approxchild) > 6:
                            return Tester.SAFETY_ZONE_DETECTED, centerpt
        return Tester.NOTHING_DETECTED, (0, 0)
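Example No. 1 relies on private Tester helpers that are not shown (__get_children, __biggest_child, __center_of_contour, __is_point_red). A minimal sketch of what the hierarchy-based ones might look like, assuming the cv2.RETR_TREE hierarchy layout [next, previous, first_child, parent]; names and details are illustrative, not the original implementation:

import cv2

def get_children(hierarchy, parent_idx, contours):
    # Collect the direct children of contours[parent_idx] from the RETR_TREE hierarchy.
    children = []
    child = hierarchy[0][parent_idx][2]   # index of the first child, -1 if none
    while child != -1:
        children.append(contours[child])
        child = hierarchy[0][child][0]    # index of the next sibling
    return children

def biggest_child(children):
    # Pick the child contour with the largest area.
    return max(children, key=cv2.contourArea)

def center_of_contour(contour):
    # Centroid of the contour from its image moments.
    m = cv2.moments(contour)
    return (int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"]))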
Example No. 2
    def buildMapWithAllFilter(self, frame, map):
        blurMapImage = cv2.GaussianBlur(frame, (5, 5), 0)
        for gray in cv2.split(blurMapImage):
            for threshold in xrange(0, 255, 24):
                if threshold == 0:
                    binary = cv2.Canny(gray, 0, 100, apertureSize=5)
                    binary = cv2.dilate(binary, None)
                else:
                    retval, binary = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
                contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                for contour in contours:
                    contour_len = cv2.arcLength(contour, True)
                    lessPreciseContour = cv2.approxPolyDP(contour, 0.05*contour_len, True)
                    contour = cv2.approxPolyDP(contour, 0.02*contour_len, True)

                    if cv2.contourArea(contour) > self.MIN_SHAPE_SIZE and cv2.isContourConvex(contour) and cv2.contourArea(contour) < 3000:
                        myShape = self.shapeFactory.ConstructShape(contour)
                        if myShape.isEqualEdges() and myShape.checkAngleValue():
                            map.addShape(myShape)

                    if cv2.contourArea(contour) > self.LIMIT_SIZE and cv2.isContourConvex(contour):
                        if len(contour) == 4:
                            map.setMapLimit(contour)

                    if cv2.contourArea(lessPreciseContour) > self.LIMIT_SIZE and cv2.isContourConvex(lessPreciseContour):
                        if len(lessPreciseContour) == 4:
                            map.setMapLimit(lessPreciseContour)

        self.buildByColorClosing(frame, map)
        self.buildByColorOpening(frame, map)
        map.setShapesColor(frame)
        map.filterRobot()
        map.deleteBlackShapes()

        return map
Example No. 3
    def detectShapes(self):
        src = self.mImg
        if src is None or src.size == 0:
            return

        # Convert to gray scale
        grayimg = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

        # use Canny instead of threshold to catch squares with gradient shading
        blackWhite = cv2.Canny(grayimg, 0, 50, apertureSize=5)

        # find contours (keep only the contour list, regardless of OpenCV version)
        contours = cv2.findContours(blackWhite.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

        for i in range(0, len(contours)):
            # Approximate contour with accuracy proportional to the contour perimeter
            approx = cv2.approxPolyDP(contours[i], cv2.arcLength(contours[i], True) * 0.02, True)

            # skip small or non-convex objects
            if numpy.abs(cv2.contourArea(contours[i])) < 0 or not cv2.isContourConvex(approx):
                continue

            if len(approx) == 3:
                pass
        pass
Example No. 4
def containsRectangle(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    height, width = img.shape[:2]
    square = None
    maxArea = -1
    _,img = cv2.threshold(img, 250, 255, 1)
    _,contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    thresholdArea = int((height*width)*0.01)
    for cnt in contours:
        if len(cnt) < 4:
            continue
        contourArea = cv2.contourArea(cnt)
        if (contourArea < thresholdArea):
            continue

        cnt_len = cv2.arcLength(cnt, True)
        cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)

        if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
            cnt = cnt.reshape(-1, 2)
            max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
            if max_cos < 0.1:
                if contourArea > maxArea:
                    square = cnt
                    maxArea = contourArea
    if square is not None:
        square = makeConventionalRectangle(square)
    return square
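This and several later examples call an angle_cos helper without defining it; it comes from OpenCV's squares.py sample and appears inline in Example No. 39 below. For reference, a self-contained copy:

import numpy as np

def angle_cos(p0, p1, p2):
    # Cosine of the angle at vertex p1, formed by the edges p1->p0 and p1->p2.
    d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')
    return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))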
Example No. 5
def get_contours(img_name, base_path):
  img = cv2.imread(base_path + '/' + img_name)
  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  bin_img = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)[1]

  (edges, contours, hierarchy) = cv2.findContours(bin_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

  rects = []
  approxes = []

  for i,contour in enumerate(contours):
    perim = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, perim * 0.02, True)
    approxes.append(approx)

    # If approximated with a quadrilateral, we want to save
    # and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt)
    # Going to need to check if it detects rectangles with multiple cells, and cut those elsewhere
    # and use them for table label info like title, etc.

    if len(approx) == 4 and cv2.isContourConvex(approx) and cv2.contourArea(approx) > 200:
      temp_contour = approx.reshape(-1, 2)
      max_cos = np.max([angle_cos(temp_contour[i], temp_contour[(i+1) % 4], temp_contour[(i+2) % 4]) for i in range(4)])

      if max_cos < 0.1:
        rects.append((i,approx)) # Keep the index and the contour

  return (rects, hierarchy)
Example No. 6
def find_page_with_morphology(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    median = cv2.medianBlur(gray, 3)
    dilated = cv2.dilate(median, kernel=None, iterations=2)
    eroded = cv2.erode(median, kernel=None, iterations=1)
    gradient = dilated - eroded
    show('Gradient', gradient)
    # Compute the best threshold to separate two peaks of the histogram (OTSU method)
    optimal_threshold, otsu = cv2.threshold(gradient, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    ret_val, binarized = cv2.threshold(gradient, optimal_threshold, 255, cv2.THRESH_BINARY)

    show('Binarized', binarized)
    
    contours, hierarchy = cv2.findContours(binarized, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    polys = [approx_poly(cnt) for cnt in contours]

    poly_img = draw_contours(img, polys)
    show('All polys', poly_img)
    
    min_area = 50*50
    cos_epsilon = .1
    quads = [poly for poly in polys
                if len(poly) == 4
                and cv2.contourArea(poly) > min_area
                and cv2.isContourConvex(poly)
                and max_cos(poly) < cos_epsilon]

    quads_img = draw_contours(img, quads)
    show('Larger rectangular polys', quads_img)
    largest = sorted(quads, key=lambda q: cv2.contourArea(q))[-1]

    page_img = draw_contours(img, [largest])
    show('Page', page_img)
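Example No. 6 depends on helpers (show, draw_contours, approx_poly, max_cos) defined elsewhere in its project. Plausible sketches of the two geometric ones, assuming the same conventions as the other examples in this collection and the angle_cos helper shown after Example No. 4 (illustrative only, not the original code):

import cv2
import numpy as np

def approx_poly(cnt, eps_fraction=0.02):
    # Polygon approximation with epsilon proportional to the contour perimeter.
    return cv2.approxPolyDP(cnt, eps_fraction * cv2.arcLength(cnt, True), True)

def max_cos(poly):
    # Largest cosine over the four corner angles of a quadrilateral.
    pts = poly.reshape(-1, 2)
    return np.max([angle_cos(pts[i], pts[(i + 1) % 4], pts[(i + 2) % 4]) for i in range(4)])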
Example No. 7
def find_triangles(img, image_area):
  img = cv2.GaussianBlur(img, (5, 5), 0)
  allowable_area = image_area - 100000
  squares = []
  for gray in cv2.split(img):
    for thrs in xrange(0, 255, 26):
      if thrs == 0:
        bin = cv2.Canny(gray, 0, 50, apertureSize=5)
        bin = cv2.dilate(bin, None)
      else:
        retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
      contours, hierarchy = cv2.findContours(bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
      print('=========')
      print(len(contours))    
      for idx,cnt in enumerate(contours):
        cnt_len = cv2.arcLength(cnt, True)
        cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
        current_size = cv2.contourArea(cnt)
        if len(cnt) == 3 and current_size > 100 and cv2.isContourConvex(cnt):
          #cnt = cnt.reshape(-1, 2)
          #max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
          #if max_cos < 0.1:
          #  if current_size < allowable_area:
          squares.append(cnt)
  return squares
Example No. 8
def find_squares(image_path):
    img = cv2.imread(image_path)
    img = cv2.GaussianBlur(img, (5, 5), 0)
    onesquare = []
    squares = []
    preview = []

    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1:
                        onesquare.append((cnt[0][0], cnt[0][1]))
                        onesquare.append((cnt[1][0], cnt[1][1]))
                        onesquare.append((cnt[2][0], cnt[2][1]))
                        onesquare.append((cnt[3][0], cnt[3][1]))
                        squares.append(onesquare)
                        onesquare = []
                        preview.append(cnt)
    return squares, preview
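In Example No. 8 the squares list holds plain (x, y) corner tuples while preview keeps the approximated contours. A small visualization sketch for the preview output (the input path is hypothetical, not from the original):

import cv2
import numpy as np

squares, preview = find_squares('input.png')
img = cv2.imread('input.png')
# drawContours accepts the reshaped (N, 2) int arrays collected in preview
cv2.drawContours(img, [np.int32(c) for c in preview], -1, (0, 255, 0), 2)
cv2.imwrite('squares_preview.png', img)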
Example No. 9
    def detecta_gafet(self, frame_inicial):
        """An image captured the instant motion is detected is opened, so it can be processed."""
        from glob import glob

        cv.SaveImage("output.png", frame_inicial)
        for fn in glob("output.png"):
            img = cv2.imread(fn)
            # remove noise with a blur
            img = cv2.GaussianBlur(img, (5, 5), 0)
            squares = []
            """si se encuentran contornos parecidos alos de un rectangulo es un gafet"""
            for gray in cv2.split(img):
                for thrs in xrange(0, 255, 26):
                    if thrs == 0:
                        # extract the edges and dilate them
                        bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                        bin = cv2.dilate(bin, None)
                    else:
                        retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
                    contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                    for cnt in contours:
                        cnt_len = cv2.arcLength(cnt, True)
                        cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                        if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                            cnt = cnt.reshape(-1, 2)
                            max_cos = np.max(
                                [self.angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in xrange(4)]
                            )
                            # if the approximation matches, it is a rectangle
                            if max_cos < 0.1:
                                squares.append(cnt)
        if squares:
            print "si hay gaffete"
        else:
            self.detect()
Example No. 10
def is_square(contour):
    """
    Squareness checker

    Square contours should:
        -have 4 vertices after approximation, 
        -have relatively large area (to filter out noisy contours)
        -be convex.
        -have angles between sides close to 90deg (cos(ang) ~0 )
    Note: absolute value of an area is used because area may be
    positive or negative - in accordance with the contour orientation
    """
    area = math.fabs( cv2.contourArea(contour) )

    isconvex = cv2.isContourConvex(contour)
    s = 0

    if len(contour) == 4 and area > 1000 and isconvex:

        for i in range(1, 4):
            # find minimum angle between joint edges (maximum of cosine)
            pt1 = contour[i]
            pt2 = contour[i-1]
            pt0 = contour[i-2]
            t = math.fabs(angle(pt0, pt1, pt2))
            if s <= t:
                s = t

        # if cosines of all angles are small (all angles are ~90 degree) 
        # then its a square
        if s < 0.3:
            return True

    return False       
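The angle helper used by is_square is not shown. A sketch of one consistent reading, following OpenCV's squares.cpp sample where the angle is taken at the last argument (assumed, not the original code):

import numpy as np

def angle(pt_a, pt_b, pt_vertex):
    # Cosine of the angle at pt_vertex formed with pt_a and pt_b; the points come
    # straight from the contour, so flatten the extra dimension first.
    a, b, v = [np.asarray(p).ravel().astype('float') for p in (pt_a, pt_b, pt_vertex)]
    d1, d2 = a - v, b - v
    return np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2) + 1e-10)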
Example No. 11
def draw_contours(img, contours, hierarchy, y, detection_color):    
    form_contours = []
    for i, cnt in enumerate(contours):
        epsilon = y * cv2.arcLength(cnt,True) # 0.007, adjusted by experimentation
        # logger.info('For epsilon= ' + str(epsilon))       
        approx = cv2.approxPolyDP(cnt,epsilon,True)

        if hierarchy[_RETR_TREE][i][_ID_CHILD] == -1: # if the contour has no child
            if cv2.isContourConvex(approx): 
                if len(approx) > 5:
                    if is_circle(approx) and cv2.contourArea(approx) > 1000:
                        cv2.drawContours(img,[approx],0 ,detection_color,1)
                        #form_contours.append(approx)
                        form_contours.append(approx)
                        #approx = draw_approx(img, idx(2, i, hierarchy), contours, y, detection_color)
                        if len(approx) > 0: 
                            form_contours.append(approx)
                        # cv2.putText(img, 'o', (int(x + w), int(y + h)), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255))
                    #else:
                        #cv2.drawContours(img,[approx],0 ,(255,255,0),1)
                        # cv2.putText(img, 'D', (int(x + w), int(y + h)), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255))        
                #elif len(approx) == 4:
                #    approx = approx.reshape(-1, 2)
                #    max_cos = np.max([angle_cos( approx[i], approx[(i + 1) % 4], approx[(i + 2) % 4] ) for i in xrange(4)])
                #    if max_cos < 0.1:                        
                #        cv2.drawContours(img,[approx],0 ,(200, 33, 50),1)
                        # cv2.putText(img, 'u"\u25A0"', (int(x + w), int(y + h)), cv2.FONT_HERSHEY_PLAIN, 1, (15, 255, 128))
            #else:
                #cv2.drawContours(img,[approx],0 ,(0, 255, 0),1) 
                # cv2.putText(img, 'c', (int(x + w), int(y + h)), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,0))  
    return form_contours
Example No. 12
def imgproc(frame):
    # convert color to gray scale and show it
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray', gray)

    blur = cv2.blur(gray, (5, 5))
    edge = cv2.Canny(blur, 10, 100)
    edge = cv2.blur(edge, (2, 2))
    cv2.imshow('blured edge', edge)

    # convert image to black and white and show it
    thresh1, thresh = cv2.threshold(edge, 60, 120, cv2.THRESH_BINARY)
    cv2.imshow('thresh', thresh)

    # find contours!
    contours, hry = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # draw all the contours
    cpframe = frame.copy()
    cv2.drawContours(cpframe, contours, -1, (0, 255, 0), 3)
    cv2.imshow('cpframe', cpframe)

    # ================== TODO ===================

    # Modify these code to suit your need
    contours = [ctr for ctr in contours if cv2.contourArea(ctr) > 100]
    contours = [cv2.approxPolyDP(ctr, 5, True) for ctr in contours]
    contours = [ctr for ctr in contours if len(ctr) == 4]
    contours = [ctr for ctr in contours if cv2.isContourConvex(ctr)]

    # ============================================

    # draw on the frame
    cv2.drawContours(frame, contours, -1, (0, 255, 0), 3)
    return frame
Example No. 13
def find_cards(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    cards = []

    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                leftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])
                rightmost = tuple(cnt[cnt[:, :, 0].argmax()][0])
                topmost = tuple(cnt[cnt[:, :, 1].argmin()][0])
                bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1 and cv2.contourArea(cnt) < 50000:
                        cards.append(cv2.boundingRect(np.array([leftmost, rightmost, topmost, bottommost])))
    return cards
Example No. 14
def find_squares(img):
    #img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []

    imgHSV = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    hChannelImg = imgHSV[:,:,0]
    sChannelImg = imgHSV[:,:,1]

    blurredHImg = cv2.GaussianBlur(hChannelImg,(11,11),0,0)
    blurredSImg = cv2.GaussianBlur(sChannelImg,(11,11),0,0)
    hThreshImg = cv2.inRange(blurredHImg,0,10)
    sThreshImg = cv2.inRange(blurredSImg,155,255)
    combImg = cv2.bitwise_and(hThreshImg,sThreshImg)

    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                #finds the edges of the square using canny and dilate
                bin = cv2.Canny(gray, 0, 20, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
Example No. 15
def find_shapes(img):
    squares = []
    circles = []
    other = []
    contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        cnt_len = cv2.arcLength(cnt, True)
        cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
        area=cv2.contourArea(cnt)
        if(area>=1000):
           pass
           #print "Contour:",len(cnt),cnt_len,area,abs(cnt_len/math.sqrt(area) -2*math.sqrt(math.pi))
        if(area < 1000):
           #other.append(cnt)
           # ignore speckles
           pass
        elif len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
            cnt = cnt.reshape(-1, 2)
            max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
            if max_cos < 0.1:
                squares.append(cnt)
            else:
                other.append(cnt)
        else:
            # for a circle, the perimeter-to-sqrt(area) ratio is 2*sqrt(pi)
            if(abs(cnt_len/math.sqrt(area)-2*math.sqrt(math.pi)) < 0.5):
                circles.append(cnt)
            else: 
                other.append(cnt)
    return squares,circles,other
Example No. 16
def imgproc(frame):
    
    # convert color to gray scale and show it
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray', gray)
    
    # convert image to black and white and show it
    thresh1, thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)
    cv2.imshow('thresh', thresh)
    
    # find contours!
    contours, hry = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    
    # draw all the contours
    cpframe = frame.copy()
    cv2.drawContours(cpframe, contours, -1, (0,255,0), 3)
    cv2.imshow('cpframe', cpframe)
    
    # do various tests and modification
    contours = [ctr for ctr in contours if cv2.contourArea(ctr) > 100]
    contours = [cv2.approxPolyDP(ctr, 30 , True) for ctr in contours]
    contours = [ctr for ctr in contours if cv2.isContourConvex(ctr)]
    
    # draw on the frame
    cv2.drawContours(frame, contours, -1, (0,255,0), 3)
    
    return frame
Example No. 17
    def __markerDetect(self, img_gray, min_size, min_side_length):
        possible_markers = []
        # thresh_size = (min_size / 4) * 2 + 1
        _,img_bin = cv2.threshold(img_gray, 125, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
        all_contours, _ = cv2.findContours(img_bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        contours = []
        for i in range(0, len(all_contours)):
            if(len(all_contours[i]) > min_size):
                contours.append(all_contours[i])

        for i in range(0, len(contours)):
            eps = len(contours[i]) * APPROX_POLY_EPS
            approx_poly = cv2.approxPolyDP(contours[i], eps, True)

            if(len(approx_poly)!=4):
                continue
            if(not cv2.isContourConvex(approx_poly)):
                continue

            min_side = 3.4028e38
            for j in range(0, 4):
                side = approx_poly[j]- approx_poly[(j+1)%4]
                min_side = min(min_side, side[0].dot(side[0]))
            if min_side < min_side_length * min_side_length:
                continue

            marker = Marker(0, approx_poly[0][0], approx_poly[1][0], approx_poly[2][0], approx_poly[3][0])
            v1 = marker.m_corners[1] - marker.m_corners[0]
            v2 = marker.m_corners[2] - marker.m_corners[0]
            if np.cross(v1, v2) > 0:
                temp = marker.m_corners[3]
                marker.m_corners[3] = marker.m_corners[1]
                marker.m_corners[1] = temp
            possible_markers.append(marker)
        return possible_markers
Example No. 18
    def filter_contours(frame, contours):
        # remove faulty small contours detected by applying threshold on contour area
        # First calc avg area of a contour
        contour_area_sum = 0
        for i in range(len(contours)):
            contour_area_sum += cv2.contourArea(contours[i])
        contour_avg_area = contour_area_sum / len(contours)
        # Get the above average contours and not convex
        possible_hand_contour = []
        for i in range(len(contours)):
            if cv2.contourArea(contours[i]) > contour_avg_area and not cv2.isContourConvex(contours[i]):
                possible_hand_contour.append(contours[i])
        # Now convert possible hand contours to convex
        convex_hull = []
        for y in range(len(possible_hand_contour)):
            convex_hull.append(cv2.convexHull(possible_hand_contour[y]))

        # # Now get mean color of every convex hull and return the closest to the peak of the hand histogram
        # mean_colors = []
        # hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # for i in range(len(convex_hull)):
        #     mask = np.zeros(hsv.shape[:2], np.uint8)
        #     cv2.drawContours(mask, [convex_hull[i]], 0, 255, -1)
        #     mean_colors.append(cv2.mean(hsv, mask=mask)[0])

        # detect hand by find max area (not accurate technique)
        max_area = 0
        max_hull = None
        for i in range(len(convex_hull)):
            hull = convex_hull[i]
            area = cv2.contourArea(hull)
            if area > max_area:
                max_area = area
                max_hull = hull
        return [max_hull]
Example No. 19
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    roi = None  # stays None if no matching square is found
    lists = []
    for thrs in xrange(0, 255, 26):
        if thrs == 0:
            bin = cv2.Canny(img, 0, 50, apertureSize=5)  #edge detection
            bin = cv2.dilate(bin, None) #dilation
        else:
            retval, bin = cv2.threshold(img, thrs, 255, cv2.THRESH_BINARY)

        contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) #get the contours

        for cnt in contours:

            cnt_len = cv2.arcLength(cnt, True) #compute the contour perimeter
            cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
            if len(cnt) == 4 and cv2.contourArea(cnt) > 500 and cv2.isContourConvex(cnt):
                cnt = cnt.reshape(-1, 2)

                max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)]) #check whether the corner angles are ~90°

                if max_cos < 0.1 and horizontal(cnt[0],cnt[1]) <5 and isDigit(cnt[0],cnt[2]) :
                    a = cnt.tolist()
                    if a not in lists:
                        lists.append(cnt.tolist())
                        rect = rectu(cnt[0],cnt[2])
                        roi = img[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]]

                    if len(lists) > 0:
                        break
        if len(lists) > 0:
            break


    return roi
Example No. 20
    def updateRobotPosition(self, frame, map):
        blurMapImage = cv2.GaussianBlur(frame, (5, 5), 0)
        for gray in cv2.split(blurMapImage):
            for threshold in xrange(0, 255, 24):
                if threshold == 0:
                    binary = cv2.Canny(gray, 0, 100, apertureSize=5)
                    binary = cv2.dilate(binary, None)
                else:
                    retval, binary = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
                contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                for contour in contours:
                    contour_len = cv2.arcLength(contour, True)
                    contour = cv2.approxPolyDP(contour, 0.02*contour_len, True)

                    if cv2.contourArea(contour) > 100 and cv2.isContourConvex(contour) and cv2.contourArea(contour) < 800:
                        myShape = self.shapeFactory.ConstructShape(contour)
                        myShape.findColor(frame)

                        if(myShape.getColorName() == "Purple" and myShape.getName() == "Circle"):
                            map.robot.purpleCircle = myShape

                        if(myShape.getColorName() == "Black" and myShape.getName() == "Circle"):
                            map.robot.blackCircle = myShape
        map.robot.setCenter()
        map.robot.setOrientation()
Example No. 21
def get_contour_properties(cnt, as_dict=True):
    x, y, w, h = cv2.boundingRect(cnt)
    area = cv2.contourArea(cnt)
    perimeter = cv2.arcLength(cnt, True)

    # # a convex polygon has no indents in any edges
    # # a concave polygon has an indent in at least one edge
    is_convex = cv2.isContourConvex(cnt)

    rotated_rectangle, angle_of_rotation = get_rotated_rec(cnt)
    aspect_ratio = float(w) / h
    rect_area = w*h
    extent = float(area)/rect_area

    chull = cv2.convexHull(cnt)
    chull_area = cv2.contourArea(chull)

    try:
        solidity = float(area)/chull_area
    except ZeroDivisionError:
        solidity = None

    compactness = get_compactness_from_roundness(cnt)
    roundness = get_roundness(cnt)

    if as_dict:
        contour_properties = {
            "x": x,
            "y": y,
            "w": w,
            "h": h,
            "area": area,
            "perimeter": perimeter,
            "is_convex": is_convex,
            "angle_of_rotation": angle_of_rotation,
            "aspect_ratio": aspect_ratio,
            "extent": extent,
            "solidity": solidity,
            "compactness": compactness,
            "roundness": roundness
        }
    else:
        contour_properties = [
            x,
            y,
            w,
            h,
            area,
            perimeter,
            is_convex,
            angle_of_rotation,
            aspect_ratio,
            extent,
            solidity,
            compactness,
            roundness
        ]

    return contour_properties
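get_rotated_rec, get_roundness and get_compactness_from_roundness are project helpers not included in the snippet. A rough sketch of what the first two commonly look like (assumed, not the original code):

import math
import cv2

def get_rotated_rec(cnt):
    # Minimum-area rotated bounding rectangle and its rotation angle.
    rect = cv2.minAreaRect(cnt)
    return rect, rect[2]

def get_roundness(cnt):
    # 4*pi*area / perimeter^2: 1.0 for a perfect circle, lower for elongated shapes.
    area = cv2.contourArea(cnt)
    perimeter = cv2.arcLength(cnt, True)
    return 4.0 * math.pi * area / (perimeter ** 2) if perimeter > 0 else 0.0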
Example No. 22
def _is_valid_screen_contour(im, contour):
    if len(contour) != 4 or not cv2.isContourConvex(contour):
        return False

    # Make sure it's fully contained in the upper third of the module
    max_height = im.shape[0] / 3
    x, y, w, h = cv2.boundingRect(contour)
    return y < max_height and y + h < max_height
Example No. 23
    def process(self, imageLeftRect, imageRightRect, imageDisparityRect, cameraModel, stereoCameraModel, upper, lower):
        assert(imageLeftRect is not None)
        feedback = TrackObjectFeedback()
        feedback.found = False
        imageHLS = cv2.cvtColor(imageLeftRect, cv2.COLOR_BGR2HLS)
        lower = np.array([0,70,50], dtype = 'uint8')
        upper = np.array([200,255,255], dtype='uint8')
        mask=cv2.inRange(imageHLS, lower,upper) #HLS thresholds
        output = cv2.bitwise_and(imageLeftRect, imageLeftRect, mask=mask)
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(imageLeftRect, "bgr8"))
        #mask=cv2.inRange(imageHSV, np.array([20,30,80],dtype='uint8'),np.array([40,52,120],dtype='uint8'))
        cnts = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)    
        contours = cnts[1]

        if len(contours) == 0:
            print("No contours")
            return feedback

        rects = []
        for contour in contours: #adapted from https://github.com/opencv/opencv/blob/master/samples/python/squares.py
            epsilon = cv2.arcLength(contour, True)*0.05
            contour = cv2.approxPolyDP(contour, epsilon, True)
            if len(contour) == 4 and cv2.isContourConvex(contour):
                contour = contour.reshape(-1, 2)
                max_cos = np.max([angle_cos( contour[i], contour[(i+1) % 4], contour[(i+2) % 4] ) for i in xrange(4)])
                if max_cos < 0.1:
                    rects.append(contour)

        if len(rects) > 1:
            rects = greatestNAreaContours(rects, 2)
            rect1 = list(cv2.minAreaRect(rects[0]))
            rect2 = list(cv2.minAreaRect(rects[1]))

            if(rect1[1][0] < rect1[1][1]): #Fix wonky angles from opencv (I think)
                rect1[2] = (rect1[2] + 180) * 180/3.141
            else:
                rect1[2] = (rect1[2] + 90) * 180/3.141

            if(rect2[1][0] < rect2[1][1]):
                rect2[2] = (rect2[2] + 180) * 180/3.141
            else:
                rect2[2] = (rect2[2] + 90) * 180/3.141

            gateCenter = (int((rect1[0][0] + rect2[0][0])/2), int((rect1[0][1] + rect2[0][1])/2))
            self.feedback_msg.center = gateCenter
            self.feedback_msg.size = imageRightRect.shape
        self.feedback_pub.publish(self.feedback_msg)

        #feedback.center = gateCenter
            #feedback.size = imageRightRect.shape

        if gateCenter[0] - rect1[0][0] > 0:
            feedback.width = (rect2[0][0]+(rect2[1][0]/2)) - (rect1[0][0] - (rect1[1][0]/2))
        else:
            feedback.width = (rect1[0][0] -(rect1[1][0]/2)) - (rect2[0][0]+(rect2[1][0]/2))
        feedback.height = rect1[1][1]
        feedback.found = True
        return feedback
Example No. 24
def _squares(rawimg):
    """Convert given image to gray and split with adaptove
    thresholding to
    detect edges. Filter out contours that are square like
    and return them along with their hierarchy.

    :rawimg: cv2 image
    :returns: list of edges, list of hierarchy

    """
    #Downscale and rescale to remove noise
    frame=cv2.pyrDown(rawimg)

    #lose colors
    gray=cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)

    frame=cv2.pyrUp(gray)

    #Use Otsu threshold to make binary
    #ret,otsu=cv2.threshold(frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #Use adaptive threshold to make binary
    adaptive=cv2.adaptiveThreshold(frame,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,3)


    #find the edges
    edges=cv2.Canny(adaptive,0,255)

    #cv2.imshow("adaptive",adaptive)
    #cv2.imshow("gray",gray)
    #cv2.imshow("edges",edges)

    #Approximate the contours into polygons
    contours,hierarchy=cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

    squares=[]
    hiersq=[]
    for cnt,hier in zip(contours,hierarchy[0]):
        approx=cv2.approxPolyDP(cnt,0.05*cv2.arcLength(cnt,True),True)

        if len(approx)!=4:
            continue
        elif cv2.contourArea(approx)<200:
            continue
        elif not cv2.isContourConvex(approx):
            continue

        length=cv2.arcLength(approx,True)
        area=cv2.contourArea(approx)
        area2=(length/4)*(length/4)

        if area/area2<0.9:
            continue

        squares.append(approx)
        hiersq.append(hier)

    return squares,hiersq
Example No. 25
    def callback(self, data):
        try:
            img = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        #imageHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        contours = ThreshAndContour(img, self.upper, self.lower)
        contours = contours[1]
        #output = cv2.bitwise_and(img, img, mask=mask)

        if len(contours) == 0:
            return None

        rects = []
        #cv2.drawContours(img,contours,-1, (0,255,0), 3)
        for contour in contours: #adapted from https://github.com/opencv/opencv/blob/master/samples/python/squares.py
            epsilon = cv2.arcLength(contour, True)*0.05
            contour = cv2.approxPolyDP(contour, epsilon, True)
            if len(contour) == 4 and cv2.isContourConvex(contour):
                contour = contour.reshape(-1, 2)
                max_cos = np.max([angle_cos( contour[i], contour[(i+1) % 4], contour[(i+2) % 4] ) for i in range(4)])
                if max_cos < 0.1:
                    rects.append(contour)

            if len(rects) > 1:
                rects = sorted(contours, key=cv2.contourArea, reverse=True)
                rect1 = cv2.minAreaRect(rects[0])
                rect2 = cv2.minAreaRect(rects[1])

                if(rect1[1][0] < rect1[1][1]): #Fix wonky angles from opencv (I think)
                    rect1 = (rect1[0], rect1[1], (rect1[2] + 180) * 180/3.141)
                else:
                    rect1 = (rect1[0], rect1[1], (rect1[2] + 90) * 180/3.141)

                if(rect2[1][0] < rect2[1][1]):
                    rect2 = (rect2[0], rect2[1], (rect2[2] + 180) * 180/3.141)
                else:
                    rect2 = (rect2[0], rect2[1], (rect2[2] + 90) * 180/3.141)

                box = cv2.boxPoints(rect1)
                box = np.int0(box)
                #cv2.drawContours(img,[box],-1,(0,0,255),2)
                box = cv2.boxPoints(rect2)
                box = np.int0(box)
                #cv2.drawContours(img,[box],-1,(0,0,255),2)

                gateLocation = None
                gateAxis = None
                gateAngle = None
                gateCenter = (int((rect1[0][0] + rect2[0][0])/2), int((rect1[0][1] + rect2[0][1])/2))
                cv2.circle(img,gateCenter,5,(0,255,0),3)

        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(img,"bgr8"))
        except CvBridgeError as e:
            print(e)
Example No. 26
File: boxes.py  Project: gunnjo/Misc
def filterContour( i, contours, hierarchy):
    minArea = 2.75*1.25 # minimum area in square inches
    ppi = 125.
    if hierarchy[i][3] != -1 : #only parent objects
        return None
    epsilon = 0.1*cv2.arcLength(contours[i],True)
    approx = cv2.approxPolyDP(contours[i],epsilon,True)
    area = cv2.contourArea(approx)
    convex = cv2.isContourConvex(approx)
    print "filtering ", approx, " length: ", len(approx), " area: ", area, " convex: ", convex
    if ( len(approx) != 4  ): #only square objects
        return None
    if not convex: #only convex objects
        return None
    if area < ((minArea*ppi*.9)): #only large enough objects
        return None
    print "accepted ", approx
    return(approx)
Example No. 27
 def numberOfConvexContours(self):
     '''The number of convex contours (i.e. make a circle like an O)'''
     if self.contours is None:
         self.withCanny()
     convex=0
     for c in self.contours:
         if cv2.isContourConvex(c):
             convex += 1
     return convex
Example No. 28
def contour_is_rectangular(contour):
    cnt_len = cv2.arcLength(contour, True)
    contour = cv2.approxPolyDP(contour, 0.02*cnt_len, True)
    if len(contour) == 4 and cv2.contourArea(contour) > 1000 and cv2.isContourConvex(contour):
        contour = contour.reshape(-1, 2)
        max_cos = np.max([angle_cos( contour[i], contour[(i+1) % 4], contour[(i+2) % 4] ) for i in xrange(4)])
        if max_cos < 0.5:
            return True
    return False
Example No. 29
def draw_approx(img, index, contours, y, detection_color):
    cnt = contours[index]
    epsilon = y * cv2.arcLength(cnt,True)
    approx = cv2.approxPolyDP(cnt,epsilon,True)
    if cv2.isContourConvex(approx):
        cv2.drawContours(img,[approx],0 ,detection_color[:3],2) 
        return approx
    else:
        approx = []
        return approx
Example No. 30
_, src_bin = cv2.threshold(src_gray, 0, 255,
                           cv2.THRESH_BINARY | cv2.THRESH_OTSU)

# Detect contours and find the business card
contours, _ = cv2.findContours(src_bin, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_NONE)

for pts in contours:
    # Skip objects that are too small
    if cv2.contourArea(pts) < 10:
        continue

    # Approximate the contour
    approx = cv2.approxPolyDP(pts, cv2.arcLength(pts, True) * 0.02, True)

    # Skip if it is not convex
    if not cv2.isContourConvex(approx) or len(approx) != 4:
        continue

    print(len(approx))

    cv2.polylines(src, [approx], True, (0, 255, 0), 2, cv2.LINE_AA)
    srcQuad = reorderPts(approx.reshape(4, 2).astype(np.float32))

    pers = cv2.getPerspectiveTransform(srcQuad, dstQuad)
    dst = cv2.warpPerspective(src, pers, (dw, dh), flags=cv2.INTER_CUBIC)

    dst_rgb = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
    print(pytesseract.image_to_string(dst_rgb, lang='Hangul+eng'))

cv2.imshow('src', src)
cv2.imshow('src_gray', src_gray)
Example No. 31
def contour(image = 'something', percent_arclength = 0.1):
	img = cv2.imread(image, 0)
	original_image = cv2.imread(image, 0)
	ret, thresh = cv2.threshold(img, 127, 255, 0)
	contours, hierarchy = cv2.findContours(thresh, 1, 2)

	cnt = contours[0]
	M = cv2.moments(cnt)
	#M is the number of moment values calculated

	#You can then calculate centroids from the following lines:
	cx = int(M['m10']/M['m00'])
	cy = int(M['m01']/M['m00'])

	#Contour areas, perimeter, and approximation
	area = cv2.contourArea(cnt)
	perimeter = cv2.arcLength(cnt, True)
	
	#10% of arclength, Contour
	epsilon = percent_arclength*cv2.arcLength(cnt,True)
	approx = cv2.approxPolyDP(cnt,epsilon,True)
	cv2.imwrite('output/approx.jpg', approx)

	#ConvexHull
	hull = cv2.convexHull(cnt)
	cv2.imwrite('output/convex_hull.jpg', hull)
	#Convexity, k
	k = cv2.isContourConvex(cnt)

	print_dict = dict(zip(["M", "cx", "cy", "area", "perimeter", "epsilon", "k"] , [M, cx, cy, area, perimeter, epsilon, k]))
	print(str(print_dict))

	#Bounding Rectangle
	x,y,w,h = cv2.boundingRect(cnt)
	img1 = cv2.rectangle(original_image,(x,y),(x+w,y+h),(0,255,0),2)

	cv2.imwrite('output/BoundingRect.jpg', img1)

	img = reload_image(image)

	#Rotated Bounding Rectangle
	rect = cv2.minAreaRect(cnt)
	box = cv2.boxPoints(rect)
	box = np.int0(box)
	img5 = cv2.drawContours(img,[box],0,(0,0,255),2)

	cv2.imwrite('output/BoundingRectRotated.jpg', img5)

	img = reload_image(image)

	#Minimum Enclosing Circle
	(x,y),radius = cv2.minEnclosingCircle(cnt)
	center = (int(x),int(y))
	radius = int(radius)
	img2 = cv2.circle(img,center,radius,(0,255,0),2)
	cv2.imwrite('output/MinCircle.jpg', img2)

	img = reload_image(image)

	#Fitting Ellipse
	ellipse = cv2.fitEllipse(cnt)
	img4 = cv2.ellipse(img,ellipse,(0,255,0),2)
	cv2.imwrite('output/FitEllipse.jpg', img4)
	
	img = reload_image(image)

	#Fitting a line
	rows,cols = img.shape[:2]
	[vx,vy,x,y] = cv2.fitLine(cnt, cv2.DIST_L2,0,0.01,0.01)
	lefty = int((-x*vy/vx) + y)
	righty = int(((cols-x)*vy/vx)+y)
	img3 = cv2.line(img,(cols-1,righty),(0,lefty),(0,255,0),2)
	cv2.imwrite('output/FitLine.jpg', img3)
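reload_image is not defined in the snippet above; given that the image is first loaded with cv2.imread(image, 0), it is presumably just a fresh read so each drawing step starts from a clean copy (assumed helper, not the original):

import cv2

def reload_image(image):
    # Re-read the grayscale image from disk, mirroring the initial load.
    return cv2.imread(image, 0)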
Example No. 32
    def get_contour_shape(self, contour):

        shape = None
        peri = cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, 0.04 * peri, True)
        length = len(approx)
        points = []

        if cv2.isContourConvex(approx):
            if length == 3:

                for i in range(length):
                    points.append((approx[i][0][0], approx[i][0][1]))

                distances = [
                    distance.euclidean(points[0], points[1]),
                    distance.euclidean(points[0], points[2]),
                    distance.euclidean(points[1], points[2])
                ]

                my_mean = np.mean(distances)

                wrong_size = False

                for ele in distances:
                    if abs(ele - my_mean) > 0.1 * my_mean:
                        wrong_size = True

                if not wrong_size:
                    shape = Values.TRIANGLE

            elif length == 4:

                for i in range(length):
                    points.append((approx[i][0][0], approx[i][0][1]))

                distances = [
                    distance.euclidean(points[0], points[1]),
                    distance.euclidean(points[0], points[2]),
                    distance.euclidean(points[0], points[3]),
                    distance.euclidean(points[1], points[2]),
                    distance.euclidean(points[1], points[3]),
                    distance.euclidean(points[2], points[3])
                ]

                distances.sort()

                first_four = distances[:4]

                my_mean = np.mean(first_four)

                wrong_size = False

                for ele in first_four:
                    if abs(ele - my_mean) > 0.1 * my_mean:
                        wrong_size = True

                if not wrong_size:
                    shape = Values.SQUARE

            elif 7 < length < 23:
                shape = Values.CIRCLE

        return shape, points
Example No. 33
    # Draw contours
    drawing = np.zeros((canny.shape[0], canny.shape[1], 3), dtype=np.uint8)
    for i in range(len(contours)):
        color = (0, 255, 0)
        cv.drawContours(drawing, contours, i, color, 1, cv.LINE_8, hierarchy,
                        0)
    # Show in a window
    cv.imshow('Contours', drawing)

    squares = []
    triangles = []
    angle = 0
    for cnt in contours:
        cnt_len = cv.arcLength(cnt, True)
        cnt = cv.approxPolyDP(cnt, 0.02 * cnt_len, True)
        if len(cnt) == 4 and cv.contourArea(cnt) > 200 and cv.isContourConvex(
                cnt):
            cnt = cnt.reshape(-1, 2)
            approxCurve = cnt
            sum_angles = quad_sum(cnt)
            minCornerDistancePixels = len(cnt) * minCornerDistanceRate

            for j in range(0, 4):
                d = (approxCurve[j][0] - approxCurve[
                    (j + 1) % 4][0]) * (approxCurve[j][0] - approxCurve[
                        (j + 1) % 4][0]) + (approxCurve[j][1] - approxCurve[
                            (j + 1) % 4][1]) * (approxCurve[j][1] -
                                                approxCurve[(j + 1) % 4][1])
                minDistSq = min(minDistSq, d)

            if sum_angles == 360 and minDistSq >= minCornerDistancePixels * minCornerDistancePixels:
                tooNearBorder = False
Example No. 34
def detect_markers(img):
    """
    This is the main function for detecting markers in an image.

    Input:
      img: a color or grayscale image that may or may not contain a marker.

    Output:
      a list of found markers. If no markers are found, then it is an empty list.
    """
    if len(img.shape) > 2:
        width, height, _ = img.shape
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        width, height = img.shape
        gray = img

    edges = cv2.Canny(gray, 10, 100)
    contours, hierarchy = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    # We only keep the long enough contours
    min_contour_length = min(width, height) / 50
    contours = [contour for contour in contours if len(contour) > min_contour_length]
    warped_size = 49
    canonical_marker_coords = array(
        (
            (0, 0),
            (warped_size - 1, 0),
            (warped_size - 1, warped_size - 1),
            (0, warped_size - 1)
        ),
        dtype='float32')

    markers_list = []
    for contour in contours:
        approx_curve = cv2.approxPolyDP(contour, len(contour) * 0.01, True)
        if not (len(approx_curve) == 4 and cv2.isContourConvex(approx_curve)):
            continue

        sorted_curve = array(
            cv2.convexHull(approx_curve, clockwise=False),
            dtype='float32'
        )
        persp_transf = cv2.getPerspectiveTransform(sorted_curve, canonical_marker_coords)
        warped_img = cv2.warpPerspective(img, persp_transf, (warped_size, warped_size))

        # do i really need to convert twice?
        if len(warped_img.shape) > 2:
            warped_gray = cv2.cvtColor(warped_img, cv2.COLOR_BGR2GRAY)
        else:
            warped_gray = warped_img

        _, warped_bin = cv2.threshold(warped_gray, 127, 255, cv2.THRESH_BINARY)
        marker = warped_bin.reshape(
            [MARKER_SIZE, warped_size // MARKER_SIZE, MARKER_SIZE, warped_size // MARKER_SIZE]
        )
        marker = marker.mean(axis=3).mean(axis=1)
        marker[marker < 127] = 0
        marker[marker >= 127] = 1

        try:
            marker = validate_and_turn(marker)
            hamming_code = extract_hamming_code(marker)
            marker_id = int(decode(hamming_code), 2)
            markers_list.append(HammingMarker(id=marker_id, contours=approx_curve))
        except ValueError:
            continue
    return markers_list
Example No. 35
def isConvex(contour):
    return cv2.isContourConvex(contour)
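cv2.isContourConvex tests the polygon exactly as given, so on a raw, noisy contour it almost always returns False; that is why the other examples apply it after cv2.approxPolyDP or cv2.convexHull. A minimal usage sketch:

import cv2
import numpy as np

square = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)
print(cv2.isContourConvex(square))   # True: the four vertices form a convex quadrilateral

dented = np.array([[[0, 0]], [[10, 0]], [[5, 5]], [[10, 10]], [[0, 10]]], dtype=np.int32)
print(cv2.isContourConvex(dented))   # False: the middle vertex creates a concavity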
Example No. 36
  ROI = frame[50:300,380:600]
  cv2.rectangle(frame,(380-2,50-2),(600+2,300+2),(255,0,0),1)
  grayROI = cv2.cvtColor(ROI,cv2.COLOR_BGR2GRAY)

  canny = cv2.Canny(gray,10,150)
  canny = cv2.dilate(canny,None,iterations=2)
  #_,cnts,_ = cv2.findContours(canny,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
  cnts,_ = cv2.findContours(canny,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
  #cv2.drawContours(frame,cnts,-1,(0,255,0),2)
  for c in cnts:
    area = cv2.contourArea(c)
    x,y,w,h = cv2.boundingRect(c)
    epsilon = 0.1*cv2.arcLength(c,True)
    approx = cv2.approxPolyDP(c,epsilon,True)
    if cv2.isContourConvex(c)==False :

      if len(approx)==4 and area>10000:
        cv2.drawContours(frame,[approx],0,(0,255,255),3)
        aspect_ratio = float(w)/h
        if aspect_ratio==1 and w==h:
          #cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),3)
          #cv2.drawContours(frame,[approx],0,(0,255,0),3)
          cv2.putText(frame, "Cubo",(x,y),1,2,(255,255,9),2)
          cv2.imshow("Encontrado",frame.copy())

      if len(approx)==5 and area>10000:
        #cv2.drawContours(frame,[approx],0,(255,0,255),3)
        aspect_ratio = float(w)/h
        cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),3)
        #cv2.drawContours(frame,[approx],0,(0,255,0),3)
Example No. 37
def get_inputs(imageid, pre_df, seg_df = None):
    inputs = []    
    
    pred_msk = get_mask(imageid, pre_df)
    y_pred = measure.label(pred_msk, neighbors=8, background=0)
    ship_pro = pre_df.loc[imageid, 'p_ship'].mean()
    
    props = measure.regionprops(y_pred)
    for i in range(len(props)):
        if props[i].area < 10:
            y_pred[y_pred == i+1] = 0
    pred_labels = measure.label(y_pred, neighbors=8, background=0)
    pred_props = measure.regionprops(y_pred)
    init_count = len(pred_props)

    coords = [pr.centroid for pr in pred_props]
#     print('len(coords)', len(coords))
    if len(coords) > 0:
#         print('make neighbors')
        t = KDTree(coords)
        neighbors100 = t.query_radius(coords, r=50)
        neighbors200 = t.query_radius(coords, r=100)
        neighbors300 = t.query_radius(coords, r=150)
        neighbors400 = t.query_radius(coords, r=200)
        areas = np.asarray([pr.area for pr in props])
        med_area = np.median(areas)
        max_area = np.max(areas)
    
    for i in range(len(pred_props)):
        cur_prop = pred_props[i]
        is_on_border = 1 * ((cur_prop.bbox[0] <= 1) | (cur_prop.bbox[1] <= 1) | (cur_prop.bbox[2] >= y_pred.shape[0] - 1) | (cur_prop.bbox[3] >= y_pred.shape[1] - 1))
  
        msk_reg = pred_labels[cur_prop.bbox[0]:cur_prop.bbox[2], cur_prop.bbox[1]:cur_prop.bbox[3]] == i+1
        pred_reg = y_pred[cur_prop.bbox[0]:cur_prop.bbox[2], cur_prop.bbox[1]:cur_prop.bbox[3]]
        
        contours = cv2.findContours((msk_reg * 255).astype(dtype=np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(contours[1]) > 0:
            cnt = contours[1][0]
            min_area_rect = cv2.minAreaRect(cnt)
        
        inp = []
        inp.append(ship_pro)
        inp.append(cur_prop.area)
        inp.append(cur_prop.area / med_area)
        inp.append(cur_prop.area / max_area)
        if len(contours[1]) > 0:
            inp.append(cv2.isContourConvex(cnt) * 1.0)
            inp.append(min(min_area_rect[1]))
            inp.append(max(min_area_rect[1]))
            if max(min_area_rect[1]) > 0:
                inp.append(min(min_area_rect[1]) / max(min_area_rect[1]))
            else:
                inp.append(0)
            inp.append(min_area_rect[2])
        else:
            inp.append(0)
            inp.append(0)
            inp.append(0)
            inp.append(0)
            inp.append(0)
        inp.append(cur_prop.convex_area)
        inp.append(cur_prop.solidity)
        inp.append(cur_prop.eccentricity)
        inp.append(cur_prop.extent)
        inp.append(cur_prop.perimeter)
        inp.append(cur_prop.major_axis_length)
        inp.append(cur_prop.minor_axis_length)
        if(cur_prop.minor_axis_length > 0):
            inp.append(cur_prop.minor_axis_length / cur_prop.major_axis_length)
        else:
            inp.append(0)
            
        inp.append(cur_prop.euler_number)
        inp.append(cur_prop.equivalent_diameter)
        inp.append(cur_prop.perimeter ** 2 / (4 * cur_prop.area * math.pi))
        
        inp.append(is_on_border)        
        inp.append(init_count)
        inp.append(med_area)
        inp.append(cur_prop.area / med_area)

        inp = extend_neighbor_features(inp, cur_prop, pred_props, neighbors100[i], med_area, max_area)
        inp = extend_neighbor_features(inp, cur_prop, pred_props, neighbors200[i], med_area, max_area)
        inp = extend_neighbor_features(inp, cur_prop, pred_props, neighbors300[i], med_area, max_area)
        inp = extend_neighbor_features(inp, cur_prop, pred_props, neighbors400[i], med_area, max_area)
        
        
        inputs.append(np.asarray(inp))
        
    inputs = np.asarray(inputs)
    if seg_df is None:
        return inputs, pred_labels
    else:
        outputs = []
        truth_labels = get_mask(imageid, seg_df)
        truth_labels = measure.label(truth_labels, neighbors=8, background=0)
        truth_props = measure.regionprops(truth_labels)
        
        m = np.zeros((len(pred_props), len(truth_props)))
        
        for x in range(pred_labels.shape[1]):
            for y in range(pred_labels.shape[0]):
                if pred_labels[y, x] > 0 and truth_labels[y, x] > 0:
                    m[pred_labels[y, x]-1, truth_labels[y, x]-1] += 1
                    
        truth_used = set([])
        for i in range(len(pred_props)): 
            max_iou = 0
            for j in range(len(truth_props)):
                if m[i, j] > 0:
                    iou = m[i, j] / (pred_props[i].area + truth_props[j].area - m[i, j])
                    if iou > max_iou:
                        max_iou = iou
                    if iou > 0.5:
                        truth_used.add(j)
            if max_iou <= 0.5:
                max_iou = 0
            outputs.append(max_iou)
            
        outputs = np.asarray(outputs)
        fn = len(truth_props) - len(truth_used)
        
        return inputs, pred_labels, outputs, fn
Example No. 38
    def find_plates(self):
        """
        Find the license plates in the image

        :rtype: list[(numpy.array, numpy.array)]
        :return: List of tuples containing the plate image and the plate rectangle location
            The plates returned must be a grayscale image with black background and white characters
        """

        # Create a grayscale version of the image
        gray_img = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)

        # Blur the image
        gray_img = cv2.adaptiveBilateralFilter(gray_img, (11, 11), 100)

        if __debug__:
            display.show_image(gray_img, 'Gray')

        blur_kernel_size = (3, 3)
        thresh = cv2.adaptiveThreshold(gray_img, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY_INV, 17, 2)
        blurred = cv2.GaussianBlur(thresh, blur_kernel_size, 0)

        if __debug__:
            display.show_image(blurred, 'Blurred')

        edges = cv2.Canny(blurred, 100, 100, 3)
        if __debug__:
            display.show_image(edges, 'Canny edges')

        contours, hierarchy = cv2.findContours(edges.copy(), cv2.RETR_LIST,
                                               cv2.CHAIN_APPROX_SIMPLE)
        if __debug__:
            display.draw_contours(self.image, contours)

        rectangles = []
        for i in contours:
            area = cv2.contourArea(i)
            if area > 50:
                peri = cv2.arcLength(i, True)
                approx = cv2.approxPolyDP(i, 0.02 * peri, True)

                if len(approx) == 4 and cv2.isContourConvex(approx):
                    if self._check_size(approx):
                        rectangles.append(approx)

        processing_plates = display.get_parts_of_image(self.image, rectangles)
        ret = []

        for i, processing_plate in enumerate(processing_plates):
            processing_plate = cv2.cvtColor(processing_plate,
                                            cv2.COLOR_BGR2GRAY)
            processing_plate = cv2.bitwise_not(processing_plate)
            a, processing_plate = cv2.threshold(processing_plate, 50, 255,
                                                cv2.THRESH_OTSU)

            img_height, img_width = processing_plate.shape
            img_area = img_height * img_width

            # If the area of the plate is below 4500, perform hq2x on the plate
            if img_area < 4500:
                ret.append((cv2.cvtColor(image.hq2x_zoom(processing_plate),
                                         cv2.COLOR_BGR2GRAY), rectangles[i]))
            else:
                ret.append((processing_plate, rectangles[i]))

        return ret
Example No. 39
    def set_pecha_layout(self):
        #         a = cv.erode(self.img_arr.copy(), None,iterations=2)
        #         import Image
        #         Image.fromarray(cbox_arr*255).show()
        a = self.img_arr.copy()

        if self.img_arr.shape[1] > 2 * self.img_arr.shape[0]:
            self._page_type = 'pecha'
        else:
            self._page_type = 'book'

        if self._page_type == 'pecha':  # Page is pecha format
            a = self.draw_hough_outline(a)

        self.img_arr = a.copy()
        self.update_shapes()

        #        a = cv.morphologyEx(a, cv.MORPH_OPEN, None, iterations=5)
        #        a = cv.medianBlur(a, 9)
        #         import Image
        #         Image.fromarray(a*255).show()
        a = cv.GaussianBlur(a, (5, 5), 0)
        #        print a.dtype
        #        a = cv.GaussianBlur(a, (5, 5), 0)
        #        a = self.img_arr.copy()
        #         n = np.ones_like(a)
        _, contours, hierarchy = cv.findContours(a.copy(),
                                                 mode=cv.RETR_TREE,
                                                 method=cv.CHAIN_APPROX_SIMPLE)

        ## Most of this logic for identifying rectangles comes from the
        ## squares.py sample in opencv source code.
        def angle_cos(p0, p1, p2):
            d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')
            return abs(
                np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))

        border_boxes = []
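        # angle_cos returns |cos| of the corner angle at p1; values close to 0 mean the
        # corner is close to 90 degrees, so the max_cos < 0.1 test below keeps only
        # near-rectangular quadrilaterals.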

        for j, cnt in enumerate(contours):
            cnt_len = cv.arcLength(cnt, True)
            orig_cnt = cnt.copy()
            cnt = cv.approxPolyDP(cnt, 0.02 * cnt_len, True)
            if len(cnt) == 4 and cv.contourArea(
                    cnt) > 1000 and cv.isContourConvex(cnt):
                cnt = cnt.reshape(-1, 2)
                max_cos = np.max([
                    angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4])
                    for i in range(4)
                ])
                if max_cos < 0.1:
                    #                    print 'got one at %d' % j
                    #                    n = np.ones_like(a)
                    b = cv.boundingRect(orig_cnt)
                    #                     if self.clear_hr:
                    #                         print 'Warning: you are clearing text on a pecha page'
                    #                         self.img_arr[0:b[1]+b[3], :] = 1
                    x, y, w, h = b
                    #                    b = [x+10, y+10, w-10, h-10]
                    border_boxes.append(b)
#                     cv.rectangle(n, (x,y), (x+w, y+h), 0)
#                     cv.drawContours(n, [cnt], -1,0, thickness = 5)
#                    import Image
#                    Image.fromarray(n*255).save('/tmp/rectangles_%d.png' % j )

#         import Image
#         Image.fromarray(n*255).show()
        border_boxes.sort(key=lambda b: (b[0], b[1]))

        #border_boxes = border_boxes

        def get_edges(b):
            l = b[0]
            r = b[0] + b[2]
            t = b[1]
            b = b[1] + b[3]
            return (l, r, t, b)

        def bid(b):
            return '%d-%d-%d-%d' % (b[0], b[1], b[2], b[3])

        tree = {}
        for b in border_boxes:
            tree[bid(b)] = {
                'chars': [],
                'b': b,
                'boxes': [],
                'num_boxes': 0,
                'num_chars': 0
            }

        def b_contains_nb(b, nb):
            l1, r1, t1, b1 = get_edges(b)
            l2, r2, t2, b2 = get_edges(nb)
            return l1 <= l2 and r2 <= r1 and t1 <= t2 and b1 >= b2

        for i, b in enumerate(border_boxes):
            bx, by, bw, bh = b
            self.img_arr[by:by + 1, bx + 3:bx + bw - 3] = 1
            self.img_arr[by + bh - 1:by + bh, bx + 3:bx + bw - 3] = 1
            for nb in border_boxes[i + 1:]:
                if b_contains_nb(b, nb):
                    tree[bid(b)]['boxes'].append(bid(nb))
                    tree[bid(b)]['num_boxes'] = len(tree[bid(b)]['boxes'])

        self.update_shapes()
        #         import Image
        #         Image.fromarray(self.img_arr*255).show()

        tree_keys = sorted(tree.keys(), key=lambda x: tree[x]['num_boxes'])

        ## Assign contours to boxes
        for i in self.get_indices():
            for k in tree_keys:
                box = tree[k]
                b = box['b']

                #                print box['num_boxes']
                char_box = self.get_boxes()[i]
                if b_contains_nb(b, char_box):
                    tree[k]['chars'].append(i)
                    tree[k]['num_chars'] = len(tree[k]['chars'])
                    break
#        import pprint
#        pprint.pprint(tree)

        def qualified_box(bx):
            '''Helper function that ignores boxes that contain other boxes.
            This is useful for finding the main content box which should
            be among the innermost boxes that have no box children '''

            if tree[bx]['num_boxes'] == 0:
                return tree[bx]['num_chars']
            else:
                return -1

#        content_box = max(tree, key=lambda bx: tree[bx]['num_chars'])

        content_box = max(tree, key=qualified_box)
        #        print tree[content_box]['num_chars']
        #        self.indices = [i for i in tree[content_box]['chars'] if self.boxes[i][2] >= (np.floor(self.tsek_mean) -
        #               self.small_coef * np.floor(self.tsek_std))]
        #         self.indices = [i for i in tree[content_box]['chars'] if self.boxes[i][2] >= (np.floor(self.tsek_mean) -
        #                1.5 * np.floor(self.tsek_std))]
        self.indices = [
            i for i in tree[content_box]['chars'] if self.boxes[i][2] >= 7
        ]

        self.detect_num_lines(tree[content_box])
Ejemplo n.º 40
0
 def is_convex(self):
     '''Return True if the contour is convex, False otherwise.'''
     return cv2.isContourConvex(self._contour)
Ejemplo n.º 41
0
    def process(self, inframe, outframe):
        # Get the next camera image (may block until it is captured) and here convert it to OpenCV BGR by default. If
        # you need a grayscale image instead, just use getCvGRAY() instead of getCvBGR(). Also supported are getCvRGB()
        # and getCvRGBA():
        inimg = inframe.getCvBGR()
        
        # Start measuring image processing time (NOTE: does not account for input conversion time): 
        # Truly useless and can be removed
        self.timer.start()
        #Convert the image from BGR(RGB) to HSV
        hsvImage = cv2.cvtColor( inimg, cv2.COLOR_BGR2HSV)
        
        ## Threshold HSV Image to find specific color
        binImage = cv2.inRange(hsvImage, (lowerHue, lowerSat, lowerVal), (upperHue, upperSat, upperVal))
        
        # Erode image to remove noise if necessary.
        binImage = cv2.erode(binImage, None, iterations = errode)
        #Dilate image to fill in gaps
        binImage = cv2.dilate(binImage, None, iterations = dilate)
        
        #This image is used to display the thresholded image. Bounding Rectangle is added below.
        #Use this image to tune your targeting parameters.
        binOut = cv2.cvtColor(binImage, cv2.COLOR_GRAY2BGR)
        
        ##Finds contours (like finding edges/sides), 'contours' is what we are after
        im2, contours, hierarchy = cv2.findContours(binImage, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
        
        ## arrays that will hold the good/bad polygons
        squares = []
        badPolys = []
        
        ## Parse through contours to find targets
        for c in contours:
            if (contours != None) and (len(contours) > 0):
                cnt_area = cv2.contourArea(c)
                hull = cv2.convexHull(c , 1)
                hull_area = cv2.contourArea(hull)  #Used in Solidity calculation
                p = cv2.approxPolyDP(hull, approx, 1)
                if (cv2.isContourConvex(p) != False) and (len(p) == 4) and (cv2.contourArea(p) >= area): #p=3 triangle,4 rect,>=5 circle
                    filled = cnt_area/hull_area
                    if filled <= solidity: #Used to determine if target is hollow or not
                        squares.append(p)
                else:
                    badPolys.append(p)
        
        ##BoundingRectangles are just CvRectangles, so they store data as (x, y, width, height)
        ##Calculate and draw the center of the target based on the BoundingRect
        for s in squares:        
            br = cv2.boundingRect(s)
            #Target "x" and "y" center 
            x = br[0] + (br[2]/2)
            y = br[1] + (br[3]/2)

            
        for s in squares:
            if len(squares) > 0:
                #Build "pixels" array to contain info desired to be sent to RoboRio

                cv2.rectangle(binOut, (br[0],br[1]),((br[0]+br[2]),(br[1]+br[3])),(0,0,255), 2,cv2.LINE_AA)




        # Convert our BGR output image to video output format and send to host over USB. If your output image is not
        # BGR, you can use sendCvGRAY(), sendCvRGB(), or sendCvRGBA() as appropriate:
        outframe.sendCvBGR(binOut)
        
        #Write calibration values to a text file named "Calibration" 
        CalFile = open('Calibration', 'w')
        CalFile.truncate()#Clear out old calibration values
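        # The values below are written as one comma-separated record in a fixed order:
        # upperHue, lowerHue, upperSat, lowerSat, upperVal, lowerVal, errode, dilate, approx, area, solidity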
        CalFile.write(str(upperHue))
        CalFile.write(",")
        CalFile.write(str(lowerHue))
        CalFile.write(",")
        CalFile.write(str(upperSat))
        CalFile.write(",")
        CalFile.write(str(lowerSat))
        CalFile.write(",")
        CalFile.write(str(upperVal))
        CalFile.write(",")
        CalFile.write(str(lowerVal))
        CalFile.write(",")
        CalFile.write(str(errode))
        CalFile.write(",")
        CalFile.write(str(dilate))
        CalFile.write(",")
        CalFile.write(str(approx))
        CalFile.write(",")
        CalFile.write(str(area))
        CalFile.write(",")
        CalFile.write(str(solidity))
        
        CalFile.close()#Close calibration file
Ejemplo n.º 42
0
def LLR(img, points, lines):
	print(utils.call("LLR(img, points, lines)"))
	old = points

	# --- convex hull
	def __convex_approx(points, alfa=0.01):
		hull = scipy.spatial.ConvexHull(na(points)).vertices
		cnt = na([points[pt] for pt in hull])
		approx = cv2.approxPolyDP(cnt,alfa*\
				 cv2.arcLength(cnt,True),True)
		return llr_normalize(itertools.chain(*approx))
	# ---

	# --- geometry
	__cache = {}
	def __dis(a, b):
		idx = hash("__dis" + str(a) + str(b))
		if idx in __cache: return __cache[idx]
		__cache[idx] = np.linalg.norm(na(a)-na(b))
		return __cache[idx]

	nln = lambda l1, x, dx: \
		np.linalg.norm(np.cross(na(l1[1])-na(l1[0]),
								na(l1[0])-na(   x)))/dx
	# ---

	pregroup = [[], []]                   # split into 2 groups (for the frame)
	S = {}                                # ranking of frames // with their scores

	points = llr_correctness(llr_normalize(points), img.shape) # fix up the points

	# --- clustering
	import sklearn.cluster
	__points = {}; points = llr_polysort(points); __max, __points_max = 0, []
	alfa = math.sqrt(cv2.contourArea(na(points))/49)
	X = sklearn.cluster.DBSCAN(eps=alfa*4).fit(points) # **(1.3)
	for i in range(len(points)): __points[i] = []
	for i in range(len(points)):
		if X.labels_[i] != -1: __points[X.labels_[i]] += [points[i]]
	for i in range(len(points)):
		if len(__points[i]) > __max:
			__max = len(__points[i]); __points_max = __points[i]
	if len(__points) > 0 and len(points) > 49/2: points = __points_max
	print(X.labels_)
	# ---

	# build the outer ring
	ring = __convex_approx(llr_polysort(points))

	n = len(points); beta = n*(5/100) # beta = n*(100 - (LAPS effectiveness))
	alfa = math.sqrt(cv2.contourArea(na(points))/49) # average grid spacing from the hull

	x = [p[0] for p in points]          # find the central
	y = [p[1] for p in points]          # point of the cluster
	centroid = (sum(x) / len(points), \
			    sum(y) / len(points))

	print(alfa, beta, centroid)

	#        C (x2, y2)        d=(x_1−x_0)^2+(y_1−y_0)^2, t=d_t/d
	#      B (x1, y1)          (x_2,y_2)=(((1−t)x_0+tx_1),((1−t)y_0+ty_1))
	#    .                    t=(x_0-x_2)/(x_0-x_1)
	#  .
	# A (x0, y0)
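	# __v extends a near-vertical line to the image borders (x_2 = 0 and x_2 = img.shape[0]),
	# __h does the same for a near-horizontal line (x_2 = 0 and x_2 = img.shape[1]); each
	# returns the extended endpoints plus the scores of the two half-frame polygons that
	# the extended line cuts the image into.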

	def __v(l):
		y_0, x_0 = l[0][0], l[0][1]
		y_1, x_1 = l[1][0], l[1][1]
		
		x_2 = 0;            t=(x_0-x_2)/(x_0-x_1+0.0001)
		a = [int((1-t)*x_0+t*x_1), int((1-t)*y_0+t*y_1)][::-1]

		x_2 = img.shape[0]; t=(x_0-x_2)/(x_0-x_1+0.0001)
		b = [int((1-t)*x_0+t*x_1), int((1-t)*y_0+t*y_1)][::-1]

		poly1 = llr_polysort([[0,0], [0, img.shape[0]], a, b])
		s1 = llr_polyscore(na(poly1), points, centroid, beta=beta, alfa=alfa/2)
		poly2 = llr_polysort([a, b, \
				[img.shape[1],0], [img.shape[1],img.shape[0]]])
		s2 = llr_polyscore(na(poly2), points, centroid, beta=beta, alfa=alfa/2)
		
		return [a, b], s1, s2

	def __h(l):
		x_0, y_0 = l[0][0], l[0][1]
		x_1, y_1 = l[1][0], l[1][1]
		
		x_2 = 0;            t=(x_0-x_2)/(x_0-x_1+0.0001)
		a = [int((1-t)*x_0+t*x_1), int((1-t)*y_0+t*y_1)]

		x_2 = img.shape[1]; t=(x_0-x_2)/(x_0-x_1+0.0001)
		b = [int((1-t)*x_0+t*x_1), int((1-t)*y_0+t*y_1)]

		poly1 = llr_polysort([[0,0], [img.shape[1], 0], a, b])
		s1 = llr_polyscore(na(poly1), points, centroid, beta=beta, alfa=alfa/2)
		poly2 = llr_polysort([a, b, \
				[0, img.shape[0]], [img.shape[1], img.shape[0]]])
		s2 = llr_polyscore(na(poly2), points, centroid, beta=beta, alfa=alfa/2)

		return [a, b], s1, s2

	for l in lines: # go through every line
		for p in points: # reject lines that do not fit
			# (1) the line passes close to a good point
			t1 = nln(l, p, __dis(*l)) < alfa
			# (2) the line stays away from the cluster centre
			t2 = nln(l, centroid, __dis(*l)) > alfa * 2.5 # 3
			# (3) the line belongs to the ring
			# t3 = True if p in ring else False
			if t1 and t2:
			#if (t1 and t2) or (t1 and t3 and t2): # [1 and 2] or [1 and 3 and 2]
				tx, ty = l[0][0]-l[1][0], l[0][1]-l[1][1]
				if abs(tx) < abs(ty): ll, s1, s2 = __v(l); o = 0
				else:                 ll, s1, s2 = __h(l); o = 1
				if s1 == 0 and s2 == 0: continue
				pregroup[o] += [ll]

	pregroup[0] = llr_unique(pregroup[0])
	pregroup[1] = llr_unique(pregroup[1])

	from corner_detection.laps import laps_intersections
	debug.image(img) \
		.lines(lines, color=(0,0,255)) \
		.points(laps_intersections(lines), color=(255,0,0), size=2) \
	.save("llr_debug_1")

	debug.image(img) \
		.points(laps_intersections(lines), color=(0,0,255), size=2) \
		.points(old, color=(0,255,0)) \
	.save("llr_debug_2")

	debug.image(img) \
		.lines(lines, color=(0,0,255)) \
		.points(points, color=(0,0,255)) \
		.points(ring, color=(0,255,0)) \
		.points([centroid], color=(255,0,0)) \
	.save("llr_debug")
	
	debug.image(img) \
		.lines(pregroup[0], color=(0,0,255)) \
		.lines(pregroup[1], color=(255,0,0)) \
	.save("llr_pregroups")
	
	print("---------------------")
	for v in itertools.combinations(pregroup[0], 2):            # vertical pairs
		for h in itertools.combinations(pregroup[1], 2):        # horizontal pairs
			poly = laps_intersections([v[0], v[1], h[0], h[1]]) # intersections
			poly = llr_correctness(poly, img.shape)             # inside the image
			if len(poly) != 4: continue                         # if there are not 4
			poly = na(llr_polysort(llr_normalize(poly)))        # sort
			if not cv2.isContourConvex(poly): continue          # convex?
			S[-llr_polyscore(poly, points, centroid, \
				beta=beta, alfa=alfa/2)] = poly                 # add to ranking

	S = collections.OrderedDict(sorted(S.items()))              # max
	K = next(iter(S))
	print("key --", K)
	four_points = llr_normalize(S[K])               # score

	# XXX: skip layers, or choose between them? (if we already have a good one)
	# XXX: crop only at the very end? (module that computes how many layers are needed)

	print("POINTS:", len(points))
	print("LINES:", len(lines))

	debug.image(img).points(four_points).save("llr_four_points")

	debug.image(img) \
		.points(points, color=(0,255,0)) \
 		.points(four_points, color=(0,0,255)) \
		.points([centroid], color=(255,0,0)) \
		.lines([[four_points[0], four_points[1]], [four_points[1], four_points[2]], \
		        [four_points[2], four_points[3]], [four_points[3], four_points[0]]], \
				color=(255,255,255)) \
	.save("llr_debug_3")

	return four_points
# Convex hull
# 1. First find the contours
img = cv2.imread('convex.jpg', 0)
_, thresh = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(thresh, 3, 2)
cnt = contours[0]

# 2. Find the convex hull and obtain its corner points
hull = cv2.convexHull(cnt)

# 3. Draw the convex hull
image = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.polylines(image, [hull], True, (0, 255, 0), 2)
cv2.imshow('convex hull', image)
cv2.waitKey(0)

# Is the contour convex?
print(cv2.isContourConvex(hull))  # True

# Understanding returnPoints:
print(hull[0])  # [[362 184]] (coordinates)
hull2 = cv2.convexHull(cnt, returnPoints=False)
print(hull2[0])  # [510] (index into cnt)
print(cnt[510])  # [[362 184]]


# Distance from a point to the contour (polygon test)
dist = cv2.pointPolygonTest(cnt, (100, 100), True)  # -3.53
print(dist)
ar = cv2.contourArea(cnt)
print(ar)
#CONTOUR PERIMETER or ARC-LENGTH
arclen = cv2.arcLength(cnt, True)  #arclen=cv2.arcLength(cnt,False)
print(arclen)
#CONTOUR APPROXIMATION: using Douglas Peucker algo.
#epsilon: maximum distance from the contour to the approximated contour; an accuracy parameter that must be chosen carefully.
epsilon = 0.1 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
print(epsilon, '\n', approx)
#CONVEX HULL: hull = cv2.convexHull(points[, hull[, clockwise[, returnPoints]]])
hull = cv2.convexHull(
    cnt)  #to find convexity defects, you need to pass returnPoints = False.
print(hull)
#CONVEXITY check
conv = cv2.isContourConvex(cnt)
print(conv)
#BOUNDING RECTANGLES: Straight bounding and rotated bounding
#Straight bounding
x, y, w, h = cv2.boundingRect(cnt)
img_rect = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 4)
#Rotated rectangle
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
img_rotate = cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
#MINIMUM ENCLOSING CIRCLE: circle which completely covers the object with minimum area.
(x, y), radius = cv2.minEnclosingCircle(cnt)
center = (int(x), int(y))
radius = int(radius)
img = cv2.circle(img, center, radius, (0, 255, 0), 2)
Ejemplo n.º 45
0
def match_warped(squares, image):
    markers = []
    k = 0
    for i in range(len(squares)):
        contours = squares[i][6]
        #patch =  image[(squares[i][1]):(squares[i][1]+squares[i][3]), (squares[i][0]):(squares[i][0]+squares[i][2])]
        #contours, hierarchy = cv.findContours(patch, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
        draw2 = np.zeros((squares[i][5].shape[0], squares[i][5].shape[1], 3), dtype=np.uint8)
        #draw2 = np.zeros((patch.shape[0], patch.shape[1], 3), dtype=np.uint8)
        #cv.drawContours(draw2, contours, i, (0,255,0), 1, cv.LINE_8, hierarchy)
        #cv.imshow("Patch", draw2)
        for cnt in contours:
            cnt_len = cv.arcLength(cnt, True)
            cnt = cv.approxPolyDP(cnt, 0.03*cnt_len, True)
            
            

            if len(cnt) == 4 and cv.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    issquare = compare_distances(cnt)
                    if not(issquare):
                        continue

                    #cv.drawContours(draw2, contours, i, (0,255,0), 1, cv.LINE_8, hierarchy= None)

                    
                    #cv.imshow("desenho2", draw2)

                    #cv.waitKey(0)
                    #cv.destroyWindow('desenho2')

                    sumAngles = quad_sum(cnt)
                    if sumAngles != 360:
                        continue

                    x,y,w,h = cv.boundingRect(cnt)   

                    if x<0.2*squares[i][5].shape[0] or y<0.2*squares[i][5].shape[1]:
                        continue


                    draw = np.zeros((squares[i][5].shape[0], squares[i][5].shape[1], 3), dtype=np.uint8)
                    #cv.rectangle(squares[i][5], (x,y), (x+w,y+h), (0,255,0),1)

                    patch = squares[i][5][x:x+w, y:y+h]
                    #patch_medio = patch[x:x+w, y:y+h]
                    
                    #cv.imshow("path",patch)
                    
                    
                    cv.imshow("waq", squares[i][5])
                    print("POINTS")
                    print(cnt)
                    #cv.waitKey(0)
                    #cv.destroyWindow('desenh')

                    contoursT, _ = cv.findContours(patch, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
                    for cntT in contoursT:
                        cntT_len = cv.arcLength(cntT, True)
                        cntT = cv.approxPolyDP(cntT, 0.02*cntT_len, True)
                        cntT = cntT.reshape(-1, 2)
                        
                        #if len(cntT) == 4:
                        for point in cntT: 
                            draw2 = cv.circle(draw2,  (point[0], point[1]), radius = 2, color = (0,255,0), thickness = -1)

                        issquare = compare_distances(cntT)
                        if len(cntT) == 3 or not(issquare):# and cv.countourArea(cnt) > 200:
                            markers.append(squares[i])
                    cv.imshow("desenh", draw2)
                  
        k = k+1
    return markers
Ejemplo n.º 46
0
mrkctr = []


for c in contours:
#    print "drawing contour"
#    print c
    cv2.drawContours(img, [c], -1, (255, 255, 0), 3)
    plt.imshow(img)
#    plt.show()
#    x = raw_input( "continue ? (y/n)" )
#    if ( not ( x == "y" ) ) :
#      exit()

    approx_curve = cv2.approxPolyDP(c, cv2.arcLength(c,True) * 0.1, True)
#    approx_curve = cv2.approxPolyDP(c, len(c) * 0.01, False)
    if not (len(approx_curve) == 4 and cv2.isContourConvex(approx_curve)):
        print " Continue not having a suitable number of points or is not convex : Continue"
        continue

    
    sorted_curve = np.array(cv2.convexHull(approx_curve, clockwise=False), dtype='float32')
    print "sorted curve\n", sorted_curve
    
#    continue

    persp_transf = cv2.getPerspectiveTransform(sorted_curve,canonical_marker_coords)
    print(persp_transf)

    warped_img = cv2.warpPerspective(img, persp_transf, (warped_size, warped_size))

Ejemplo n.º 47
0
#img=cv2.imread('ploygon.png')  # polygon image
img=cv2.imread('shoushi.jpg')  # hand-gesture image
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret,thresh=cv2.threshold(gray,235,255,0)
contours,hierarchy=cv2.findContours(thresh,1,2)
for cnt in contours:
    hull=cv2.convexHull(cnt)
    length=len(hull)
    if length>5:
        cv2.polylines(img,[hull],True,(0,0,255),3)
cv2.imshow('hull',img)

1.6 Convexity check
The function cv2.isContourConvex() tests whether a curve is convex; it only returns True or False, so on its own it is of limited use.

k=cv2.isContourConvex(c1) # the polygon image gives False

1.7 Bounding rectangles
There are two kinds of bounding rectangle.
Straight (upright) bounding rectangle: a plain, unrotated rectangle. It ignores the object's rotation, so its area is not minimal. It is obtained with cv2.boundingRect().
(x,y) is the top-left corner of the rectangle; (w,h) are its width and height.
Rotated bounding rectangle: this rectangle has minimal area because it takes the object's rotation into account. The function used is cv2.minAreaRect(); it returns a Box2D structure containing the rectangle centre (x,y), the width and height (w,h) and the rotation angle. To draw this rectangle you need its four corner points, which cv2.boxPoints() provides.
1.8 Minimum enclosing circle
The function cv2.minEnclosingCircle() finds the circle of smallest area that completely encloses an object.
1.9 Ellipse fitting
ellipse=cv2.fitEllipse(c) + img=cv2.ellipse(im,ellipse,(255,255,0),2); the returned ellipse is the one inscribed in the rotated bounding rectangle.
(A short illustrative sketch follows this example.)
# =============================================================================
# https://blog.csdn.net/u011854789/article/details/79836242?utm_source=blogxgwz9  definition of the rect return value (rotated bounding rectangle)
# =============================================================================
import cv2
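A minimal sketch tying the functions above together, assuming OpenCV 4.x (two-value findContours) and a binary image with one dominant contour; the file name 'shapes.png' is illustrative:

import cv2
import numpy as np

img = cv2.imread('shapes.png', 0)                        # illustrative file name
_, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt = max(contours, key=cv2.contourArea)
out = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

print(cv2.isContourConvex(cnt))                          # convexity check: True/False

x, y, w, h = cv2.boundingRect(cnt)                       # straight bounding rectangle
cv2.rectangle(out, (x, y), (x + w, y + h), (0, 255, 0), 2)

rect = cv2.minAreaRect(cnt)                              # rotated rectangle: (centre, (w, h), angle)
box = np.int32(cv2.boxPoints(rect))                      # its four corner points, for drawing
cv2.drawContours(out, [box], 0, (0, 0, 255), 2)

(cx, cy), r = cv2.minEnclosingCircle(cnt)                # minimum enclosing circle
cv2.circle(out, (int(cx), int(cy)), int(r), (255, 0, 0), 2)

if len(cnt) >= 5:                                        # fitEllipse needs at least 5 points
    ellipse = cv2.fitEllipse(cnt)
    cv2.ellipse(out, ellipse, (255, 255, 0), 2)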
Ejemplo n.º 48
0
    def gen_page(self,img_id,data, reg_list=None, out_folder='./',
                 approx_alg=None, num_segments=None):
        """
        """
        self.approx_alg = self.approx_alg if approx_alg==None else approx_alg
        self.num_segments = self.num_segments if num_segments==None else num_segments
        self.logger.debug('Gen PAGE for image: {}'.format(img_id))
        #--- sym link to original image 
        #--- TODO: check if orig image exist
        img_name = os.path.basename(self.img_data[img_id])
        symlink_force(os.path.realpath(self.img_data[img_id]),
                      os.path.join(out_folder,img_name))
        o_img = cv2.imread(self.img_data[img_id])
        (o_rows, o_cols, _) = o_img.shape
        o_max = max(o_rows,o_cols)
        o_min = min(o_rows,o_cols)
        cScale = np.array([o_cols/self.out_size[1],
                           o_rows/self.out_size[0]])
        
        page = pageData(os.path.join(out_folder, 'page', img_id + '.xml'),
                        logger=self.logger)
        self.hyp_xml_list.append(page.filepath)
        self.hyp_xml_list.sort()
        page.new_page(img_name, str(o_rows), str(o_cols)) 
        ####
        if self.out_type == 'C':
            if self.ext_mode == 'L':
                lines = data[0].astype(np.uint8) 
                reg_list= ['full_page']
                colors = {'full_page':0}
                r_data = np.zeros(lines.shape, dtype=np.uint8)
            elif self.ext_mode == 'R':
                r_data = data[0]
                lines = np.zeros(r_data.shape, dtype=np.uint8)
                colors = self.classes
            elif self.ext_mode == 'LR':
                lines = data[0].astype(np.uint8) 
                r_data = data[1]
                colors = self.classes
            else:
                pass
        elif self.out_type == 'R':
            if self.ext_mode == 'L':
                l_color = (-1 - ((self.line_color*(2/255))-1))/2
                lines = np.zeros(data[0].shape, dtype = np.uint8)
                lines[data[0] >= l_color] = 1 
                reg_list= ['full_page']
                colors = {'full_page':128}
                r_data = np.zeros(lines.shape, dtype=np.uint8)
            elif self.ext_mode == 'R':
                r_data = data[1]
                colors = self.classes
                lines = np.zeros(r_data.shape, dtype=np.uint8)
            elif self.ext_mode == 'LR':
                l_color = (-1 - ((self.line_color*(2/255))-1))/2
                lines = np.zeros(data[0].shape, dtype = np.uint8)
                lines[data[0] >= l_color] = 1 
                r_data = data[1]
                colors = self.classes
            else:
                pass   
        else:
            pass
        reg_mask = np.zeros(r_data.shape,dtype='uint8')
        lin_mask = np.zeros(lines.shape,dtype='uint8')
        r_id = 0
        kernel = np.ones((5,5),np.uint8)

        #--- get regions and lines for each class
        for reg in reg_list:
            r_color = colors[reg]
            #--- filling the array is faster than creating a new one or multiplying by 0
            reg_mask.fill(0)
            if self.out_type == 'R':
                lim_inf = ((r_color - self.th_span)*(2/255)) - 1
                lim_sup = ((r_color + self.th_span)*(2/255)) - 1
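                # maps the [r_color - th_span, r_color + th_span] window from the 0..255
                # pixel scale to the [-1, 1] scale used by r_data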
                reg_mask[np.where((r_data > lim_inf) & (r_data < lim_sup))] = 1
            elif self.out_type == 'C':
                reg_mask[r_data == r_color] = 1
            else:
                pass

            _ , contours, hierarchy = cv2.findContours(reg_mask,
                                                   cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                #--- remove small objects
                if(cnt.shape[0] < 4):
                    continue
                if(cv2.contourArea(cnt) < 0.01*self.out_size[0]):
                    continue
                #--- get lines inside the region
                lin_mask.fill(0)
                rect = cv2.minAreaRect(cnt)
                #--- soften the region a bit to prevent spikes
                epsilon = 0.005*cv2.arcLength(cnt,True)
                approx = cv2.approxPolyDP(cnt,epsilon,True)
                #box = np.array((rect[0][0], rect[0][1], rect[1][0], rect[1][1])).astype(int)
                r_id = r_id + 1
                approx= (approx*cScale).astype('int32')
                reg_coords = ''
                for x in approx.reshape(-1,2):
                    reg_coords = reg_coords + " {},{}".format(x[0],x[1])

                if not self.ext_mode == 'R':
                    cv2.fillConvexPoly(lin_mask,points=cnt, color=(1,1,1))
                    lin_mask = cv2.erode(lin_mask,kernel,iterations = 1)
                    lin_mask = cv2.dilate(lin_mask,kernel,iterations = 1)
                    reg_lines = lines * lin_mask
                    #--- search for the lines
                    _, l_cont, l_hier = cv2.findContours(reg_lines,
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
                    if (len(l_cont) == 0):
                        continue
                    #--- Add region to XML only if there is at least one line
                    text_reg = page.add_element('TextRegion',
                                            str(r_id),
                                            reg,
                                            reg_coords.strip())
                    n_lines = 0
                    for l_id,l_cnt in enumerate(l_cont):
                        if(l_cnt.shape[0] < 4):
                            continue
                        if (cv2.contourArea(l_cnt) < 0.01*self.out_size[0]):
                            continue
                        #--- convert to convexHull if poly is not convex
                        if (not cv2.isContourConvex(l_cnt)):
                            l_cnt = cv2.convexHull(l_cnt)
                        lin_coords = ''
                        l_cnt = (l_cnt*cScale).astype('int32')
                        for l_x in l_cnt.reshape(-1,2): 
                            lin_coords = lin_coords + " {},{}".format(l_x[0],l_x[1])
                        (is_line, approx_lin) = self._get_baseline(o_img, l_cnt)
                        if is_line == False:
                            continue
                        text_line = page.add_element('TextLine',
                                                 str(l_id) + '_' + str(r_id),
                                                 reg,
                                                 lin_coords.strip(),
                                                 parent=text_reg)
                        baseline = pa.points_to_str(approx_lin)
                        page.add_baseline(baseline, text_line)
                        n_lines += 1
                    #--- remove regions without text lines
                    if n_lines == 0:
                        page.remove_element(text_reg)
                else:
                    text_reg = page.add_element('TextRegion',
                                            str(r_id),
                                            reg,
                                            reg_coords.strip())

        page.save_xml()
Ejemplo n.º 49
0
def markBackground(filename):
    # == Parameters =======================================================================
    # BLUR = 21
    CANNY_THRESH_1 = 0
    CANNY_THRESH_2 = 170
    MASK_DILATE_ITER = 10
    MASK_ERODE_ITER = 10
    MASK_COLOR = (0.0, 0.0, 1.0)  # In BGR format

    # == Processing =======================================================================

    # -- Read image -----------------------------------------------------------------------
    img = cv2.imread(filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    orig = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # np.save("bananaORIG", orig)

    # -- Edge detection -------------------------------------------------------------------
    edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
    edges = cv2.dilate(edges, None)
    edges = cv2.erode(edges, None)

    # -- Find contours in edges, sort by area ---------------------------------------------
    contour_info = []
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        contour_info.append((
            c,
            cv2.isContourConvex(c),
            cv2.contourArea(c),
        ))
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
    max_contour = contour_info[0]

    # -- Create empty mask, draw filled polygon on it corresponding to largest contour ----
    # Mask is black, polygon is white
    mask = np.zeros(edges.shape)
    cv2.fillConvexPoly(mask, max_contour[0], (255))

    # -- Smooth mask, then blur it --------------------------------------------------------
    mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
    mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
    # mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)  # we're not blurring
    mask_stack = np.dstack([mask] * 3)  # Create 3-channel alpha mask

    # -- Blend masked img into MASK_COLOR background --------------------------------------
    mask_stack = mask_stack.astype('float32') / 255.0  # Use float matrices,
    img = img.astype('float32') / 255.0  # for easy blending

    masked = (mask_stack * img) + ((1 - mask_stack) * MASK_COLOR)  # Blend
    masked = (masked * 255).astype('uint8')  # Convert back to 8-bit

    masked = cv2.cvtColor(masked, cv2.COLOR_BGR2RGB)
    np.save("bananaMASKED14", masked)
    mpl.image.imsave('bananaMASKED14.jpg', masked)
    plt.imshow(masked)
    plt.show()

    # cv2.imwrite('C:/Temp/person-masked.jpg', masked)           # Save

    return masked
Ejemplo n.º 50
0
def detect_markers(img, marker_size, camK):
    width, height, _ = img.shape
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    [img_min, img_max, minLoc, maxLoc] = cv2.minMaxLoc(gray)
    # cv2.imshow("gray", gray)

    edges = cv2.Canny(gray, 50, 100)
    # cv2.imshow("edges", edges)
    # cv2.waitKey(1)

    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_NONE)[-2:]

    # We only keep the long enough contours
    min_contour_length = min(width, height) / 10
    contours = [
        contour for contour in contours if len(contour) > min_contour_length
    ]
    canonical_marker_coords = array(
        ((0, 0), (WARPED_SIZE - 1, 0), (WARPED_SIZE - 1, WARPED_SIZE - 1),
         (0, WARPED_SIZE - 1)),
        dtype='float32')

    # imgc = img.copy()
    # #cv2.drawContours(imgc, contours, -1, (0,255,0), 3)
    # for cidx in range(1, len(contours)):
    #     cv2.drawContours(imgc, contours, cidx, (randint(0,255), randint(0,255), randint(0,255)), 3)
    # cv2.imshow("contours", imgc)
    # cv2.waitKey(1)

    markers_list = []
    # polydtct_counters = []

    for contour in contours:
        approx_curve = cv2.approxPolyDP(contour, len(contour) * 0.05, True)
        if not (len(approx_curve) == 4 and cv2.isContourConvex(approx_curve)):
            continue

        sorted_curve = array(cv2.convexHull(approx_curve, clockwise=False),
                             dtype='float32')

        # polydtct_counters.append(cv2.convexHull(approx_curve, clockwise=False))

        # warp the image
        persp_transf = cv2.getPerspectiveTransform(sorted_curve,
                                                   canonical_marker_coords)
        warped_img = cv2.warpPerspective(img, persp_transf,
                                         (WARPED_SIZE, WARPED_SIZE))
        warped_gray = cv2.cvtColor(warped_img, cv2.COLOR_BGR2GRAY)

        # check max and min pixel values in the warped image
        # reject areas of mostly uniform colour
        [warp_min, warp_max, minLoc, maxLoc] = cv2.minMaxLoc(warped_gray)
        if (warp_max - warp_min) / (img_max - img_min) < 0.3:
            continue

        # get a good threshold for binary operation,
        # average of all pixels
        wraped_gray_avg = cv2.mean(warped_gray)[0]

        # binary image
        _, warped_bin = cv2.threshold(warped_gray, wraped_gray_avg, 255,
                                      cv2.THRESH_BINARY)

        # # reshape to one block per pixel
        # marker = warped_bin.reshape(
        #     [MARKER_SIZE, WARPED_SIZE / MARKER_SIZE, MARKER_SIZE, WARPED_SIZE / MARKER_SIZE]
        # )
        # # binary reshaped image
        # marker = marker.mean(axis=3).mean(axis=1)
        # marker[marker < 127] = 0
        # marker[marker >= 127] = 1

        # get a more reliable reading by sampling rather than reshaping
        patch_size = WARPED_SIZE // MARKER_SIZE
        patch_kenrel = np.ones(
            (patch_size, patch_size), np.float32) / (patch_size * patch_size)
        wraped_bin_filter = cv2.filter2D(warped_bin,
                                         -1,
                                         patch_kenrel,
                                         borderType=cv2.BORDER_REPLICATE)
        kernel_accept_thresh = 0.4  # < 0.5
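        # The box filter above averages each patch_size x patch_size cell, so sampling the
        # filtered image at a cell centre yields that cell's mean intensity. Cells brighter
        # than 256*(1 - kernel_accept_thresh) read as 1, cells darker than
        # 256*kernel_accept_thresh stay 0, and anything in between aborts the read as ambiguous.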

        marker = np.zeros((MARKER_SIZE, MARKER_SIZE))
        read_marker_success = True
        for i in range(1, MARKER_SIZE):
            for j in range(1, MARKER_SIZE):

                # # single pixel sampling
                # if warped_bin[(i+0.5)*patch_size, (j+0.5)*patch_size] > 0:
                #     marker[i,j] = 1

                # kernel sampling method
                if wraped_bin_filter[int((i + 0.5) * patch_size),
                                     int((j + 0.5) * patch_size)] > 256 * (
                                         1 - kernel_accept_thresh):
                    marker[i, j] = 1
                elif wraped_bin_filter[int(
                    (i + 0.5) * patch_size
                ), int((j + 0.5) * patch_size)] > 256 * kernel_accept_thresh:
                    read_marker_success = False
                    break

        if not read_marker_success:
            continue

        # cv2.imshow("bin", warped_bin)
        # cv2.waitKey(50)
        # cv2.imshow("warped_marker", rot90(warped_bin, k=0))
        # cv2.waitKey(50)

        try:
            # rotate marker by checking which corner is white
            turn_number = validate_and_get_turn_number(marker)
            marker = rot90(marker, k=turn_number)

            # get id
            hamming_code = extract_hamming_code(marker)
            marker_id = int(decode(hamming_code), 2)

        except ValueError:
            continue

        # rotate corner list
        rotated_contour = rotate_contour(sorted_curve, persp_transf,
                                         turn_number)
        detected_marker = HammingMarker(id=marker_id,
                                        contours=rotated_contour,
                                        size=marker_size)

        # get pose and update detected marker
        pose_results = get_marker_pose(detected_marker, detected_marker.size,
                                       camK)
        if pose_results[0]:
            detected_marker.rvec = pose_results[1]
            detected_marker.tvec = pose_results[2]
        else:
            # cannot find pose using contours
            continue

        markers_list.append(detected_marker)

    # imgpoly = img.copy()
    # for cidx in range(1, len(polydtct_counters)):
    #     cv2.drawContours(imgpoly, polydtct_counters, cidx, (randint(0,255), randint(0,255), randint(0,255)), 3)
    # cv2.imshow("contours poly", imgpoly)
    # cv2.waitKey(1)

    return markers_list
Ejemplo n.º 51
0
# Convexity check
import cv2

img = cv2.imread('image/16.jpg')

imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, th = cv2.threshold(imggray, 127, 255, 0)

contour, _ = cv2.findContours(th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

hull = cv2.convexHull(contour[0])  # not only detects non-convexity, it can also correct it

print(cv2.isContourConvex(contour[0]), cv2.isContourConvex(hull))

img_contour = cv2.drawContours(img, [hull], -1, (0, 0, 255), 3)

cv2.imshow("1", img_contour)
cv2.waitKey(0)
Ejemplo n.º 52
0
    green_mask = cv2.inRange(hsv, lower_green, upper_green)
    green = cv2.bitwise_and(frame, frame, mask=green_mask)

    # Contours detection
    # Opencv version: 3.x.x & 4.x.x
    contours, _ = cv2.findContours(green_mask, cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)

    for i in range(0, len(contours)):
        approx = cv2.approxPolyDP(contours[i],
                                  cv2.arcLength(contours[i], True) * 0.0075,
                                  True)
        x = approx.ravel()[0]
        y = approx.ravel()[1]
        if (abs(cv2.contourArea(contours[i])) < 1000
                or not (cv2.isContourConvex(approx))):
            continue

        if len(approx) == 4:
            cv2.putText(green_mask, "Green Square", (x, y), font, 1,
                        (255, 0, 0))
            text_file = open("logs/Output.txt", "w")
            text_file.write("1")
            text_file.close()

    cv2.imshow("Frame", frame)
    cv2.imshow("Green Only", green_mask)

    key = cv2.waitKey(1)
    if key == 27:
        break
Ejemplo n.º 53
0
#th2=cv2.adaptiveThreshold(imagegray,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
if __name__ == '__main__':
    # sd = shapedetector.ShapeDetector()
    # otsu = sd.otsu(HSV[:, :, 1])
    # HSV[:, :, 1] is the saturation channel as a grayscale image
    img2 = cv2.threshold(HSV[:, :, 1], 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    kernel = np.ones((3, 3), np.uint8)  # structuring element
    mb = cv2.morphologyEx(img2, cv2.MORPH_CLOSE, kernel, iterations=3)  # closing operation
    cnts = cv2.findContours(mb.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)  # sort by area, largest first
    for c in cnts:
        peri = cv2.arcLength(c, True)  # perimeter
        approx = cv2.approxPolyDP(c, 0.01 * peri, True)  # fit a polygon; the second argument is usually 1-5% of the perimeter
        if cv2.isContourConvex(approx):
            continue
        # cv2.drawContours(image, [approx], 0, (0, 255, 255), 3)
        cv2.drawContours(image, [c], -1, (0, 0, 255), 2)
        M = cv2.moments(c)  # contour moments
        #Hu_M = cv2.HuMoments(M)  # the 7 Hu invariant moments
        if M['m00'] < 1000:
            break
        cx, cy = int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])
        cv2.circle(image, (cx, cy), 3, (255, 0, 0), -1)  # centroid
        hull = cv2.convexHull(approx, returnPoints=False)  # for convexity-defect detection
        defects = cv2.convexityDefects(approx, hull)
        if (defects.shape[0]) != 2:
            continue
        p1 = []
        for i in range(2):
Ejemplo n.º 54
0
#MORPHOLOGICAL OPERATIONS
edges = cv2.Canny(img, CANNY_THRESH_1, CANNY_THRESH_2)
edges = cv2.dilate(edges, None)  #[DILATION]
edges = cv2.erode(
    edges, None)  #Decreases the white region near image boundaries [EROSION]

#== Find contours in edges, sort by area ===========================================-
contour_info = []
_, contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
# Previously, for a previous version of cv2, this line was:
#  contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

for c in contours:
    contour_info.append((
        c,
        cv2.isContourConvex(c),  #TRUE_OR_FALSE
        cv2.contourArea(c),  #Calculate contour area
    ))
contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
max_contour = contour_info[0]

#== Create empty mask, draw filled polygon on it corresponding to largest contour ===
# Mask is black, polygon is white
mask = np.zeros(edges.shape)
cv2.fillConvexPoly(mask, max_contour[0], (255))

#== Smooth mask, then blur it ========================================================
mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)
mask_stack = np.dstack([mask] * 3)  # Create 3-channel alpha mask
Ejemplo n.º 55
0
    def findTicketTableContourCandidates(self, img):
        """
                Find the contours that are most likely to be the one that corresponds
                to the ticket table.  Most likely result is at 0th index of returned list.
		"""
        if img is None:
            return

        results = []

        # Create binary image
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # _, img_binary = cv2.threshold(img_gray, 100, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        img_binary = cv2.adaptiveThreshold(img_gray, 255,
                                           cv2.ADAPTIVE_THRESH_MEAN_C,
                                           cv2.THRESH_BINARY, 31, 10)

        # Save image
        if self.debug:
            cv2.imwrite('step-find-ticket-table-contour.png', img_binary)

        # Find contours
        contours, hierarchy = cv2.findContours(img_binary.copy(),
                                               cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_NONE)
        hierarchy = hierarchy[0]

        i = 0
        for contour in contours:

            contourHierarchy = hierarchy[i]

            features = self.getFeatures(img_binary, contours, contour, i,
                                        hierarchy)

            aspectRatio, percent_area, numSubContours, numRectSubContours, subContourAreaRatio, immediateSubcontourRatio = features

            contourLength = cv2.arcLength(contour, True)
            approxPolyContour = cv2.approxPolyDP(contour, 0.02 * contourLength,
                                                 True)

            # this works for tickets 0-7
            # if numSubContours > 100 and numRectSubContours > 20 and percent_area > 0.05 and percent_area < 0.38 and len(approxPolyContour) == 4 and cv2.isContourConvex(approxPolyContour) and subContourAreaRatio > 0.75 and immediateSubcontourRatio > 0.05:

            # this works for ticket 8 + all other tickets
            if numSubContours > 100 and numRectSubContours > 20 and subContourAreaRatio > 0.75 and immediateSubcontourRatio > 0.05 and percent_area < 0.38 and len(
                    approxPolyContour) == 4 and cv2.isContourConvex(
                        approxPolyContour) and percent_area < 0.38:

                print("\n\ncontour %s:\n\n" % i)
                print("aspectRatio: %s" % aspectRatio)
                print("percentArea: %s" % percent_area)
                print("numSubContours of %s: %s" % (i, numSubContours))
                print("rect subContours : %s" % numRectSubContours)
                print("subContourAreaRatio : %s" % subContourAreaRatio)
                print("immediateSubcontourRatio: %s" %
                      immediateSubcontourRatio)

                contourContext = ContourContext(contour, i, contours,
                                                hierarchy)

                results.append(contourContext)

            i += 1

        return results
Ejemplo n.º 56
0
contours, _ = cv2.findContours(src_bin, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_NONE)

for i in range(len(contours)):
    pts = contours[i]

    c = (random.randint(0, 255), random.randint(0,
                                                255), random.randint(0, 255))
    cv2.drawContours(dst1, contours, i, c, 1)

    # skip objects that are too small
    if (cv2.contourArea(pts) < 1000):
        continue

    # approximate the contour
    approx = cv2.approxPolyDP(pts, cv2.arcLength(pts, True) * 0.02, True)

    # skip if not convex
    if not cv2.isContourConvex(approx):
        continue

    if len(approx) == 4:
        cv2.drawContours(dst2, contours, i, c, 2)

cv2.imshow('src', src)
cv2.imshow('src_bin', src_bin)
cv2.imshow('dst1', dst1)
cv2.imshow('dst2', dst2)
cv2.waitKey()
cv2.destroyAllWindows()
Ejemplo n.º 57
0
    gray = cv2.bilateralFilter(gray, 7, 12, 12)

    v = np.median(gray)
    sigma = 50
    low = int(max(0, (1.0 - sigma / 100) * v))
    up = int(min(255, (1.0 + sigma / 100) * v))

    edged = cv2.Canny(gray, low, up)

    img, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)

    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.0001 * peri, True)
        k = cv2.isContourConvex(c)
        if (k == False):
            M = cv2.moments(c)
            area = cv2.contourArea(approx)

            (x, y), radius = cv2.minEnclosingCircle(c)
            S = np.pi * radius * radius
            sol = area / float(S)

            hull = cv2.convexHull(c)
            hull_area = cv2.contourArea(hull)
            solidity = area / (hull_area + 1)
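            # sol compares the approximated contour area to the area of the minimum
            # enclosing circle (a circularity measure); solidity compares it to the convex
            # hull area (the +1 guards against division by zero).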

            if sol >= 0.8 and sol <= 1 and solidity >= 0.95:

                m = x
Ejemplo n.º 58
0
import cv2
import numpy as np

port = 1
bg = cv2.createBackgroundSubtractorMOG2()
camera = cv2.VideoCapture(port)
while (camera.isOpened()):
    ret, img = camera.read()
    if ret:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edge = cv2.Canny(gray, 50, 150, apertureSize=3)
    edge = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, thresh = cv2.threshold(edge, 225, 255, cv2.THRESH_BINARY)
    thresh = cv2.erode(thresh, None, iterations=8)
    thresh = cv2.dilate(thresh, None, iterations=8)
    #bgmask = bg.apply(thresh)
    image, contours, hier = cv2.findContours(thresh, cv2.RETR_TREE,
                                             cv2.CHAIN_APPROX_SIMPLE)
    #contours =
    #bgmask = cv2.GaussianBlur(bgmask,(5,5),0)
    for cnt in contours:
        if not (cv2.isContourConvex(cnt)):
            hull = cv2.convexHull(cnt)
            cv2.drawContours(img, [hull], -1, (0, 255, 0), 2)
    cv2.imshow("test", img)
    k = cv2.waitKey(1)
    if k == 27:
        break
Ejemplo n.º 59
0
def procuraQuadrado(mascara):
    """!
        Procura o o quadrado do mostrador na imagem filtrada

        Função que busca, identifica, localiza e define as coordenadas
        de um quadrado caso seja identificado em uma figura.
        Define os contornos presentes na imagem, localiza os vértices dos
        contornos, desenvolve curvas ligando tais vértices (cv.approxPoly)
        e verifica a similaridade com uma reta. Caso todos os vértices ligados
        correspondam ao formato desejado, extrai as coordenadas de tais vértices
        e aponta-os como o quadrado.

        Parâmetros:
            @param mascara(np.array) - imagem previamente filtrada.

        Retorno:
            @returns Coordenadas dos vértices do quadrado obtido.
    """

    kernel = np.ones((5, 5), np.uint8)

    bordas = cv.Canny(mascara, 100, 500, kernel)

    contours = []
    hierarchy = []

    if (cv.__version__[0] == "4"):
        contours, hierarchy = cv.findContours(bordas, cv.RETR_EXTERNAL,
                                              cv.CHAIN_APPROX_NONE)
    else:
        _, contours, hierarchy = cv.findContours(bordas, cv.RETR_EXTERNAL,
                                                 cv.CHAIN_APPROX_NONE)

    quadrados = []

    for i in range(len(contours)):
        epsilon = 0.1 * cv.arcLength(contours[i], True)
        approx = cv.approxPolyDP(contours[i], epsilon, True)

        if (len(approx) < 4):
            continue

        if not cv.isContourConvex(approx):
            continue

        if cv.contourArea(approx) < 2000:
            continue

        quadrado = []

        for point in approx:
            quadrado.append([point[0][0], point[0][1]])

        quadrados.append(quadrado)

    quadrados = np.array(quadrados)

    i = 0

    while (len(quadrados) - 2 >= i):
        if np.linalg.norm(quadrados[i + 1][0] - quadrados[i][0]) < 5:
            quadrados = np.delete(quadrados, i, 0)

        i += 1

    i = 0

    while len(quadrados) > i:
        if np.linalg.norm(quadrados[i][0] - quadrados[i][1]) / np.linalg.norm(
                quadrados[i][2] - quadrados[i][1]) < 0.9:
            quadrados = np.delete(quadrados, i, 0)
        i += 1

    return quadrados
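A minimal usage sketch for the function above, assuming an already thresholded mask (the file name and HSV bounds below are illustrative, not taken from the original project):

import cv2 as cv
import numpy as np

frame = cv.imread('mostrador.jpg')                       # illustrative input image
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
mascara = cv.inRange(hsv, (0, 0, 0), (180, 255, 80))     # illustrative HSV bounds
quadrados = procuraQuadrado(mascara)
for quadrado in quadrados:
    pts = np.array(quadrado, dtype=np.int32).reshape(-1, 1, 2)
    cv.polylines(frame, [pts], True, (0, 255, 0), 2)     # outline each detected square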
Ejemplo n.º 60
-6
def get_contour_data(contour, image):
    data = {}
    data["empty"] = cv2.contourArea(contour)<=3
    data["convex"] = cv2.isContourConvex(contour)
    data["rect"] = cv2.boundingRect(contour)
    x,y,w,h = data["rect"]
    data["size"] = max(w,h)#float(w+h)/2
    data["radius"] = math.sqrt(w**2+h**2)/2.0
    data["points"] = [(x,y),(x,y+h),(x+w,y+h),(x+w,y)]
    M = cv2.moments(np.array([[p] for p in data["points"]],dtype=np.int32))
    data["center"] = (M['m10']/(M['m00']+0.00001), M['m01']/(M['m00']+0.00001))
    #if the contours are circles instead of squares
    if not doc_parameters["squares"]:
        center, radius = cv2.minEnclosingCircle(contour)
        data["center"] = (int(center[0]),int(center[1]))
        data["radius"] = int(radius)
        new_radius = int((1-doc_parameters["selection_circle_padding"])*radius)

        mask = np.zeros(image.shape,np.uint8)
        cv2.ellipse(mask, (int(center[0]),int(center[1])), (new_radius, new_radius), 0, 0, 360, 255, -1)
        data["mean_intensity"] = cv2.mean(image,mask = mask)[0]

    else:
        b = doc_parameters["selection_box_padding"]/2.0
        fillarea = np.array([ [[x+b*w,y+b*h]] , [[x+b*w,y+h-b*h]] , [[x+w-b*w,y+h-b*h]] , [[x+w-b*w,y+b*h]] ], dtype=np.int32 )
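        # fillarea is the bounding box shrunk by the padding fraction on each side;
        # only this inner region is averaged, so the box outline itself does not bias
        # the mean intensity used later to decide whether the box is marked.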
        mask = np.zeros(image.shape,np.uint8)
        cv2.drawContours(mask,[fillarea],0,255,-1)
        #improve the calculation of the intensity that decides if it is selected or not.
        data["mean_intensity"] = cv2.mean(image,mask = mask)[0]

    # if doc_parameters["debug"]: print data["mean_intensity"]


    return data