def find_biggest_contour(image, contours, max_area=None, min_area=100 ** 2, max_border=(100, 100, 100, 100)):
    """
    Return the contour with the biggest area from a list of contours.
    @param image: image the contours were extracted from
    @param contours: list of contours (from cv2.findContours())
    @param max_area: upper limit on contour area (defaults to max(image.shape) ** 2)
    @param min_area: lower limit on contour area
    @param max_border: border around the image within which the contour centroid must lie
    @return: the largest qualifying contour (falls back to the last contour in the list)
    """
    if max_area is None:
        max_area = max(image.shape) ** 2

    # contour searching algorithm
    # image.shape is (rows, cols, channels) -> (height, width)
    image_y, image_x, _ = image.shape
    border_x_min = max_border[0]
    border_x_max = image_x + max_border[2]
    border_y_min = max_border[1]
    border_y_max = image_y + max_border[3]

    best_area = 0
    best_contour = contours[-1]
    for contour in contours:
        # find area of contour
        area = cv2.contourArea(contour)

        # find moments of contour
        # noinspection PyPep8Naming
        M = cv2.moments(contour)
        if M['m00'] == 0:
            # degenerate contour: centroid is undefined
            continue

        # find center of mass of contour
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        # keep the largest contour that satisfies the area and border constraints
        if min_area < area < max_area and area > best_area:
            if border_x_min < cx < border_x_max and border_y_min < cy < border_y_max:
                best_area = area
                best_contour = contour

    return best_contour
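
# A minimal usage sketch for find_biggest_contour(). The frame/mask names, the 50**2
# threshold and the helper name are illustrative assumptions, not part of the original
# example; cv2 is assumed to be imported and the OpenCV 4.x findContours() return
# signature is assumed.
def _find_biggest_contour_demo(frame, mask):
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None
    biggest = find_biggest_contour(frame, contours, min_area=50 ** 2)
    cv2.drawContours(frame, [biggest], -1, (0, 255, 0), 2)
    return biggest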
Example #2
def Range(img, parameters_dict):
    Range = np.array([])
    ZDistance = np.array([])
    Bearing = np.array([])
    Center = np.array([])
    GrayFiltimg = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
    GrayFiltimg = cv2.cvtColor(GrayFiltimg, cv2.COLOR_RGB2GRAY)
    Contour = cv2.findContours(GrayFiltimg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    Contour = imutils.grab_contours(Contour)
    if not Contour:
        print("there is no lander here")
    else:
        for a in Contour:
            # find the center of the contour
            Moment = cv2.moments(a)
            Area = cv2.contourArea(a)
            Lx = int(Moment["m10"] / Moment["m00"])
            Ly = int(Moment["m01"] / Moment["m00"])
            cv2.circle(img, (Lx, Ly), 7, (255, 255, 255), -1)
            Centroid = np.array([Lx, Ly])
            Center = np.append(Center, Centroid)
            Lx1, Ly1, LWidth, LHeight = cv2.boundingRect(a)
            # f is the camera focal length, defined at module level in the original code
            Distance = parameters_dict["Height"] * (f / LHeight) / 2
            ZDistance = np.append(ZDistance, Distance)
            Bearing = np.append(Bearing, (Lx - 320) * (31.1 / 320))
            # stack ZDistance and Bearing column-wise and sort the rows by distance
            Range = np.vstack((ZDistance, Bearing)).T
            Range = Range[Range[:, 0].argsort()]
            # positive bearing means the target is right of center, negative means left
    # ZDistance = np.sort(ZDistance)
    return Range
Example #3
def Find_Contour(img_list):
    depth = 3
    convexHull = []
    Hu_list = []
    contour_ls = []
    Hu_array = np.array([])
    MIN_AREA = 2000  # minimum area for a detected contour

    for img in img_list:

        contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        # length == 0 means no contour was found; length > 60 means the image is too noisy.
        # Checking the length also avoids the error max() would raise on an empty list.
        length = len(contours)
        if 0 < length <= 60:
            contours = sorted(contours, key=cv2.contourArea, reverse=True)

            for i in range(depth):
                if length > i and cv2.contourArea(contours[i]) >= MIN_AREA:
                    contour_ls.append(contours[i])
                    convexHull.append(ConvexHull_Cal(contours[i]))  # ConvexHull_Cal is defined elsewhere
                    M = cv2.moments(contours[i])
                    Hu_list.append(cv2.HuMoments(M))
                else:
                    break

    if Hu_list:
        Hu_array = np.array(Hu_list)
        Hu_array = np.squeeze(Hu_array)

    return contour_ls, convexHull, Hu_array
Example #4
def identify_countors(image):
    stations = []
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image_invert = cv2.bitwise_not(image_gray)
    contours, _ = cv2.findContours(image_invert, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)

    for cnt in contours:
        approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        M = cv2.moments(cnt)
        if M["m00"] == 0:
            # skip degenerate contours with zero area
            continue
        cx = int(M["m10"] / M["m00"])
        cy = int(M["m01"] / M["m00"])
        x, y, w, h = cv2.boundingRect(cnt)

        # detect if the square is rotated
        # TODO: detect in some other way
        tilted = cnt.ravel()[0] == cx

        station = {
            "type": get_type_by_edges(len(approx), tilted),
            "pos": (x, y),
            "centroid": (cx, cy),
            "size": (w, h),
            "contour": cnt
        }
        stations.append(station)

    return stations
Example #5
def GetRightPos(image):
    # edge detection
    canny = cv2.Canny(image, 200, 400)

    # contour extraction (OpenCV 3.x findContours signature)
    img, contours, _ = cv2.findContours(canny, cv2.RETR_TREE,
                                        cv2.CHAIN_APPROX_SIMPLE)

    rightRectangles = []
    for i, contour in enumerate(contours):
        M = cv2.moments(contour)

        if M['m00'] == 0:
            cx, cy = 0, 0
        else:
            cx, cy = M['m10'] / M['m00'], M['m01'] / M['m00']

        if 1000 < cv2.contourArea(contour) < 1300 and 120 < cv2.arcLength(
                contour, True) < 400:
            if cx > 100:
                x, y, w, h = cv2.boundingRect(contour)  # bounding rectangle
                rightRectangles.append((x, y, w, h))

    if len(rightRectangles) > 0:
        # the inner (smallest) rectangle
        current = min(rightRectangles, key=lambda s: s[2] * s[3])
        x, y, w, h = current
        return x, y, w, h

    return 0, 0, 0, 0
Example #6
def detect_ball(frame):
    x, y, radius = -1, -1, -1
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_frame, HSV_lower, HSV_upper)
    mask = cv2.erode(mask, None, iterations=0)
    mask = cv2.dilate(mask, None, iterations=12)
    im2, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
    center = (-1, -1)

    # only proceed if at least one contour was found
    if len(contours) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(contours, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        xList.append(x)  # x coordinates (xList, xPath, xStart, pathLength are module-level globals)
        xPath.append((x - xStart) / pathLength)  # fraction of the path traveled
        # note: the centroid here is computed from the whole mask, not just the largest contour
        M = cv2.moments(mask)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # check that the radius is larger than some threshold
        if radius > minRadius:
            #outline ball
            cv2.circle(frame, (int(x), int(y)), int(radius), (255, 0, 0), 2)
            #show ball center
            cv2.circle(frame, center, 5, (0, 255, 0), -1)

    return center[0], center[1], radius
def FindContourCenter(Input_contours):
    output_centroid = []
    for contour in Input_contours:
        my_moment = cv.moments(contour, False)
        centroid = np.array([my_moment['m10'] / my_moment['m00'], my_moment['m01'] / my_moment['m00']])
        output_centroid.append(centroid)
    return output_centroid
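
# Note: FindContourCenter() divides by my_moment['m00'] directly, so a degenerate
# (zero-area) contour raises ZeroDivisionError. A hedged variant with a guard, for
# illustration only (the *Safe name is not from the original code):
def FindContourCenterSafe(Input_contours):
    output_centroid = []
    for contour in Input_contours:
        my_moment = cv.moments(contour, False)
        if my_moment['m00'] == 0:
            continue  # skip contours with no area
        output_centroid.append(np.array([my_moment['m10'] / my_moment['m00'],
                                         my_moment['m01'] / my_moment['m00']]))
    return output_centroid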
Example #8
def measure_object(image):
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
    print("threshold value:%s" % ret)
    cv.imshow("binary image", binary)
    dst = cv.cvtColor(binary, cv.COLOR_GRAY2BGR)
    contours, hierarchy = cv.findContours(binary, cv.RETR_EXTERNAL,
                                          cv.CHAIN_APPROX_SIMPLE)
    for i, contour in enumerate(contours):
        area = cv.contourArea(contour)
        x, y, w, h = cv.boundingRect(contour)
        mm = cv.moments(contour)
        if mm['m00']:
            cx = mm['m10'] / mm['m00']
            cy = mm['m01'] / mm['m00']
        else:
            continue
        cv.circle(dst, (int(cx), int(cy)), 3, (0, 0, 255), -1)
        cv.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)  # draw the bounding rectangle
        """
        print("area", area)
        approxCurve = cv.approxPolyDP(contour, 4, True)  # polygon approximation
        if approxCurve.shape[0] > 6:  # more than 6 edges
            cv.drawContours(dst, contours, i, (0, 0, 255), 2)
        if approxCurve.shape[0] == 4:  # rectangle
            cv.drawContours(dst, contours, i, (0, 255, 0), 2)
        if approxCurve.shape[0] == 3:  # triangle
            cv.drawContours(dst, contours, i, (255, 0, 0), 2)
        """
    cv.imshow("image", dst)
    cv.imshow("source", image)
    def detect_centrode(self, res):
        # Adapted from
        # https://stackoverflow.com/questions/54425093/
        # /how-can-i-find-the-center-of-the-pattern-and-the-distribution-of-a-color-around)
        contours, hierarchy = cv2.findContours(res, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        areas = []
        centersX = []
        centersY = []
        for cnt in contours:
            areas.append(cv2.contourArea(cnt))
            M = cv2.moments(cnt)
            try:
                centersX.append(int(M["m10"] / M["m00"]))
                centersY.append(int(M["m01"] / M["m00"]))
            except ZeroDivisionError:
                # m00 == 0: fall back to the raw first-order moments
                centersX.append(int(M["m10"]))
                centersY.append(int(M["m01"]))
        # area-weighted average of the contour centroids
        full_areas = np.sum(areas)
        acc_X = 0
        acc_Y = 0
        for i in range(len(areas)):
            acc_X += centersX[i] * (areas[i] / full_areas)
            acc_Y += centersY[i] * (areas[i] / full_areas)
        return acc_X, acc_Y, full_areas
Example #10
    def segment(self, np_img):
        results = self.segmentation_detector.predict(np_img)
        classes = [
            "PCB", "BottomCover", "BlueCover", "WhiteCover", "BlackCover"
        ]
        masks = []
        for i in range(len(results["instances"].pred_classes)):
            mask_image = results["instances"].pred_masks[i].cpu().numpy()
            mask_image = np.asarray(mask_image * 255, dtype=np.uint8)
            moments = cv2.moments(mask_image)
            cX = int(moments["m10"] / moments["m00"])
            cY = int(moments["m01"] / moments["m00"])
            center = (cX, cY)
            area = moments["m00"]
            part = classes[results['instances'].pred_classes[i]]
            score = results['instances'].scores[i]
            mask = {
                "part": part,
                "score": score,
                "area": area,
                "center": center,
                "ignored": False,
                "ignore_reason": "",
                "mask": mask_image
            }
            masks.append(mask)
        return masks
def contourCenter(contour):
    M = cv2.moments(contour)  #Get moments from found contour
    cx = -1
    cy = -1
    if M['m00'] != 0:
        cx = int(M['m10'] / M['m00'])  # X moment coordinate
        cy = int(M['m01'] / M['m00'])  # Y moment coordinate
    return cx, cy
Example #12
def get_object_centroid(image):
    #find the moments of the binary image
    imageMoments = cv2.moments(image)
    x = int(imageMoments["m10"] / imageMoments["m00"])
    y = int(imageMoments["m01"] / imageMoments["m00"])

    # return coordinates of the central point
    return x, y
Example #13
def deskew(img):
    # SZ (square image side length) and affine_flags are module-level constants
    # defined elsewhere in the original source.
    m = cv2.moments(img)
    if abs(m['mu02']) < 1e-2:
        # image is already effectively upright
        return img.copy()
    # skew estimated from the second-order central moments
    skew = m['mu11'] / m['mu02']
    M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
    img = cv2.warpAffine(img, M, (SZ, SZ), flags=affine_flags)
    return img
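
# Self-contained sketch of how deskew() is typically wired up. The values of SZ and
# affine_flags below are assumptions (they match the OpenCV hand-written digits tutorial
# this snippet appears to come from), and the demo helper is illustrative, not original code.
import cv2
import numpy as np

SZ = 20  # assumed side length of each square digit image
affine_flags = cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR  # assumed warp flags

def _deskew_demo():
    # a synthetic slanted stroke on a 20x20 single-channel image, just to exercise deskew()
    sample = np.zeros((SZ, SZ), np.uint8)
    cv2.line(sample, (5, 18), (14, 2), 255, 2)
    return deskew(sample)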
    def getCentre(self, contour):
        M = cv2.moments(contour)
        try:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
        except ZeroDivisionError:
            # degenerate contour (m00 == 0): fall back to the origin
            cX = 0
            cY = 0
        return cX, cY
Example #15
def moment():
    # imgray is a grayscale image defined at module level in the original source
    ret, thr = cv2.threshold(imgray, 127, 255, 0)
    contours, _ = cv2.findContours(thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    contour = contours[0]
    mmt = cv2.moments(contour)

    for key, val in mmt.items():
        print('%s:\t%.5f' % (key, val))
Example #16
def get_inside_face_cnt(emoji_cnts, face_cnt):
    filtered = []
    max_x, max_y, max_w, max_h = cv2.boundingRect(face_cnt)
    for cnt in emoji_cnts:
        M = cv2.moments(cnt)
        if M['m00'] == 0:
            continue
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        # keep contours whose centroid lies inside the face bounding box
        if max_x < cx < max_x + max_w and max_y < cy < max_y + max_h:
            filtered.append(cnt)

    return filtered
Example #17
def get_mouth(emoji_cnts):
    lower_y = -10
    lower_cnt = None
    for cnt in emoji_cnts:
        M = cv2.moments(cnt)
        cy = int(M['m01']/M['m00'])
        #print(cy)
        if cy > lower_y:
            lower_y = cy
            lower_cnt = cnt
    return lower_cnt
Example #18
    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return

        # (rows, cols, channels) = cv_image.shape
        # if cols > 60 and rows > 60:
        #    cv2.circle(cv_image, (50, 50), 10, 255)
        # cv2.imshow("Image window", cv_image)
        # cv2.waitKey(3)

        if (self.set_s == 0):
            self.prev_cv_image = cv_image.copy()
            self.set_s = 1
            binary = cv_image.copy()
            # print("Here Once")

        curr_gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        prev_gray = cv2.cvtColor(self.prev_cv_image, cv2.COLOR_BGR2GRAY)

        frame_diff = cv2.absdiff(curr_gray, prev_gray)

        ret, thresh = cv2.threshold(frame_diff, 30, 255, cv2.THRESH_BINARY)

        kernel = np.ones((3, 3), np.uint8)
        dilated = cv2.dilate(thresh, kernel, iterations=1)

        cnts = cv2.findContours(dilated.copy(), cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        ctemp_c = cv_image.copy()
        valid_cntr = []
        for c in cnts:
            #(x, y, w, h) = cv2.boundingRect(c)
            M = cv2.moments(c)
            if M["m00"] == 0:
                # skip zero-area contours
                continue

            self.cX = int(M["m10"] / M["m00"])
            self.cY = int(M["m01"] / M["m00"])

        cv2.drawContours(ctemp_c, cnts, -1, (127, 200, 0), 2)
        cv2.circle(ctemp_c, (self.cX, self.cY), 7, (255, 255, 255), -1)

        print("center", self.cX, self.cY)
        # Detect blobs(groups of pixels)
        # image = self.DetectBlobs(morphed, cv_image)  # Detect groups using countour area (Green formula)

        cv2.imshow("Image window", cv_image)
        cv2.imshow('frame diff ', frame_diff)
        cv2.imshow("contour", ctemp_c)
        cv2.waitKey(1)
        self.prev_cv_image = cv_image.copy()
Example #19
def edge_measure(image, contours):
    for i, contour in enumerate(contours):  # iterate over all contours
        area = cv.contourArea(contour)
        # cv.boundingRect returns four values: (x, y) is the top-left corner, (w, h) is the width and height
        x, y, w, h = cv.boundingRect(contour)  # bounding rectangle
        rate = min(w, h) / max(w, h)  # aspect ratio
        # compute the moments of the contour
        mm = cv.moments(contour)
        if mm["m00"] == 0:
            continue
        cx = mm["m10"] / mm["m00"]
        cy = mm["m01"] / mm["m00"]  # centroid of the shape (mm is a dict)
        cv.circle(image, (int(cx), int(cy)), 2, (0, 255, 255), -1)
        cv.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)  # bounding rectangle
        print("contour area ", area)
Example #20
def checkIfRectangle(image, singleContour):
    # relies on module-level names (yPosArrange, mouse, keyboard, mss, sleep) defined or imported elsewhere
    M = cv2.moments(singleContour)
    contourPerimeter = cv2.arcLength(singleContour, True)
    approximation = cv2.approxPolyDP(singleContour, 0.09 * contourPerimeter,
                                     True)

    if len(approximation) == 4:
        if M['m00'] > 100:
            groupX = np.sort([x[0][0] for x in approximation])
            groupY = np.sort([x[0][1] for x in approximation])

            # sample a pixel just inside the top-right corner of the approximated rectangle
            corner_pixel = image[groupY[0] + 3][groupX[3] - 3]
            if corner_pixel[0] == 112 or corner_pixel[1] == 112:

                # compute the click position inside the detected rectangle
                xPosMouse = int(np.mean(groupX))
                yPosMouse = int(groupY[0] + yPosArrange *
                                (groupY[3] - groupY[0]))

                mouse.click(button='right', coords=(xPosMouse, yPosMouse))
                keyboard.send_keys('o')
                sleep(.3)

                with mss.mss() as sct:
                    img = sct.grab({
                        'top': 144,
                        'left': 14,
                        'height': 1,
                        'width': 1
                    })

                    if img.pixel(0, 0) != (236, 233, 216):
                        return False,

                    else:
                        keyboard.send_keys('{ESC}')
                        return True, (groupX[3], groupY[3]), (xPosMouse,
                                                              yPosMouse)
                        # first for checking whether it is a rectangle
                        # second for writing tag number on the taglistpicture.png
                        # third for moving mouse pointer to do the capture

            else:
                return False,
        else:
            return False,
    else:
        return False,
Example #21
def Gestures_Detect(hand, sample_list, fourier_des_ls):
    ndefects = 0

    sign, large_cout = Find_Contour(hand, sample_list, fourier_des_ls)
    if not sign:
        ndefects = 11  # signals that contours was empty (debugging only)
        center = tuple([a // 2 for a in reversed(hand.shape)])  # image center coordinates
        return hand, ndefects, center

    black2 = np.ones(hand.shape, np.uint8)  # blank canvas to draw on
    cv2.drawContours(black2, large_cout, -1, (255, 255, 255), 2)  # draw the contour in white
    cv2.imshow('large_cout', black2)

    hull = cv2.convexHull(large_cout, returnPoints=False)
    defects = cv2.convexityDefects(large_cout, hull)
    _, radius = cv2.minEnclosingCircle(large_cout)

    if defects is not None:
        for i in range(defects.shape[0]):
            s, e, f, _ = defects[i, 0]
            sta = tuple(large_cout[s][0])
            end = tuple(large_cout[e][0])
            far = tuple(large_cout[f][0])
            B = scfun.Eucledian_Distance(sta, far)
            C = scfun.Eucledian_Distance(end, far)
            # filter out defects whose two sides are too short
            if B + C > radius:
                A = scfun.Eucledian_Distance(sta, end)  # base of the triangle
                angle = acos((B**2 + C**2 - A**2) / (2 * B * C))

                if angle <= pi / 2.5:
                    ndefects += 1
    else:
        ndefects = 12
    '''
    test=scfun.Fourier_Descriptor(large_cout[:,0,:],Normalize=True)
    similar=scfun.Eucledian_Distance(test,fourier_des_ls[0])
    print('{:.5f}  {:.5f}'.format(similar,log(similar)))
    '''
    M = cv2.moments(large_cout)
    center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))  # centroid of the hand

    x, y, w, h = cv2.boundingRect(large_cout)

    hand = cv2.cvtColor(hand, cv2.COLOR_GRAY2BGR)  # convert grayscale to BGR so the green box shows in color
    hand = cv2.rectangle(hand, (x, y), (x + w, y + h), (0, 255, 0), 2)

    return hand, ndefects, center
Example #22
    def calc_centroid(img):
        """Get the centroid and area of green in the image"""

        hue_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        low_range = np.array([165, 145, 30])
        high_range = np.array([180, 200, 58])
        th = cv2.inRange(hue_image, low_range, high_range)
        # cv2.imshow("img", img)
        # cv2.waitKey(1)

        M = cv2.moments(th, binaryImage=True)
        # min_prop, width and height are defined elsewhere in the original code
        if M["m00"] >= min_prop * width * height:
            cx = int(M["m10"] / M["m00"])
            cy = int(M["m01"] / M["m00"])
            return [cx, cy, int(M["m00"])]
        else:
            return [-1, -1, -1]
Example #23
def calc_moments(contours, min_value=0, max_value=640) -> list:
    """
    A moment is a weighted average of pixel intensities.

    Args:
        contours: list of contours
        min_value: minimum contour area to include
        max_value: maximum contour area to include

    Returns:
        list of moment dicts (cv2.moments output) for the contours within the area range
    """
    M = []
    for c in contours:
        if min_value <= cv2.contourArea(c) <= max_value:
            M.append(cv2.moments(c))
    return M
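
# Hedged usage sketch for calc_moments(): turn the returned moment dicts into centroids.
# The binary-image argument and helper name are illustrative; cv2 is assumed imported and
# the OpenCV 4.x findContours() signature is assumed.
def _centroids_from_moments(binary):
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    centroids = []
    for m in calc_moments(contours, min_value=10, max_value=640):
        if m["m00"] != 0:
            centroids.append((int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])))
    return centroids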
Example #24
def center_image(image):
    height, width = image.shape
    wi = (width / 2)
    he = (height / 2)

    ret, thresh = cv2.threshold(image, 95, 255, 0)

    M = cv2.moments(thresh)
    if M["m00"] == 0:
        # nothing above the threshold; return the image unchanged
        return image

    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])

    offsetX = (wi - cX)
    offsetY = (he - cY)
    T = np.float32([[1, 0, offsetX], [0, 1, offsetY]])
    centered_image = cv2.warpAffine(image, T, (width, height))

    return centered_image
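
# Hedged usage sketch for center_image(): re-center a grayscale digit-style image around
# the centroid of its thresholded pixels. The file name and helper name are illustrative only.
def _center_image_demo(path="digit.png"):
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise FileNotFoundError(path)
    return center_image(img)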
Example #25
def draw_contours(image,
                  coordinates,
                  label,
                  font_color,
                  border_color=COLOR_RED,
                  line_thickness=1,
                  font=open_cv.FONT_HERSHEY_SIMPLEX,
                  font_scale=0.5):
    open_cv.drawContours(image, [coordinates],
                         contourIdx=-1,
                         color=border_color,
                         thickness=2,
                         lineType=open_cv.LINE_8)
    moments = open_cv.moments(coordinates)

    center = (int(moments["m10"] / moments["m00"]) - 3,
              int(moments["m01"] / moments["m00"]) + 3)

    open_cv.putText(image, label, center, font, font_scale, font_color,
                    line_thickness, open_cv.LINE_AA)
Example #26
def extract_face_features(img):
    original = img.copy()
    #plt_show_img(img)
    img = process_emoji(img)
    
    filtered_cnt, max_cnt = get_contours(img)
    new_filter = get_inside_face_cnt(filtered_cnt, max_cnt)
    emoji_fts = []
    #mouth_emoji2 = get_mouth(new_filter)

    for mouth_emoji in new_filter:
        if mouth_emoji is None:
            continue
        M = cv2.moments(mouth_emoji)
        cy = int(M['m01']/M['m00'])
        x,y,w,h = cv2.boundingRect(mouth_emoji)
        roi = original[y:y+h, x:x+w].copy()
        emoji_fts.append(roi)
        #plt_show_img(roi)
    
    return emoji_fts
def image_processing(frame):
    green_lower = (29, 86, 6)
    green_upper = (64, 255, 255)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, green_lower, green_upper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    contour = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
    contour = imutils.grab_contours(contour)
    if len(contour) > 0:
        c = max(contour, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        m = cv2.moments(c)
        center = (int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"]))
        if radius > 15:
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 0), 2)
            cv2.circle(frame, center, 5, (255, 0, 0), -1)
    return frame
Example #28
def calculate_area_perimeter(rgb_img, color=(255, 0, 255), thickness=4):
    gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2GRAY)
    thresh = adaptative_biarization(gray_img)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

    index = -1
    h, w, _ = rgb_img.shape

    objects = np.zeros([h, w, 1], 'uint8')
    for c in contours:
        cv2.drawContours(objects, [c], index, color, -1)
        area = cv2.contourArea(c)
        perimeter = cv2.arcLength(c, closed=True)

        Moment = cv2.moments(c)
        if Moment['m00'] == 0:
            # skip degenerate contours
            continue
        cx = int(Moment['m10'] / Moment['m00'])
        cy = int(Moment['m01'] / Moment['m00'])
        cv2.circle(objects, (cx, cy), 4, (0, 0, 255), -1)
        print(f'Area: {area}, Perimeter:{perimeter}')

    return objects
Example #29
def readFrame():
    # camera, colorUpLower, colorUpUpper, RADIUS and frame_count_up are module-level globals
    ret, frame = camera.read()

    if ret:
        # Process frame @ lower quality level
        frame = imutils.resize(frame, width=600)
        blurred = cv.GaussianBlur(frame, (11, 11), 0)
        hsv = cv.cvtColor(blurred, cv.COLOR_BGR2HSV)

        mask = cv.inRange(hsv, colorUpLower, colorUpUpper)
        mask = cv.erode(mask, None, iterations=2)
        mask = cv.dilate(mask, None, iterations=2)

        cv.imshow('Mask', mask)

        # Pull contours to draw
        contours = cv.findContours(
            mask.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        contours = imutils.grab_contours(contours)
        center = None

        if len(contours) > 0:
            maxC = max(contours, key=cv.contourArea)
            ((x, y), radius) = cv.minEnclosingCircle(maxC)
            M = cv.moments(maxC)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            if radius > RADIUS:
                cv.circle(frame, (int(x), int(y)),
                          int(radius), (0, 255, 255), 2)
                cv.circle(frame, center, 5, (0, 0, 255), -1)
                global frame_count_up
                frame_count_up = frame_count_up + 1

        frame = cv.flip(frame, 1)
        cv.imshow('Frame', frame)
Example #30
def geContours(img,imgContour):
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        areaMin = 100
        if area > areaMin:
            cv2.drawContours(imgContour, cnt, -1, (255, 0, 255), 5)
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)

            x , y , w, h = cv2.boundingRect(approx)
            cv2.rectangle(imgContour, (x , y ), (x + w , y + h ), (0, 255, 0), 5)
            
            M = cv2.moments(cnt)
            cx = int(M["m10"]/M["m00"])
            cy = int(M["m01"]/M["m00"])

            cv2.circle(imgContour, (cx, cy), 7, (255, 255, 255), -1)

            rotrect = cv2.minAreaRect(cnt)
            box = cv2.boxPoints(rotrect)
            box = np.int0(box)

            angle = rotrect[-1]
            if angle < -45:
                angle = -(90 + angle)
            else:
                angle = -angle

            print(round(angle), "deg")
            print((cx, cy))
 
            cv2.putText(imgContour, "Area: " + str(int(area)), (x + w + 20, y + 35), cv2.FONT_HERSHEY_COMPLEX, 0.4,
                        (0, 255, 0), 1)
            cv2.putText(imgContour, "Degree: " + str(int(angle)), (x + w + 20, y + 50), cv2.FONT_HERSHEY_COMPLEX, 0.4,
                        (0, 255, 0), 1)